/* draw.c */
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                        DDDD   RRRR    AAA   W   W                           %
%                        D   D  R   R  A   A  W   W                           %
%                        D   D  RRRR   AAAAA  W W W                           %
%                        D   D  R  R   A   A  WW WW                           %
%                        DDDD   R   R  A   A  W   W                           %
%                                                                             %
%                     MagickCore Image Drawing Methods                        %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                 July 1998                                   %
%                                                                             %
%  Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.          %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
%  rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
%  Graphics Gems, 1990.  Leonard Rosenthal and David Harr of Appligent
%  (www.appligent.com) contributed the dash pattern, linecap stroking
%  algorithm, and minor rendering improvements.
%
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"

/*
  Define declarations.
*/
#define BezierQuantum  200
#define PrimitiveExtentPad  2048
#define MaxBezierCoordinates  67108864
#define ThrowPointExpectedException(token,exception) \
{ \
  (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
    "NonconformingDrawingPrimitiveDefinition","`%s'",token); \
  status=MagickFalse; \
  break; \
}
/*
  Typedef declarations.
*/
typedef struct _EdgeInfo
{
  SegmentInfo bounds;
  double scanline;
  PointInfo *points;
  size_t number_points;
  ssize_t direction;
  MagickBooleanType ghostline;
  size_t highwater;
} EdgeInfo;

typedef struct _ElementInfo
{
  double cx, cy, major, minor, angle;
} ElementInfo;

typedef struct _MVGInfo
{
  PrimitiveInfo **primitive_info;
  size_t *extent;
  ssize_t offset;
  PointInfo point;
  ExceptionInfo *exception;
} MVGInfo;

typedef struct _PolygonInfo
{
  EdgeInfo *edges;
  size_t number_edges;
} PolygonInfo;

typedef enum
{
  MoveToCode,
  OpenCode,
  GhostlineCode,
  LineToCode,
  EndCode
} PathInfoCode;

typedef struct _PathInfo
{
  PointInfo point;
  PathInfoCode code;
} PathInfo;

/*
  Forward declarations.
*/
static Image
  *DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
    ExceptionInfo *);

static MagickBooleanType
  DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
    ExceptionInfo *),
  RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
  TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
    const double,const MagickBooleanType,const MagickBooleanType),
  TraceBezier(MVGInfo *,const size_t),
  TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
  TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
  TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);

static PrimitiveInfo
  *TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);

static ssize_t
  TracePath(MVGInfo *,const char *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   A c q u i r e D r a w I n f o
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
%  The format of the AcquireDrawInfo method is:
%
%      DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo *draw_info;

  draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}
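/*
  A minimal usage sketch (an illustrative addition, not part of the original
  source; assumes MagickCoreGenesis() has run and `image' and `exception'
  are valid):

    DrawInfo *draw_info = AcquireDrawInfo();

    (void) CloneString(&draw_info->primitive,"circle 100,100 150,150");
    (void) DrawImage(image,draw_info,exception);
    draw_info=DestroyDrawInfo(draw_info);
*/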
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   C l o n e D r a w I n f o
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneDrawInfo() makes a copy of the given draw_info structure.  If NULL
%  is specified, a new DrawInfo structure is created initialized to default
%  values.
%
%  The format of the CloneDrawInfo method is:
%
%      DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
%        const DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo *clone_info;
  ExceptionInfo *exception;

  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t x;

      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t)
        (2*x+2),sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
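  /*
    Added note: the dash pattern is a zero-terminated array, so the loop
    above counts entries up to the first ~0.0 value and the over-allocated,
    zeroed copy preserves that terminator.
  */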
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+   C o n v e r t P a t h T o P o l y g o n
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPathToPolygon() converts a path to the more efficient sorted
%  rendering form.
%
%  The format of the ConvertPathToPolygon method is:
%
%      PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
%  A description of each parameter follows:
%
%    o ConvertPathToPolygon() returns the path in a more efficient sorted
%      rendering form of type PolygonInfo.
%
%    o path_info: Specifies a pointer to a PathInfo structure.
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
  if (((p)-(q)) < 0.0) \
    return(-1); \
  if (((p)-(q)) > 0.0) \
    return(1); \
}

  register const PointInfo *p, *q;

  /*
    Edge sorting for right-handed coordinate system.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  DrawCompareEdge(p[0].y,q[0].y);
  DrawCompareEdge(p[0].x,q[0].x);
  DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
    (q[1].x-q[0].x));
  DrawCompareEdge(p[1].y,q[1].y);
  DrawCompareEdge(p[1].x,q[1].x);
  return(0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
"transparent" : "opaque"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1, p->bounds.x2,p->bounds.y2); for (j=0; j < (ssize_t) p->number_points; j++) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g", p->points[j].x,p->points[j].y); p++; } (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge"); } static void ReversePoints(PointInfo *points,const size_t number_points) { PointInfo point; register ssize_t i; for (i=0; i < (ssize_t) (number_points >> 1); i++) { point=points[i]; points[i]=points[number_points-(i+1)]; points[number_points-(i+1)]=point; } } static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info) { long direction, next_direction; PointInfo point, *points; PolygonInfo *polygon_info; SegmentInfo bounds; register ssize_t i, n; MagickBooleanType ghostline; size_t edge, number_edges, number_points; /* Convert a path to the more efficient sorted rendering form. */ polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info)); if (polygon_info == (PolygonInfo *) NULL) return((PolygonInfo *) NULL); number_edges=16; polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); (void) memset(polygon_info->edges,0,number_edges* sizeof(*polygon_info->edges)); direction=0; edge=0; ghostline=MagickFalse; n=0; number_points=0; points=(PointInfo *) NULL; (void) memset(&point,0,sizeof(point)); (void) memset(&bounds,0,sizeof(bounds)); polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=0.0; polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) direction; polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->number_edges=0; for (i=0; path_info[i].code != EndCode; i++) { if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) || (path_info[i].code == GhostlineCode)) { /* Move to. */ if ((points != (PointInfo *) NULL) && (n >= 2)) { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; points=(PointInfo *) NULL; ghostline=MagickFalse; edge++; } if (points == (PointInfo *) NULL) { number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse; point=path_info[i].point; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; direction=0; n=1; continue; } /* Line to. */ next_direction=((path_info[i].point.y > point.y) || ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) && (path_info[i].point.x > point.x))) ? 
    /*
      Line to.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}
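/*
  Illustrative note (added): a closed rectangular path such as
  M 0,0 L 10,0 L 10,10 L 0,10 Z splits into two y-monotone edges here, one
  "down" and one "up"; up edges are reversed so every edge's points run
  top-to-bottom, the order the scanline rasterizer later in this file
  expects.
*/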
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+   C o n v e r t P r i m i t i v e T o P a t h
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
%  path structure.
%
%  The format of the ConvertPrimitiveToPath method is:
%
%      PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o ConvertPrimitiveToPath() returns a vector path structure of type
%      PathInfo.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/

static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "      %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
      "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end vector-path");
}

static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  MagickBooleanType closed_subpath;
  PathInfo *path_info;
  PathInfoCode code;
  PointInfo p, q;
  register ssize_t i, n;
  ssize_t coordinates, start;

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}
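/*
  Added note: for an open subpath the first point is re-coded OpenCode and a
  GhostlineCode/LineToCode pair is appended retracing to the start point, so
  the rasterizer can close the shape for filling without visibly stroking
  the phantom segment.
*/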
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   D e s t r o y D r a w I n f o
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyDrawInfo() deallocates memory associated with a DrawInfo structure.
%
%  The format of the DestroyDrawInfo method is:
%
%      DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+   D e s t r o y E d g e
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyEdge() destroys the specified polygon edge.
%
%  The format of the DestroyEdge method is:
%
%      size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
%    o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
{
  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  if (edge < polygon_info->number_edges)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+   D e s t r o y P o l y g o n I n f o
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
%  The format of the DestroyPolygonInfo method is:
%
%      PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register ssize_t i;

  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
        polygon_info->edges[i].points=(PointInfo *)
          RelinquishMagickMemory(polygon_info->edges[i].points);
      polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
        polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   D r a w A f f i n e I m a g e
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawAffineImage() composites the source over the destination image as
%  dictated by the affine transform.
%
%  The format of the DrawAffineImage method is:
%
%      MagickBooleanType DrawAffineImage(Image *image,const Image *source,
%        const AffineMatrix *affine,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o source: the source image.
%
%    o affine: the affine transform.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double intercept, z;
  register double x;
  SegmentInfo inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}

static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix inverse_affine;
  double determinant;

  determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=determinant*affine->sy;
  inverse_affine.rx=determinant*(-affine->rx);
  inverse_affine.ry=determinant*(-affine->ry);
  inverse_affine.sy=determinant*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}
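/*
  Added math note: the forward mapping is

    x' = sx*x + ry*y + tx,   y' = rx*x + sy*y + ty

  so InverseAffineMatrix() is the standard 2x2 inverse scaled by 1/det with
  det = sx*sy - rx*ry, and the translation mapped through the inverted
  linear part.  PerceptibleReciprocal() guards against a degenerate
  (non-invertible) matrix.
*/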
MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix inverse_affine;
  CacheView *image_view, *source_view;
  MagickBooleanType status;
  PixelInfo zero;
  PointInfo extent[4], min, max;
  register ssize_t i;
  SegmentInfo edge;
  ssize_t start, stop, y;

  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo composite, pixel;
    PointInfo point;
    register ssize_t x;
    register Quantum *magick_restrict q;
    SegmentInfo inverse_edge;
    ssize_t x_offset;

    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    x_offset=0;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5);
         x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
    {
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,
        UndefinedInterpolatePixel,point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
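/*
  A hedged usage sketch (illustrative, not part of the original file):
  composite `source' over `image', rotated 30 degrees about the origin:

    AffineMatrix affine;

    GetAffineMatrix(&affine);
    affine.sx=cos(DegreesToRadians(30.0));
    affine.rx=sin(DegreesToRadians(30.0));
    affine.ry=(-sin(DegreesToRadians(30.0)));
    affine.sy=cos(DegreesToRadians(30.0));
    (void) DrawAffineImage(image,source,&affine,exception);
*/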
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+   D r a w B o u n d i n g R e c t a n g l e s
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawBoundingRectangles() draws the bounding rectangles on the image.  This
%  is only useful for developers debugging the rendering algorithm.
%
%  The format of the DrawBoundingRectangles method is:
%
%      MagickBooleanType DrawBoundingRectangles(Image *image,
%        const DrawInfo *draw_info,PolygonInfo *polygon_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  return(MagickMin((double) draw_info->stroke_width,
    (2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,image->rows)));
}

static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double mid;
  DrawInfo *clone_info;
  MagickStatusType status;
  PointInfo end, resolution, start;
  PrimitiveInfo primitive_info[6];
  register ssize_t i;
  SegmentInfo bounds;
  ssize_t coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo geometry_info;
      MagickStatusType flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    SaneStrokeWidth(image,clone_info)/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,
            &clone_info->stroke,exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,
            &clone_info->stroke,exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
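  /*
    Added note: per-edge rectangles above are stroked red for "down" edges
    and green for "up" edges; the overall bounds drawn below are stroked
    blue.
  */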
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   D r a w C l i p P a t h
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClipPath() draws the clip path on the image mask.
%
%  The format of the DrawClipPath method is:
%
%      MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
%        const char *id,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char *clip_path;
  Image *clipping_mask;
  MagickBooleanType status;

  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);
  clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,
    clip_path,exception);
  if (clipping_mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
  clipping_mask=DestroyImage(clipping_mask);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   D r a w C l i p p i n g M a s k
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClippingMask() draws the clip path and returns it as an image clipping
%  mask.
%
%  The format of the DrawClippingMask method is:
%
%      Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *clip_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o clip_path: the clip path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo *clone_info;
  Image *clip_mask, *separate_mask;
  MagickStatusType status;

  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
      if (status == MagickFalse)
        clip_mask=DestroyImage(clip_mask);
    }
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   D r a w C o m p o s i t e M a s k
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawCompositeMask() draws the mask path and returns it as an image mask.
%
%  The format of the DrawCompositeMask method is:
%
%      Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *mask_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the mask path id.
%
%    o mask_path: the mask path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image *composite_mask, *separate_mask;
  DrawInfo *clone_info;
  MagickStatusType status;

  /*
    Draw a mask path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
      if (status == MagickFalse)
        composite_mask=DestroyImage(composite_mask);
    }
  if (status == MagickFalse)
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}
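/*
  Illustrative note (an assumption for clarity, not original text): a dash
  pattern is a zero-terminated array of alternating on/off lengths in user
  units; for example { 4.0, 2.0, 0.0 } strokes 4 units on, 2 units off,
  repeating, while dash_offset shifts the starting phase into that cycle.
*/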
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+   D r a w D a s h P o l y g o n
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
%  image while respecting the dash offset and dash pattern attributes.
%
%  The format of the DrawDashPolygon method is:
%
%      MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double length, maximum_length, offset, scale, total_length;
  DrawInfo *clone_info;
  MagickStatusType status;
  PrimitiveInfo *dash_polygon;
  register double dx, dy;
  register ssize_t i;
  size_t number_vertices;
  ssize_t j, n;

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin draw-dash");
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > MaxBezierCoordinates)
      break;
    if (fabs(length) < MagickEpsilon)
      {
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) &&
         (maximum_length >= (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   D r a w G r a d i e n t I m a g e
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGradientImage() draws a linear gradient on the image.
%
%  The format of the DrawGradientImage method is:
%
%      MagickBooleanType DrawGradientImage(Image *image,
%        const DrawInfo *draw_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double gamma, length, offset, scale;
      PointInfo p, q;
      const SegmentInfo *gradient_vector;

      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo v;

      if (gradient->spread == RepeatSpread)
        {
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}

static int StopInfoCompare(const void *x,const void *y)
{
  StopInfo *stop_1, *stop_2;

  stop_1=(StopInfo *) x;
  stop_2=(StopInfo *) y;
  if (stop_1->offset > stop_2->offset)
    return(1);
  if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}
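/*
  Added math note: for a linear gradient, GetStopColorOffset() projects the
  pixel vector q = (x,y) - v1 onto the gradient vector p = v2 - v1 and
  returns ((p.q)/(|p||q|))*|q| = (p.q)/|p|, the unnormalized distance along
  the gradient axis; DrawGradientImage() below divides once more by |p| to
  obtain a 0..1 stop offset.
*/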
MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView *image_view;
  const GradientInfo *gradient;
  const SegmentInfo *gradient_vector;
  double length;
  MagickBooleanType status;
  PixelInfo zero;
  PointInfo point;
  RectangleInfo bounding_box;
  ssize_t y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double alpha, offset;
    PixelInfo composite, pixel;
    register Quantum *magick_restrict q;
    register ssize_t i, x;
    ssize_t j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
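        /*
          Added note: ReflectSpread above folds the offset with fmod() so it
          ping-pongs across 0..1 (e.g. an offset of 1.25 maps to 0.75);
          RepeatSpread below wraps it instead, with `antialias' flagged on
          the wrap seam so adjacent stops can be blended there.
        */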
        case RepeatSpread:
        {
          double repeat;
          MagickBooleanType antialias;

          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   D r a w I m a g e
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawImage() draws a graphic primitive on your image.  The primitive may be
%  represented as a string or filename.  Precede the filename with an "at"
%  sign (@) and the contents of the file are drawn on the image.  You can
%  affect how text is drawn by setting one or more members of the draw info
%  structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double extent;
  size_t quantum;

  /*
    Check if there is enough storage for drawing primitives.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          register ssize_t i;

          *mvg_info->extent=(size_t) extent;
          for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
*/ (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL) *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory( *mvg_info->primitive_info); *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory( PrimitiveExtentPad*quantum); (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum); *mvg_info->extent=1; return(MagickFalse); } MagickExport int MVGMacroCompare(const void *target,const void *source) { const char *p, *q; p=(const char *) target; q=(const char *) source; return(strcmp(p,q)); } static SplayTreeInfo *GetMVGMacros(const char *primitive) { char *macro, *token; const char *q; size_t extent; SplayTreeInfo *macros; /* Scan graphic primitives for definitions and classes. */ if (primitive == (const char *) NULL) return((SplayTreeInfo *) NULL); macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory, RelinquishMagickMemory); macro=AcquireString(primitive); token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; for (q=primitive; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (*token == '\0') break; if (LocaleCompare("push",token) == 0) { register const char *end, *start; (void) GetNextToken(q,&q,extent,token); if (*q == '"') { char name[MagickPathExtent]; const char *p; ssize_t n; /* Named macro (e.g. push graphic-context "wheel"). */ (void) GetNextToken(q,&q,extent,token); start=q; end=q; (void) CopyMagickString(name,token,MagickPathExtent); n=1; for (p=q; *p != '\0'; ) { if (GetNextToken(p,&p,extent,token) < 1) break; if (*token == '\0') break; if (LocaleCompare(token,"pop") == 0) { end=p-strlen(token)-1; n--; } if (LocaleCompare(token,"push") == 0) n++; if ((n == 0) && (end > start)) { /* Extract macro. */ (void) GetNextToken(p,&p,extent,token); (void) CopyMagickString(macro,start,(size_t) (end-start)); (void) AddValueToSplayTree(macros,ConstantString(name), ConstantString(macro)); break; } } } } } token=DestroyString(token); macro=DestroyString(macro); return(macros); } static inline MagickBooleanType IsPoint(const char *point) { char *p; double value; value=StringToDouble(point,&p); return((fabs(value) < MagickEpsilon) && (p == point) ? 
MagickFalse : MagickTrue); } static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info, const PointInfo point) { primitive_info->coordinates=1; primitive_info->closed_subpath=MagickFalse; primitive_info->point=point; return(MagickTrue); } static MagickBooleanType RenderMVGContent(Image *image, const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, coordinates, cursor, factor, primitive_extent; DrawInfo *clone_info, **graphic_context; MagickBooleanType proceed; MagickStatusType status; MVGInfo mvg_info; PointInfo point; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_points, number_stops; SplayTreeInfo *macros; ssize_t defsDepth, j, k, n, symbolDepth; StopInfo *stops; TypeMetric metrics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (depth > MagickMaxRecursionDepth) ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply", image->filename); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) { status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); if (status == MagickFalse) return(MagickFalse); } if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) && (*(draw_info->primitive+1) != '-') && (depth == 0)) primitive=FileToString(draw_info->primitive+1,~0UL,exception); else primitive=AcquireString(draw_info->primitive); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"mvg:vector-graphics",primitive); n=0; number_stops=0; stops=(StopInfo *) NULL; /* Allocate primitive info memory. 
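The pool starts at PrimitiveExtentPad entries and is grown on demand by
CheckPrimitiveExtent() as primitives are parsed.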
*/ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=PrimitiveExtentPad; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(primitive_info,0,(size_t) number_points* sizeof(*primitive_info)); (void) memset(&mvg_info,0,sizeof(mvg_info)); mvg_info.primitive_info=(&primitive_info); mvg_info.extent=(&number_points); mvg_info.exception=exception; graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; defsDepth=0; symbolDepth=0; cursor=0.0; macros=GetMVGMacros(primitive); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. */ if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1) break; if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); *token='\0'; switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.rx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ry=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, 
&graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("class",keyword) == 0) { const char *mvg_class; (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } if (LocaleCompare(token,graphic_context[n]->id) == 0) break; mvg_class=(const char *) GetValueFromSplayTree(macros,token); if (mvg_class != (const char *) NULL) { char *elements; ssize_t offset; /* Inject class elements in stream. */ offset=(ssize_t) (p-primitive); elements=AcquireString(primitive); elements[offset]='\0'; (void) ConcatenateString(&elements,mvg_class); (void) ConcatenateString(&elements,"\n"); (void) ConcatenateString(&elements,q); primitive=DestroyString(primitive); primitive=elements; q=primitive+offset; } break; } if (LocaleCompare("clip-path",keyword) == 0) { const char *clip_path; /* Take a node from within the MVG document, and duplicate it here. */ (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } (void) CloneString(&graphic_context[n]->clip_mask,token); clip_path=(const char *) GetValueFromSplayTree(macros,token); if (clip_path != (const char *) NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask= DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask=DrawClippingMask(image, graphic_context[n],token,clip_path,exception); if (graphic_context[n]->compliance != SVGCompliance) { clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image, graphic_context[n]->clip_mask,clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } } break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; (void) GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } if (LocaleCompare("compliance",keyword) == 0) { /* MVG compliance associates a clipping mask with an image; SVG compliance associates a clipping mask with a graphics context. 
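Concretely: a non-SVG context applies DrawClipPath() to the image as soon
as the clip-path is referenced, while an SVG context keeps the rendered
clipping_mask on the graphic context and defers compositing.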
*/ (void) GetNextToken(q,&q,extent,token); graphic_context[n]->compliance=(ComplianceType) ParseCommandOption( MagickComplianceOptions,MagickFalse,token); break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; (void) GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) { status=MagickFalse; break; } graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; (void) GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; } break; } if (LocaleCompare("fill-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->fill_alpha*=opacity; else graphic_context[n]->fill_alpha=QuantumRange*opacity; if (graphic_context[n]->fill.alpha != TransparentAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; else graphic_context[n]->fill.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; (void) GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) { status=MagickFalse; break; } graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; (void) GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) { status=MagickFalse; break; } graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; (void) GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; (void) GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) { status=MagickFalse; break; } graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; (void) GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) { status=MagickFalse; break; } graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("interword-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == 
next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (IsPoint(token) == MagickFalse) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); clone_info->text=AcquireString(" "); status&=GetTypeMetrics(image,clone_info,&metrics,exception); graphic_context[n]->kerning=metrics.width* StringToDouble(token,&next_token); clone_info=DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask",keyword) == 0) { const char *mask_path; /* Take a node from within the MVG document, and duplicate it here. */ (void) GetNextToken(q,&q,extent,token); mask_path=(const char *) GetValueFromSplayTree(macros,token); if (mask_path != (const char *) NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask= DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask=DrawCompositeMask(image, graphic_context[n],token,mask_path,exception); if (graphic_context[n]->compliance != SVGCompliance) status=SetImageMask(image,CompositePixelMask, graphic_context[n]->composite_mask,exception); } break; } break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) { graphic_context[n]->fill_alpha*=opacity; graphic_context[n]->stroke_alpha*=opacity; } else { graphic_context[n]->fill_alpha=QuantumRange*opacity; graphic_context[n]->stroke_alpha=QuantumRange*opacity; } break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) break; if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) { defsDepth--; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if ((graphic_context[n]->clip_mask != (char *) NULL) && (graphic_context[n]->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) status=SetImageMask(image,WritePixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask",token) == 0) break; if (LocaleCompare("pattern",token) == 0) break; if (LocaleCompare("symbol",token) == 0) { symbolDepth--; graphic_context[n]->render=symbolDepth > 0 ? MagickFalse : MagickTrue; break; } status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) { /* Class context. */ for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"class") != 0) continue; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("clip-path",token) == 0) { (void) GetNextToken(q,&q,extent,token); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("defs",token) == 0) { defsDepth++; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); segment.x1=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y1=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.x2=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y2=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (LocaleCompare(type,"radial") == 0) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); if (*q == '"') { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->id,token); } break; } if (LocaleCompare("mask",token) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo bounds; (void) GetNextToken(q,&q,extent,token); (void) 
CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.width=(size_t) floor(StringToDouble(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.height=(size_t) floor(StringToDouble(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double) bounds.height,(double) bounds.x,(double) bounds.y); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("symbol",token) == 0) { symbolDepth++; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("skewX",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); if (stops == (StopInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; (void) GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; stops[number_stops-1].offset=factor*StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } (void) memset(graphic_context[n]->dash_pattern,0,(size_t) (2*x+2)*sizeof(*graphic_context[n]->dash_pattern)); for (j=0; j < x; j++) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; (void) GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) { status=MagickFalse; break; } graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; (void) GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) { status=MagickFalse; break; } graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->stroke_alpha*=opacity; else graphic_context[n]->stroke_alpha=QuantumRange*opacity; if (graphic_context[n]->stroke.alpha != TransparentAlpha) graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha; else graphic_context[n]->stroke.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("stroke-width",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; cursor=0.0; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); cursor=0.0; break; } status=MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use",keyword) == 0) { const char *use; /* Get a macro from the MVG document, and "use" it here. 
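The macro body is rendered through a recursive RenderMVGContent() call on
a cloned DrawInfo, so the MagickMaxRecursionDepth guard above bounds
self-referential documents.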
*/ (void) GetNextToken(q,&q,extent,token); use=(const char *) GetValueFromSplayTree(macros,token); if (use != (const char *) NULL) { clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); (void) CloneString(&clone_info->primitive,use); status=RenderMVGContent(image,clone_info,depth+1,exception); clone_info=DestroyDrawInfo(clone_info); } break; } break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) || (fabs(affine.sy-1.0) >= MagickEpsilon) || (fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),p); continue; } /* Parse the primitive attributes. 
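Leading x,y coordinate pairs are first collected into primitive_info[];
the primitive-specific Trace*() helpers below then expand them into the
vertex lists that DrawPrimitive() rasterizes.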
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); i=0; mvg_info.offset=i; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; primitive_info[0].coordinates=0; primitive_info[0].method=FloodfillMethod; primitive_info[0].closed_subpath=MagickFalse; for (x=0; *q != '\0'; x++) { /* Define points. */ if (IsPoint(q) == MagickFalse) break; (void) GetNextToken(q,&q,extent,token); point.x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); point.y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; primitive_info[i].closed_subpath=MagickFalse; i++; mvg_info.offset=i; if (i < (ssize_t) number_points) continue; status&=CheckPrimitiveExtent(&mvg_info,number_points); } if (status == MagickFalse) break; if ((primitive_info[j].primitive == TextPrimitive) || (primitive_info[j].primitive == ImagePrimitive)) if (primitive_info[j].text != (char *) NULL) primitive_info[j].text=DestroyString(primitive_info[j].text); primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].closed_subpath=MagickFalse; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. 
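The estimates err on the generous side (a rectangle expands to five
vertices, curved primitives reserve multiples of BezierQuantum) so that
the reallocation below is rarely needed mid-trace.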
*/ coordinates=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates*=5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); coordinates*=5.0; coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0* BezierQuantum+360.0; break; } case BezierPrimitive: { coordinates=(double) (BezierQuantum*primitive_info[j].coordinates); if (primitive_info[j].coordinates > (107*BezierQuantum)) { (void) ThrowMagickException(exception,GetMagickModule(),DrawError, "TooManyBezierCoordinates","`%s'",token); status=MagickFalse; break; } break; } case PathPrimitive: { char *s, *t; (void) GetNextToken(q,&q,extent,token); coordinates=1.0; t=token; for (s=token; *s != '\0'; s=t) { double value; value=StringToDouble(s,&t); (void) value; if (s == t) { t++; continue; } coordinates++; } for (s=token; *s != '\0'; s++) if (strspn(s,"AaCcQqSsTt") != 0) coordinates+=(20.0*BezierQuantum)+360.0; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot(alpha,beta); coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0; break; } default: break; } if (status == MagickFalse) break; if (((size_t) (i+coordinates)) >= number_points) { /* Resize based on speculative points required by primitive. */ number_points+=coordinates+1; if (number_points < (size_t) coordinates) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } mvg_info.offset=i; status&=CheckPrimitiveExtent(&mvg_info,number_points); } status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset=j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } status&=TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+2].point.x < 0.0) || (primitive_info[j+2].point.y < 0.0)) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0) { status=MagickFalse; break; } if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0) { status=MagickFalse; break; } status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { primitive_type=UndefinedPrimitive; break; } status&=TraceArc(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if 
((primitive_info[j+1].point.x < 0.0) || (primitive_info[j+1].point.y < 0.0)) { status=MagickFalse; break; } status&=TraceEllipse(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceCircle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status=MagickFalse; break; } break; } case PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath=MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } status&=TraceBezier(&mvg_info,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates=(double) TracePath(&mvg_info,token,exception); if (coordinates < 0.0) { status=MagickFalse; break; } i=(ssize_t) (j+coordinates); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) { status=MagickFalse; break; } primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { char geometry[MagickPathExtent]; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); /* Compute text cursor offset. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon)) { mvg_info.point=primitive_info->point; primitive_info->point.x+=cursor; } else { mvg_info.point=primitive_info->point; cursor=0.0; } (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); clone_info->render=MagickFalse; clone_info->text=AcquireString(token); status&=GetTypeMetrics(image,clone_info,&metrics,exception); clone_info=DestroyDrawInfo(clone_info); cursor+=metrics.width; if (graphic_context[n]->compliance != SVGCompliance) cursor=0.0; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); break; } } mvg_info.offset=i; if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1), p); if (status == MagickFalse) break; primitive_info[i].primitive=UndefinedPrimitive; if (i == 0) continue; /* Transform points. 
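Each vertex is mapped through the current affine matrix,

  x' = sx*x + ry*y + tx
  y' = rx*x + sy*y + ty

and the bounding box of the transformed primitive is accumulated into the
graphic context.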
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image,graphic_context[n]->clip_mask, clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. */ macros=DestroySplayTree(macros); token=DestroyString(token); if (primitive_info != (PrimitiveInfo *) NULL) { for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info); } primitive=DestroyString(primitive); if (stops != (StopInfo *) NULL) stops=(StopInfo *) RelinquishMagickMemory(stops); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); if (status == MagickFalse) ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition", keyword); return(status != 0 ? MagickTrue : MagickFalse); } MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info, ExceptionInfo *exception) { return(RenderMVGContent(image,draw_info,0,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P a t t e r n P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPatternPath() draws a pattern. % % The format of the DrawPatternPath method is: % % MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info, % const char *name,Image **pattern,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o name: the pattern name. % % o pattern: the pattern image.
% % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType DrawPatternPath(Image *image, const DrawInfo *draw_info,const char *name,Image **pattern, ExceptionInfo *exception) { char property[MagickPathExtent]; const char *geometry, *path, *type; DrawInfo *clone_info; ImageInfo *image_info; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); assert(name != (const char *) NULL); (void) FormatLocaleString(property,MagickPathExtent,"%s",name); path=GetImageArtifact(image,property); if (path == (const char *) NULL) return(MagickFalse); (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name); geometry=GetImageArtifact(image,property); if (geometry == (const char *) NULL) return(MagickFalse); if ((*pattern) != (Image *) NULL) *pattern=DestroyImage(*pattern); image_info=AcquireImageInfo(); image_info->size=AcquireString(geometry); *pattern=AcquireImage(image_info,exception); image_info=DestroyImageInfo(image_info); (void) QueryColorCompliance("#00000000",AllCompliance, &(*pattern)->background_color,exception); (void) SetImageBackgroundColor(*pattern,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), "begin pattern-path %s %s",name,geometry); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->fill_pattern=NewImageList(); clone_info->stroke_pattern=NewImageList(); (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name); type=GetImageArtifact(image,property); if (type != (const char *) NULL) clone_info->gradient.type=(GradientType) ParseCommandOption( MagickGradientOptions,MagickFalse,type); (void) CloneString(&clone_info->primitive,path); status=RenderMVGContent(*pattern,clone_info,0,exception); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w P o l y g o n P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The format of the DrawPolygonPrimitive method is: % % MagickBooleanType DrawPolygonPrimitive(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. 
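%  The polygon is rasterized one scanline at a time: for each pixel in the
%  primitive's bounding box, GetFillAlpha() derives a fill coverage from the
%  winding number (or the even-odd count, per the fill rule) and a stroke
%  coverage from the squared distance to the nearest edge.
%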
% */ static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info) { register ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i]=DestroyPolygonInfo(polygon_info[i]); polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info); return(polygon_info); } static PolygonInfo **AcquirePolygonThreadSet( const PrimitiveInfo *primitive_info) { PathInfo *magick_restrict path_info; PolygonInfo **polygon_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) return((PolygonInfo **) NULL); (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info)); path_info=ConvertPrimitiveToPath(primitive_info); if (path_info == (PathInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); for (i=0; i < (ssize_t) number_threads; i++) { polygon_info[i]=ConvertPathToPolygon(path_info); if (polygon_info[i] == (PolygonInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); } path_info=(PathInfo *) RelinquishMagickMemory(path_info); return(polygon_info); } static double GetFillAlpha(PolygonInfo *polygon_info,const double mid, const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x, const ssize_t y,double *stroke_alpha) { double alpha, beta, distance, subpath_alpha; PointInfo delta; register const PointInfo *q; register EdgeInfo *p; register ssize_t i; ssize_t j, winding_number; /* Compute fill & stroke opacity for this (x,y) point. */ *stroke_alpha=0.0; subpath_alpha=0.0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= (p->bounds.y1-mid-0.5)) break; if ((double) y > (p->bounds.y2+mid+0.5)) { (void) DestroyEdge(polygon_info,(size_t) j); continue; } if (((double) x <= (p->bounds.x1-mid-0.5)) || ((double) x > (p->bounds.x2+mid+0.5))) continue; i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) p->number_points; i++) { if ((double) y <= (p->points[i-1].y-mid-0.5)) break; if ((double) y > (p->points[i].y+mid+0.5)) continue; if (p->scanline != (double) y) { p->scanline=(double) y; p->highwater=(size_t) i; } /* Compute distance between a point and an edge. */ q=p->points+i-1; delta.x=(q+1)->x-q->x; delta.y=(q+1)->y-q->y; beta=delta.x*(x-q->x)+delta.y*(y-q->y); if (beta <= 0.0) { delta.x=(double) x-q->x; delta.y=(double) y-q->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=delta.x*delta.x+delta.y*delta.y; if (beta >= alpha) { delta.x=(double) x-(q+1)->x; delta.y=(double) y-(q+1)->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=PerceptibleReciprocal(alpha); beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon; distance=alpha*beta*beta; } } /* Compute stroke & subpath opacity. 
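Here mid is half the stroke width in device space: squared distances
inside roughly (mid-0.5)^2 count as fully stroked, and coverage tapers
off over about one pixel beyond the stroke boundary to antialias it.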
*/ beta=0.0; if (p->ghostline == MagickFalse) { alpha=mid+0.5; if ((*stroke_alpha < 1.0) && (distance <= ((alpha+0.25)*(alpha+0.25)))) { alpha=mid-0.5; if (distance <= ((alpha+0.25)*(alpha+0.25))) *stroke_alpha=1.0; else { beta=1.0; if (fabs(distance-1.0) >= MagickEpsilon) beta=sqrt((double) distance); alpha=beta-mid-0.5; if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25))) *stroke_alpha=(alpha-0.25)*(alpha-0.25); } } } if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0)) continue; if (distance <= 0.0) { subpath_alpha=1.0; continue; } if (distance > 1.0) continue; if (fabs(beta) < MagickEpsilon) { beta=1.0; if (fabs(distance-1.0) >= MagickEpsilon) beta=sqrt(distance); } alpha=beta-1.0; if (subpath_alpha < (alpha*alpha)) subpath_alpha=alpha*alpha; } } /* Compute fill opacity. */ if (fill == MagickFalse) return(0.0); if (subpath_alpha >= 1.0) return(1.0); /* Determine winding number. */ winding_number=0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= p->bounds.y1) break; if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1)) continue; if ((double) x > p->bounds.x2) { winding_number+=p->direction ? 1 : -1; continue; } i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) (p->number_points-1); i++) if ((double) y <= p->points[i].y) break; q=p->points+i-1; if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x))) winding_number+=p->direction ? 1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return(1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return(1.0); return(subpath_alpha); } static MagickBooleanType DrawPolygonPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType fill, status; double mid; PolygonInfo **magick_restrict polygon_info; register EdgeInfo *p; register ssize_t i; SegmentInfo bounds; ssize_t start_y, stop_y, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (primitive_info->coordinates <= 1) return(MagickTrue); /* Compute bounding box. */ polygon_info=AcquirePolygonThreadSet(primitive_info); if (polygon_info == (PolygonInfo **) NULL) return(MagickFalse); DisableMSCWarning(4127) if (0) { status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception); if (status == MagickFalse) { polygon_info=DestroyPolygonThreadSet(polygon_info); return(status); } } RestoreMSCWarning if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon"); fill=(primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? 
MagickTrue : MagickFalse; mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; bounds=polygon_info[0]->edges[0].bounds; for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p=polygon_info[0]->edges+i; if (p->bounds.x1 < bounds.x1) bounds.x1=p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1=p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2=p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2=p->bounds.y2; } bounds.x1-=(mid+1.0); bounds.y1-=(mid+1.0); bounds.x2+=(mid+1.0); bounds.y2+=(mid+1.0); if ((bounds.x1 >= (double) image->columns) || (bounds.y1 >= (double) image->rows) || (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0)) { polygon_info=DestroyPolygonThreadSet(polygon_info); return(MagickTrue); /* virtual polygon */ } bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x1; bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y1; bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x2; bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y2; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* Draw point. */ start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); x=start_x; q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for ( ; x <= stop_x; x++) { if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) && (y == (ssize_t) ceil(primitive_info->point.y-0.5))) { GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-polygon"); return(status); } /* Draw polygon or line. */ start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+ 1),1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=start_x; x <= stop_x; x++) { double fill_alpha, stroke_alpha; PixelInfo fill_color, stroke_color; /* Fill and/or stroke. 
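          GetFillAlpha() returns the fill coverage for this pixel and, via
          its last argument, the stroke coverage.  Each coverage value
          scales the corresponding color's alpha before the color is
          composited over the existing pixel, so a single pixel can receive
          both a fill and a stroke contribution.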
*/ fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule, x,y,&stroke_alpha); if (draw_info->stroke_antialias == MagickFalse) { fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0; stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0; } GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception); CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q, (double) GetPixelAlpha(image,q),q); GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception); CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q, (double) GetPixelAlpha(image,q),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image. % % The format of the DrawPrimitive method is: % % MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info, % PrimitiveInfo *primitive_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static inline double ConstrainCoordinate(double x) { if (x < (double) -(SSIZE_MAX-512)) return((double) -(SSIZE_MAX-512)); if (x > (double) (SSIZE_MAX-512)) return((double) (SSIZE_MAX-512)); return(x); } static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info) { const char *methods[] = { "point", "replace", "floodfill", "filltoborder", "reset", "?" 
}; PointInfo p, point, q; register ssize_t i, x; ssize_t coordinates, y; x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); switch (primitive_info->primitive) { case AlphaPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ImagePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ImagePrimitive %.20g,%.20g",(double) x,(double) y); return; } case PointPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "PointPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case TextPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "TextPrimitive %.20g,%.20g",(double) x,(double) y); return; } default: break; } coordinates=0; p=primitive_info[0].point; q.x=(-1.0); q.y=(-1.0); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin open (%.20g)",(double) coordinates); p=point; } point=primitive_info[i].point; if ((fabs(q.x-point.x) >= MagickEpsilon) || (fabs(q.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y); else (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y); q=point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x-point.x) >= MagickEpsilon) || (fabs(p.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)", (double) coordinates); else (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)", (double) coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-primitive"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx, draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy, draw_info->affine.tx,draw_info->affine.ty); } status=MagickTrue; if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) || (IsPixelInfoGray(&draw_info->stroke) == MagickFalse))) status&=SetImageColorspace(image,sRGBColorspace,exception); if (draw_info->compliance == SVGCompliance) { status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask, exception); status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask, exception); } x=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x-0.5)); y=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y-0.5)); image_view=AcquireAuthenticCacheView(image,exception); switch (primitive_info->primitive) { case AlphaPrimitive: { if (image->alpha_trait == UndefinedPixelTrait) status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; 
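          /*
            Point method: adjust the alpha of the single pixel at (x,y).
          */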
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { PixelInfo pixel, target; status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { ChannelType channel_mask; PixelInfo target; status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } channel_mask=SetImageChannelMask(image,AlphaChannel); status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue,exception); (void) SetImageChannelMask(image,channel_mask); break; } case ResetMethod: { PixelInfo pixel; for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } } break; } case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetPixelInfo(image,&pixel); GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { PixelInfo pixel, target; status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { PixelInfo target; status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; 
target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue,exception); break; } case ResetMethod: { PixelInfo pixel; GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } status&=SyncCacheViewAuthenticPixels(image_view,exception); if (status == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MagickPathExtent]; Image *composite_image, *composite_images; ImageInfo *clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *) NULL) break; clone_info=AcquireImageInfo(); composite_images=(Image *) NULL; if (LocaleNCompare(primitive_info->text,"data:",5) == 0) composite_images=ReadInlineImage(clone_info,primitive_info->text, exception); else if (*primitive_info->text != '\0') { (void) CopyMagickString(clone_info->filename,primitive_info->text, MagickPathExtent); composite_images=ReadImage(clone_info,exception); } clone_info=DestroyImageInfo(clone_info); if (composite_images == (Image *) NULL) { status=MagickFalse; break; } composite_image=RemoveFirstImageFromList(&composite_images); composite_images=DestroyImageList(composite_images); (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor) NULL,(void *) NULL); x1=(ssize_t) ceil(primitive_info[1].point.x-0.5); y1=(ssize_t) ceil(primitive_info[1].point.y-0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { /* Resize image. 
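            The primitive's second point carries the requested width and
            height; when these differ from the composite image's natural
            size, the image is resampled (with the destination image's
            filter) before compositing.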
*/ (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y); composite_image->filter=image->filter; status&=TransformImage(&composite_image,(char *) NULL, composite_geometry,exception); } if (composite_image->alpha_trait == UndefinedPixelTrait) status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel, exception); if (draw_info->alpha != OpaqueAlpha) status&=SetImageAlpha(composite_image,draw_info->alpha,exception); SetGeometry(image,&geometry); image->gravity=draw_info->gravity; geometry.x=x; geometry.y=y; (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double) composite_image->rows,(double) geometry.x,(double) geometry.y); (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception); affine=draw_info->affine; affine.tx=(double) geometry.x; affine.ty=(double) geometry.y; composite_image->interpolate=image->interpolate; if ((draw_info->compose == OverCompositeOp) || (draw_info->compose == SrcOverCompositeOp)) status&=DrawAffineImage(image,composite_image,&affine,exception); else status&=CompositeImage(image,composite_image,draw_info->compose, MagickTrue,geometry.x,geometry.y,exception); composite_image=DestroyImage(composite_image); break; } case PointPrimitive: { PixelInfo fill_color; register Quantum *q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&fill_color,exception); CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double) GetPixelAlpha(image,q),q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case TextPrimitive: { char geometry[MagickPathExtent]; DrawInfo *clone_info; if (primitive_info->text == (char *) NULL) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->text,primitive_info->text); (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); (void) CloneString(&clone_info->geometry,geometry); status&=AnnotateImage(image,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo *clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale=ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *) NULL) && (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) && (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) && (draw_info->stroke.alpha != (Quantum) TransparentAlpha)) { /* Draw dash polygon. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); if (status != MagickFalse) status&=DrawDashPolygon(draw_info,primitive_info,image,exception); break; } mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; if ((mid > 1.0) && ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) || (draw_info->stroke_pattern != (Image *) NULL))) { double x, y; MagickBooleanType closed_path; /* Draw strokes while respecting line cap/join attributes. 
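            Stroking is a two-pass operation: the fill is rendered first
            with a transparent stroke, then DrawStrokePolygon() traces the
            stroke outline as its own polygon and fills it with the stroke
            color.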
*/
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
               (primitive_info[i].primitive != UndefinedPrimitive))
            {
              status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawStrokePolygon(image,draw_info,primitive_info,
              exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"  end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w   S t r o k e   P o l y g o n                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
%  the image while respecting the line cap and join attributes.
%
%  The format of the DrawStrokePolygon method is:
%
%      MagickBooleanType DrawStrokePolygon(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    linecap[5];

  register ssize_t
    i;

  for (i=0; i < 4; i++)
    linecap[i]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}

static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
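    Each subpath is converted by TraceStrokePolygon() into a closed polygon
    outlining the stroke, which is then filled with the stroke color (the
    clone below swaps the stroke color into the fill).  Open subpaths drawn
    with a round cap also receive a small cap polygon at each end.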
*/
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "    begin draw-stroke-polygon");
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "    end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A f f i n e M a t r i x                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAffineMatrix() returns an AffineMatrix initialized to the identity
%  matrix.
%
%  The format of the GetAffineMatrix method is:
%
%      void GetAffineMatrix(AffineMatrix *affine_matrix)
%
%  A description of each parameter follows:
%
%    o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t D r a w I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetDrawInfo() initializes draw_info to default values from image_info.
%
%  The format of the GetDrawInfo method is:
%
%      void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
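    The defaults are an opaque black fill ("#000F"), a fully transparent
    stroke ("#FFF0"), a one-pixel stroke width, and a 12-point font; the
    image options consulted below (font, fill, stroke, weight, and so on)
    override these defaults when present.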
*/ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(draw_info != (DrawInfo *) NULL); (void) memset(draw_info,0,sizeof(*draw_info)); clone_info=CloneImageInfo(image_info); GetAffineMatrix(&draw_info->affine); exception=AcquireExceptionInfo(); (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill, exception); (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke, exception); draw_info->stroke_antialias=clone_info->antialias; draw_info->stroke_width=1.0; draw_info->fill_rule=EvenOddRule; draw_info->alpha=OpaqueAlpha; draw_info->fill_alpha=OpaqueAlpha; draw_info->stroke_alpha=OpaqueAlpha; draw_info->linecap=ButtCap; draw_info->linejoin=MiterJoin; draw_info->miterlimit=10; draw_info->decorate=NoDecoration; draw_info->pointsize=12.0; draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha; draw_info->compose=OverCompositeOp; draw_info->render=MagickTrue; draw_info->clip_path=MagickFalse; draw_info->debug=IsEventLogging(); if (clone_info->font != (char *) NULL) draw_info->font=AcquireString(clone_info->font); if (clone_info->density != (char *) NULL) draw_info->density=AcquireString(clone_info->density); draw_info->text_antialias=clone_info->antialias; if (fabs(clone_info->pointsize) >= MagickEpsilon) draw_info->pointsize=clone_info->pointsize; draw_info->border_color=clone_info->border_color; if (clone_info->server_name != (char *) NULL) draw_info->server_name=AcquireString(clone_info->server_name); option=GetImageOption(clone_info,"direction"); if (option != (const char *) NULL) draw_info->direction=(DirectionType) ParseCommandOption( MagickDirectionOptions,MagickFalse,option); else draw_info->direction=UndefinedDirection; option=GetImageOption(clone_info,"encoding"); if (option != (const char *) NULL) (void) CloneString(&draw_info->encoding,option); option=GetImageOption(clone_info,"family"); if (option != (const char *) NULL) (void) CloneString(&draw_info->family,option); option=GetImageOption(clone_info,"fill"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill, exception); option=GetImageOption(clone_info,"gravity"); if (option != (const char *) NULL) draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,option); option=GetImageOption(clone_info,"interline-spacing"); if (option != (const char *) NULL) draw_info->interline_spacing=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"interword-spacing"); if (option != (const char *) NULL) draw_info->interword_spacing=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"kerning"); if (option != (const char *) NULL) draw_info->kerning=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"stroke"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke, exception); option=GetImageOption(clone_info,"strokewidth"); if (option != (const char *) NULL) draw_info->stroke_width=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"style"); if (option != (const char *) NULL) draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions, MagickFalse,option); option=GetImageOption(clone_info,"undercolor"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor, exception); option=GetImageOption(clone_info,"weight"); if (option != (const char *) NULL) { ssize_t weight; weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option); if (weight == -1) 
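        /* not a named weight (e.g. "bold"); parse a numeric value */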
weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r m u t a t e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the binomial coefficient choose(n,k), the number of
%  ways to choose k items from n; it is used below to weight the control
%  points of a Bézier curve.
%
%  The format of the Permutate method is:
%
%      double Permutate(const ssize_t n,const ssize_t k)
%
%  A description of each parameter follows:
%
%    o n: the number of items to choose from.
%
%    o k: the number of items chosen.
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    r;

  register ssize_t
    i;

  r=1.0;
  for (i=k+1; i <= n; i++)
    r*=i;
  for (i=1; i <= (n-k); i++)
    r/=i;
  return(r);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a c e   P r i m i t i v e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TracePrimitive is a collection of methods for generating graphic
%  primitives such as arcs, ellipses, paths, etc.
%
*/

static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radius;

  center.x=0.5*(end.x+start.x);
  center.y=0.5*(end.y+start.y);
  radius.x=fabs(center.x-start.x);
  radius.y=fabs(center.y-start.y);
  return(TraceEllipse(mvg_info,center,radius,degrees));
}

static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
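  /*
    The center and angles now follow the endpoint-to-center arc conversion
    of the SVG path specification: compute the start angle and total sweep
    below, then approximate the arc with one cubic Bézier segment per
    quarter turn.
  */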
alpha=atan2(points[0].y-center.y,points[0].x-center.x); theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha; if ((theta < 0.0) && (sweep != MagickFalse)) theta+=2.0*MagickPI; else if ((theta > 0.0) && (sweep == MagickFalse)) theta-=2.0*MagickPI; arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+ MagickEpsilon)))); status=MagickTrue; p=primitive_info; for (i=0; i < (ssize_t) arc_segments; i++) { beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments)); gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))* sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/ sin(fmod((double) beta,DegreesToRadians(360.0))); points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x; p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y; (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y* points[0].y); (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y* points[0].y); (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y* points[1].y); (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y* points[1].y); (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y* points[2].y); (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y* points[2].y); if (i == (ssize_t) (arc_segments-1)) (p+3)->point=end; status&=TraceBezier(mvg_info,4); if (status == 0) break; p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; p+=p->coordinates; } if (status == 0) return(MagickFalse); mvg_info->offset=offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceBezier(MVGInfo *mvg_info, const size_t number_coordinates) { double alpha, *coefficients, weight; PointInfo end, point, *points; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i, j; size_t control_points, quantum; /* Allocate coefficients. 
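    'quantum' grows with the coordinate span of the control points, capped
    at BezierQuantum samples per control point, so larger curves are
    flattened with proportionally more sample points.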
*/ primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; quantum=number_coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { for (j=i+1; j < (ssize_t) number_coordinates; j++) { alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x); if (alpha > (double) SSIZE_MAX) { (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } if (alpha > (double) quantum) quantum=(size_t) alpha; alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y); if (alpha > (double) SSIZE_MAX) { (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } if (alpha > (double) quantum) quantum=(size_t) alpha; } } primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; quantum=MagickMin(quantum/number_coordinates,BezierQuantum); coefficients=(double *) AcquireQuantumMemory(number_coordinates, sizeof(*coefficients)); points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates* sizeof(*points)); if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL)) { if (points != (PointInfo *) NULL) points=(PointInfo *) RelinquishMagickMemory(points); if (coefficients != (double *) NULL) coefficients=(double *) RelinquishMagickMemory(coefficients); (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } control_points=quantum*number_coordinates; if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse) { points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickFalse); } primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; /* Compute bezier points. */ end=primitive_info[number_coordinates-1].point; for (i=0; i < (ssize_t) number_coordinates; i++) coefficients[i]=Permutate((ssize_t) number_coordinates-1,i); weight=0.0; for (i=0; i < (ssize_t) control_points; i++) { p=primitive_info; point.x=0.0; point.y=0.0; alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0); for (j=0; j < (ssize_t) number_coordinates; j++) { point.x+=alpha*coefficients[j]*p->point.x; point.y+=alpha*coefficients[j]*p->point.y; alpha*=weight/(1.0-weight); p++; } points[i]=point; weight+=1.0/control_points; } /* Bezier curves are just short segmented polys. 
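    Each sample above was evaluated in Bernstein form,
      B(t) = sum_i choose(n-1,i) * (1-t)^(n-1-i) * t^i * P_i,
    so all that remains is to emit the samples, plus the exact endpoint, as
    a polyline.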
*/ p=primitive_info; for (i=0; i < (ssize_t) control_points; i++) { if (TracePoint(p,points[i]) == MagickFalse) { points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickFalse); } p+=p->coordinates; } if (TracePoint(p,end) == MagickFalse) { points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickFalse); } p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickTrue); } static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start, const PointInfo end) { double alpha, beta, radius; PointInfo offset, degrees; alpha=end.x-start.x; beta=end.y-start.y; radius=hypot((double) alpha,(double) beta); offset.x=(double) radius; offset.y=(double) radius; degrees.x=0.0; degrees.y=360.0; return(TraceEllipse(mvg_info,start,offset,degrees)); } static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center, const PointInfo radii,const PointInfo arc) { double coordinates, delta, step, x, y; PointInfo angle, point; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i; /* Ellipses are just short segmented polys. */ primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=0; if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon)) return(MagickTrue); delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y)); step=MagickPI/8.0; if ((delta >= 0.0) && (delta < (MagickPI/8.0))) step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0); angle.x=DegreesToRadians(arc.x); y=arc.y; while (y < arc.x) y+=360.0; angle.y=DegreesToRadians(y); coordinates=ceil((angle.y-angle.x)/step+1.0); if (coordinates > (double) SSIZE_MAX) { (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse) return(MagickFalse); primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; for (p=primitive_info; angle.x < angle.y; angle.x+=step) { point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x; point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; } point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x; point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; x=fabs(primitive_info[0].point.x- primitive_info[primitive_info->coordinates-1].point.x); y=fabs(primitive_info[0].point.y- primitive_info[primitive_info->coordinates-1].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info, const PointInfo start,const PointInfo end) { if (TracePoint(primitive_info,start) == MagickFalse) return(MagickFalse); if 
((fabs(start.x-end.x) < MagickEpsilon) && (fabs(start.y-end.y) < MagickEpsilon)) { primitive_info->primitive=PointPrimitive; primitive_info->coordinates=1; return(MagickTrue); } if (TracePoint(primitive_info+1,end) == MagickFalse) return(MagickFalse); (primitive_info+1)->primitive=primitive_info->primitive; primitive_info->coordinates=2; primitive_info->closed_subpath=MagickFalse; return(MagickTrue); } static ssize_t TracePath(MVGInfo *mvg_info,const char *path, ExceptionInfo *exception) { char *next_token, token[MagickPathExtent]; const char *p; double x, y; int attribute, last_attribute; MagickBooleanType status; PointInfo end = {0.0, 0.0}, points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} }, point = {0.0, 0.0}, start = {0.0, 0.0}; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register PrimitiveInfo *q; register ssize_t i; size_t number_coordinates, z_count; ssize_t subpath_offset; subpath_offset=mvg_info->offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; status=MagickTrue; attribute=0; number_coordinates=0; z_count=0; primitive_type=primitive_info->primitive; q=primitive_info; for (p=path; *p != '\0'; ) { if (status == MagickFalse) break; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == '\0') break; last_attribute=attribute; attribute=(int) (*p++); switch (attribute) { case 'a': case 'A': { double angle = 0.0; MagickBooleanType large_arc = MagickFalse, sweep = MagickFalse; PointInfo arc = {0.0, 0.0}; /* Elliptical arc. */ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); arc.x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); arc.y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse; (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse; if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'A' ? x : point.x+x); end.y=(double) (attribute == (int) 'A' ? 
y : point.y+y); if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* Cubic Bézier curve. */ do { points[0]=point; for (i=1; i < 4; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'C' ? x : point.x+x); end.y=(double) (attribute == (int) 'C' ? y : point.y+y); points[i]=end; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'H' ? x: point.x+x); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { /* Line to. */ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'L' ? x : point.x+x); point.y=(double) (attribute == (int) 'L' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { /* Move to. 
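          A moveto finishes the bookkeeping for any pending subpath before
          starting a new one at the point that follows; additional
          coordinate pairs after the first are treated as implicit lineto
          commands, per the SVG path grammar.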
*/ if (mvg_info->offset != subpath_offset) { primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; } i=0; do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.x=(double) (attribute == (int) 'M' ? x : point.x+x); point.y=(double) (attribute == (int) 'M' ? y : point.y+y); if (i == 0) start=point; i++; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* Quadratic Bézier curve. */ do { points[0]=point; for (i=1; i < 3; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (*p == ',') p++; end.x=(double) (attribute == (int) 'Q' ? x : point.x+x); end.y=(double) (attribute == (int) 'Q' ? y : point.y+y); points[i]=end; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* Cubic Bézier curve. */ do { points[0]=points[3]; points[1].x=2.0*points[3].x-points[2].x; points[1].y=2.0*points[3].y-points[2].y; for (i=2; i < 4; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (*p == ',') p++; end.x=(double) (attribute == (int) 'S' ? x : point.x+x); end.y=(double) (attribute == (int) 'S' ? y : point.y+y); points[i]=end; } if (strchr("CcSs",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* Quadratic Bézier curve. 
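          The smooth (T/t) form reflects the previous quadratic control
          point about the current point; when the previous command was not
          Q, q, T, or t, the control point collapses onto the current
          point, as the SVG specification requires.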
*/ do { points[0]=points[2]; points[1].x=2.0*points[2].x-points[1].x; points[1].y=2.0*points[2].y-points[1].y; for (i=2; i < 3; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); end.x=(double) (attribute == (int) 'T' ? x : point.x+x); end.y=(double) (attribute == (int) 'T' ? y : point.y+y); points[i]=end; } if (status == MagickFalse) break; if (strchr("QqTt",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { /* Line to. */ do { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); point.y=(double) (attribute == (int) 'V' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { /* Close path. 
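          Close the current subpath by emitting a final point at the
          subpath start and marking the subpath closed; z_count records how
          many subpaths were closed so multi-subpath fills can switch to
          FillToBorderMethod after the parse loop.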
*/ point=start; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(-1); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(-1); mvg_info->offset+=q->coordinates; q+=q->coordinates; primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); primitive_info->closed_subpath=MagickTrue; number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; z_count++; break; } default: { ThrowPointExpectedException(token,exception); break; } } } if (status == MagickFalse) return(-1); primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { q--; q->primitive=primitive_type; if (z_count > 1) q->method=FillToBorderMethod; } q=primitive_info; return((ssize_t) number_coordinates); } static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info, const PointInfo start,const PointInfo end) { PointInfo point; register PrimitiveInfo *p; register ssize_t i; p=primitive_info; if (TracePoint(p,start) == MagickFalse) return(MagickFalse); p+=p->coordinates; point.x=start.x; point.y=end.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; if (TracePoint(p,end) == MagickFalse) return(MagickFalse); p+=p->coordinates; point.x=end.x; point.y=start.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; if (TracePoint(p,start) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info, const PointInfo start,const PointInfo end,PointInfo arc) { PointInfo degrees, point, segment; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i; ssize_t offset; offset=mvg_info->offset; segment.x=fabs(end.x-start.x); segment.y=fabs(end.y-start.y); if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon)) { (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0; return(MagickTrue); } if (arc.x > (0.5*segment.x)) arc.x=0.5*segment.x; if (arc.y > (0.5*segment.y)) arc.y=0.5*segment.y; point.x=start.x+segment.x-arc.x; point.y=start.y+arc.y; degrees.x=270.0; degrees.y=360.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+segment.x-arc.x; point.y=start.y+segment.y-arc.y; degrees.x=0.0; degrees.y=90.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+segment.y-arc.y; degrees.x=90.0; degrees.y=180.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+arc.y; degrees.x=180.0; degrees.y=270.0; if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse) return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) 
return(MagickFalse); p=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse) return(MagickFalse); p+=p->coordinates; mvg_info->offset=offset; primitive_info=(*mvg_info->primitive_info)+offset; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info, const size_t number_vertices,const double offset) { double distance; register double dx, dy; register ssize_t i; ssize_t j; dx=0.0; dy=0.0; for (i=1; i < (ssize_t) number_vertices; i++) { dx=primitive_info[0].point.x-primitive_info[i].point.x; dy=primitive_info[0].point.y-primitive_info[i].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } if (i == (ssize_t) number_vertices) i=(ssize_t) number_vertices-1L; distance=hypot((double) dx,(double) dy); primitive_info[0].point.x=(double) (primitive_info[i].point.x+ dx*(distance+offset)/distance); primitive_info[0].point.y=(double) (primitive_info[i].point.y+ dy*(distance+offset)/distance); for (j=(ssize_t) number_vertices-2; j >= 0; j--) { dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x; dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } distance=hypot((double) dx,(double) dy); primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+ dx*(distance+offset)/distance); primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+ dy*(distance+offset)/distance); return(MagickTrue); } static PrimitiveInfo *TraceStrokePolygon(const Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) { #define MaxStrokePad (6*BezierQuantum+360) #define CheckPathExtent(pad_p,pad_q) \ { \ if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \ { \ if (~extent_p < (pad_p)) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ else \ { \ extent_p+=(pad_p); \ stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \ MaxStrokePad,sizeof(*stroke_p)); \ } \ } \ if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \ { \ if (~extent_q < (pad_q)) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ else \ { \ extent_q+=(pad_q); \ stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \ MaxStrokePad,sizeof(*stroke_q)); \ } \ } \ if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \ { \ if (stroke_p != (PointInfo *) NULL) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ if (stroke_q != (PointInfo *) NULL) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ polygon_primitive=(PrimitiveInfo *) \ RelinquishMagickMemory(polygon_primitive); \ return((PrimitiveInfo *) NULL); \ } \ } typedef struct _StrokeSegment { double p, q; } StrokeSegment; double delta_theta, dot_product, mid, miterlimit; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *stroke_p, *stroke_q; PrimitiveInfo *polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, extent_p, extent_q, number_vertices; ssize_t j, n, p, q; StrokeSegment dx = {0.0, 0.0}, dy = {0.0, 0.0}, inverse_slope = {0.0, 0.0}, slope = {0.0, 0.0}, theta = {0.0, 0.0}; /* Allocate paths. 
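    Editorial sizing note (not in the original source): stroke_p and
    stroke_q hold the two offset contours of the stroke.  They start at
    2*number_vertices entries plus MaxStrokePad (6*BezierQuantum+360)
    slots of headroom, so a round join or linecap can append its arc
    points without CheckPathExtent having to grow the buffers on every
    vertex.  The "p" and "q" members of StrokeSegment track the same two
    sides of the centerline.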
*/ number_vertices=primitive_info->coordinates; polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices+2UL,sizeof(*polygon_primitive)); if (polygon_primitive == (PrimitiveInfo *) NULL) return((PrimitiveInfo *) NULL); (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices* sizeof(*polygon_primitive)); offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x; offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y; closed_path=(fabs(offset.x) < MagickEpsilon) && (fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices]=primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive=UndefinedPrimitive; /* Compute the slope for the first line segment, p. */ dx.p=0.0; dy.p=0.0; for (n=1; n < (ssize_t) number_vertices; n++) { dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x; dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y; if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon)) break; } if (n == (ssize_t) number_vertices) { if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse)) { /* Zero length subpath. */ stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory( sizeof(*stroke_polygon)); stroke_polygon[0]=polygon_primitive[0]; stroke_polygon[0].coordinates=0; polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } n=(ssize_t) number_vertices-1L; } extent_p=2*number_vertices; extent_q=2*number_vertices; stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad, sizeof(*stroke_p)); stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad, sizeof(*stroke_q)); if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) { if (stroke_p != (PointInfo *) NULL) stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); if (stroke_q != (PointInfo *) NULL) stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return((PrimitiveInfo *) NULL); } slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < MagickEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.p) < MagickEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.p=dx.p < 0.0 ? 
1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0/slope.p); } mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; stroke_q[p++]=box_q[0]; stroke_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. */ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < MagickEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.q) < MagickEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.q=dx.q < 0.0 ? 
1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < MagickEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } CheckPathExtent(MaxStrokePad,MaxStrokePad); dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++]=box_p[4]; else { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/ (2.0*sqrt((double) (1.0/mid))))); CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad); stroke_q[q].x=box_q[1].x; stroke_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); stroke_q[q].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } stroke_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) 
stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++]=box_q[4]; stroke_p[p++]=box_p[4]; } else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; stroke_p[p++]=box_p[1]; stroke_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++]=box_q[4]; else { stroke_q[q++]=box_q[1]; stroke_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/ (2.0*sqrt((double) (1.0/mid))))); CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad); stroke_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); stroke_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } stroke_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } stroke_p[p++]=box_p[1]; stroke_q[q++]=box_q[1]; /* Trace stroked polygon. */ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon != (PrimitiveInfo *) NULL) { for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); } stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
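/*
  Editorial worked example for the offset math in TraceStrokePolygon()
  above (not part of the original source): take a 45-degree segment with
  dx=1 and dy=1, so slope=1 and inverse_slope=-1, and a stroke width of 4,
  so mid=2.  Then

    offset.x = sqrt(mid*mid/(inverse_slope*inverse_slope+1.0)) = sqrt(2.0) ~ 1.414
    offset.y = offset.x*inverse_slope ~ -1.414

  and hypot(offset.x,offset.y) equals mid, i.e. the stroke_p and stroke_q
  contours run parallel to the centerline at exactly half the stroke width
  on either side; the join logic only decides how the two contours are
  stitched together at each vertex (bevel, miter, or round arc).
*/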
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* * Define declarations. */ #define BezierQuantum 200 #define PrimitiveExtentPad 2048 #define MaxBezierCoordinates 67108864 #define ThrowPointExpectedException(token,exception) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \ "NonconformingDrawingPrimitiveDefinition","`%s'",token); \ status=MagickFalse; \ break; \ } /* * Typedef declarations. */ typedef struct _EdgeInfo { SegmentInfo bounds; double scanline; PointInfo * points; size_t number_points; ssize_t direction; MagickBooleanType ghostline; size_t highwater; } EdgeInfo; typedef struct _ElementInfo { double cx, cy, major, minor, angle; } ElementInfo; typedef struct _MVGInfo { PrimitiveInfo ** primitive_info; size_t * extent; ssize_t offset; PointInfo point; ExceptionInfo * exception; } MVGInfo; typedef struct _PolygonInfo { EdgeInfo * edges; size_t number_edges; } PolygonInfo; typedef enum { MoveToCode, OpenCode, GhostlineCode, LineToCode, EndCode } PathInfoCode; typedef struct _PathInfo { PointInfo point; PathInfoCode code; } PathInfo; /* * Forward declarations. 
*/ static Image * DrawClippingMask(Image *, const DrawInfo *, const char *, const char *, ExceptionInfo *); static MagickBooleanType DrawStrokePolygon(Image *, const DrawInfo *, const PrimitiveInfo *, ExceptionInfo *), RenderMVGContent(Image *, const DrawInfo *, const size_t, ExceptionInfo *), TraceArc(MVGInfo *, const PointInfo, const PointInfo, const PointInfo), TraceArcPath(MVGInfo *, const PointInfo, const PointInfo, const PointInfo, const double, const MagickBooleanType, const MagickBooleanType), TraceBezier(MVGInfo *, const size_t), TraceCircle(MVGInfo *, const PointInfo, const PointInfo), TraceEllipse(MVGInfo *, const PointInfo, const PointInfo, const PointInfo), TraceLine(PrimitiveInfo *, const PointInfo, const PointInfo), TraceRectangle(PrimitiveInfo *, const PointInfo, const PointInfo), TraceRoundRectangle(MVGInfo *, const PointInfo, const PointInfo, PointInfo), TraceSquareLinecap(PrimitiveInfo *, const size_t, const double); static PrimitiveInfo * TraceStrokePolygon(const Image *, const DrawInfo *, const PrimitiveInfo *); static ssize_t TracePath(MVGInfo *, const char *, ExceptionInfo *); /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % A c q u i r e D r a w I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % AcquireDrawInfo() returns a DrawInfo structure properly * initialized. % % The format of the AcquireDrawInfo method is: % % * DrawInfo *AcquireDrawInfo(void) % */ MagickExport DrawInfo * AcquireDrawInfo(void) { DrawInfo * draw_info; draw_info = (DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info)); GetDrawInfo((ImageInfo *) NULL, draw_info); return (draw_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C l o n e D r a w I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CloneDrawInfo() makes a copy of the given draw_info structure. * If NULL % is specified, a new DrawInfo structure is created initialized * to default % values. % % The format of the CloneDrawInfo method is: % % * DrawInfo *CloneDrawInfo(const ImageInfo *image_info, % const * DrawInfo *draw_info) % % A description of each parameter follows: % % * o image_info: the image info. % % o draw_info: the draw info. 
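%
%  An editorial usage sketch (the MVG string is illustrative only): the
%  clone owns deep copies of every string, pattern image, dash pattern,
%  and gradient stop, so it can safely outlive draw_info:
%
%      DrawInfo *clone = CloneDrawInfo((ImageInfo *) NULL,draw_info);
%      (void) CloneString(&clone->primitive,"circle 50,50 50,80");
%      clone = DestroyDrawInfo(clone);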
% */ MagickExport DrawInfo * CloneDrawInfo(const ImageInfo * image_info, const DrawInfo * draw_info) { DrawInfo * clone_info; ExceptionInfo * exception; clone_info = (DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info)); GetDrawInfo(image_info, clone_info); if (draw_info == (DrawInfo *) NULL) return (clone_info); exception = AcquireExceptionInfo(); if (draw_info->id != (char *)NULL) (void)CloneString(&clone_info->id, draw_info->id); if (draw_info->primitive != (char *)NULL) (void)CloneString(&clone_info->primitive, draw_info->primitive); if (draw_info->geometry != (char *)NULL) (void)CloneString(&clone_info->geometry, draw_info->geometry); clone_info->compliance = draw_info->compliance; clone_info->viewbox = draw_info->viewbox; clone_info->affine = draw_info->affine; clone_info->gravity = draw_info->gravity; clone_info->fill = draw_info->fill; clone_info->stroke = draw_info->stroke; clone_info->stroke_width = draw_info->stroke_width; if (draw_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern = CloneImage(draw_info->fill_pattern, 0, 0, MagickTrue, exception); if (draw_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern = CloneImage(draw_info->stroke_pattern, 0, 0, MagickTrue, exception); clone_info->stroke_antialias = draw_info->stroke_antialias; clone_info->text_antialias = draw_info->text_antialias; clone_info->fill_rule = draw_info->fill_rule; clone_info->linecap = draw_info->linecap; clone_info->linejoin = draw_info->linejoin; clone_info->miterlimit = draw_info->miterlimit; clone_info->dash_offset = draw_info->dash_offset; clone_info->decorate = draw_info->decorate; clone_info->compose = draw_info->compose; if (draw_info->text != (char *)NULL) (void)CloneString(&clone_info->text, draw_info->text); if (draw_info->font != (char *)NULL) (void)CloneString(&clone_info->font, draw_info->font); if (draw_info->metrics != (char *)NULL) (void)CloneString(&clone_info->metrics, draw_info->metrics); if (draw_info->family != (char *)NULL) (void)CloneString(&clone_info->family, draw_info->family); clone_info->style = draw_info->style; clone_info->stretch = draw_info->stretch; clone_info->weight = draw_info->weight; if (draw_info->encoding != (char *)NULL) (void)CloneString(&clone_info->encoding, draw_info->encoding); clone_info->pointsize = draw_info->pointsize; clone_info->kerning = draw_info->kerning; clone_info->interline_spacing = draw_info->interline_spacing; clone_info->interword_spacing = draw_info->interword_spacing; clone_info->direction = draw_info->direction; if (draw_info->density != (char *)NULL) (void)CloneString(&clone_info->density, draw_info->density); clone_info->align = draw_info->align; clone_info->undercolor = draw_info->undercolor; clone_info->border_color = draw_info->border_color; if (draw_info->server_name != (char *)NULL) (void)CloneString(&clone_info->server_name, draw_info->server_name); if (draw_info->dash_pattern != (double *)NULL) { register ssize_t x; for (x = 0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++); clone_info->dash_pattern = (double *)AcquireQuantumMemory((size_t) (2 * x + 2), sizeof(*clone_info->dash_pattern)); if (clone_info->dash_pattern == (double *)NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void)memset(clone_info->dash_pattern, 0, (size_t) (2 * x + 2) * sizeof(*clone_info->dash_pattern)); (void)memcpy(clone_info->dash_pattern, draw_info->dash_pattern, (size_t) (x + 1) * sizeof(*clone_info->dash_pattern)); } clone_info->gradient = draw_info->gradient; if 
(draw_info->gradient.stops != (StopInfo *) NULL) { size_t number_stops; number_stops = clone_info->gradient.number_stops; clone_info->gradient.stops = (StopInfo *) AcquireQuantumMemory((size_t) number_stops, sizeof(*clone_info->gradient.stops)); if (clone_info->gradient.stops == (StopInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void)memcpy(clone_info->gradient.stops, draw_info->gradient.stops, (size_t) number_stops * sizeof(*clone_info->gradient.stops)); } clone_info->bounds = draw_info->bounds; clone_info->fill_alpha = draw_info->fill_alpha; clone_info->stroke_alpha = draw_info->stroke_alpha; clone_info->element_reference = draw_info->element_reference; clone_info->clip_path = draw_info->clip_path; clone_info->clip_units = draw_info->clip_units; if (draw_info->clip_mask != (char *)NULL) (void)CloneString(&clone_info->clip_mask, draw_info->clip_mask); if (draw_info->clipping_mask != (Image *) NULL) clone_info->clipping_mask = CloneImage(draw_info->clipping_mask, 0, 0, MagickTrue, exception); if (draw_info->composite_mask != (Image *) NULL) clone_info->composite_mask = CloneImage(draw_info->composite_mask, 0, 0, MagickTrue, exception); clone_info->render = draw_info->render; clone_info->debug = IsEventLogging(); exception = DestroyExceptionInfo(exception); return (clone_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + C o n v e r t P a t h T o P o l y g o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ConvertPathToPolygon() converts a path to the more efficient * sorted % rendering form. % % The format of the ConvertPathToPolygon * method is: % % PolygonInfo *ConvertPathToPolygon(const PathInfo * *path_info) % % A description of each parameter follows: % % o Method * ConvertPathToPolygon returns the path in a more efficient sorted % * rendering form of type PolygonInfo. % % o draw_info: Specifies a * pointer to an DrawInfo structure. % % o path_info: Specifies a pointer * to an PathInfo structure. % % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int DrawCompareEdges(const void *p_edge, const void *q_edge) { #define DrawCompareEdge(p,q) \ { \ if (((p)-(q)) < 0.0) \ return(-1); \ if (((p)-(q)) > 0.0) \ return(1); \ } register const PointInfo * p, *q; /* * Edge sorting for right-handed coordinate system. */ p = ((const EdgeInfo *)p_edge)->points; q = ((const EdgeInfo *)q_edge)->points; DrawCompareEdge(p[0].y, q[0].y); DrawCompareEdge(p[0].x, q[0].x); DrawCompareEdge((p[1].x - p[0].x) * (q[1].y - q[0].y), (p[1].y - p[0].y) * (q[1].x - q[0].x)); DrawCompareEdge(p[1].y, q[1].y); DrawCompareEdge(p[1].x, q[1].x); return (0); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static void LogPolygonInfo(const PolygonInfo * polygon_info) { register EdgeInfo * p; register ssize_t i, j; (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin active-edge"); p = polygon_info->edges; for (i = 0; i < (ssize_t) polygon_info->number_edges; i++) { (void)LogMagickEvent(DrawEvent, GetMagickModule(), " edge %.20g:", (double)i); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " direction: %s", p->direction != MagickFalse ? "down" : "up"); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " ghostline: %s", p->ghostline != MagickFalse ? 
"transparent" : "opaque"); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " bounds: %g,%g - %g,%g", p->bounds.x1, p->bounds.y1, p->bounds.x2, p->bounds.y2); for (j = 0; j < (ssize_t) p->number_points; j++) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %g,%g", p->points[j].x, p->points[j].y); p++; } (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end active-edge"); } static void ReversePoints(PointInfo * points, const size_t number_points) { PointInfo point; register ssize_t i; for (i = 0; i < (ssize_t) (number_points >> 1); i++) { point = points[i]; points[i] = points[number_points - (i + 1)]; points[number_points - (i + 1)] = point; } } static PolygonInfo * ConvertPathToPolygon(const PathInfo * path_info) { long direction, next_direction; PointInfo point, *points; PolygonInfo * polygon_info; SegmentInfo bounds; register ssize_t i, n; MagickBooleanType ghostline; size_t edge, number_edges, number_points; /* * Convert a path to the more efficient sorted rendering form. */ polygon_info = (PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info)); if (polygon_info == (PolygonInfo *) NULL) return ((PolygonInfo *) NULL); number_edges = 16; polygon_info->edges = (EdgeInfo *) AcquireQuantumMemory(number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return ((PolygonInfo *) NULL); (void)memset(polygon_info->edges, 0, number_edges * sizeof(*polygon_info->edges)); direction = 0; edge = 0; ghostline = MagickFalse; n = 0; number_points = 0; points = (PointInfo *) NULL; (void)memset(&point, 0, sizeof(point)); (void)memset(&bounds, 0, sizeof(bounds)); polygon_info->edges[edge].number_points = (size_t) n; polygon_info->edges[edge].scanline = 0.0; polygon_info->edges[edge].highwater = 0; polygon_info->edges[edge].ghostline = ghostline; polygon_info->edges[edge].direction = (ssize_t) direction; polygon_info->edges[edge].points = points; polygon_info->edges[edge].bounds = bounds; polygon_info->number_edges = 0; for (i = 0; path_info[i].code != EndCode; i++) { if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) || (path_info[i].code == GhostlineCode)) { /* * Move to. */ if ((points != (PointInfo *) NULL) && (n >= 2)) { if (edge == number_edges) { number_edges <<= 1; polygon_info->edges = (EdgeInfo *) ResizeQuantumMemory( polygon_info->edges, (size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return ((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points = (size_t) n; polygon_info->edges[edge].scanline = (-1.0); polygon_info->edges[edge].highwater = 0; polygon_info->edges[edge].ghostline = ghostline; polygon_info->edges[edge].direction = (ssize_t) (direction > 0); if (direction < 0) ReversePoints(points, (size_t) n); polygon_info->edges[edge].points = points; polygon_info->edges[edge].bounds = bounds; polygon_info->edges[edge].bounds.y1 = points[0].y; polygon_info->edges[edge].bounds.y2 = points[n - 1].y; points = (PointInfo *) NULL; ghostline = MagickFalse; edge++; } if (points == (PointInfo *) NULL) { number_points = 16; points = (PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return ((PolygonInfo *) NULL); } ghostline = path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse; point = path_info[i].point; points[0] = point; bounds.x1 = point.x; bounds.x2 = point.x; direction = 0; n = 1; continue; } /* * Line to. 
*/ next_direction = ((path_info[i].point.y > point.y) || ((fabs(path_info[i].point.y - point.y) < MagickEpsilon) && (path_info[i].point.x > point.x))) ? 1 : -1; if ((points != (PointInfo *) NULL) && (direction != 0) && (direction != next_direction)) { /* * New edge. */ point = points[n - 1]; if (edge == number_edges) { number_edges <<= 1; polygon_info->edges = (EdgeInfo *) ResizeQuantumMemory( polygon_info->edges, (size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return ((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points = (size_t) n; polygon_info->edges[edge].scanline = (-1.0); polygon_info->edges[edge].highwater = 0; polygon_info->edges[edge].ghostline = ghostline; polygon_info->edges[edge].direction = (ssize_t) (direction > 0); if (direction < 0) ReversePoints(points, (size_t) n); polygon_info->edges[edge].points = points; polygon_info->edges[edge].bounds = bounds; polygon_info->edges[edge].bounds.y1 = points[0].y; polygon_info->edges[edge].bounds.y2 = points[n - 1].y; number_points = 16; points = (PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return ((PolygonInfo *) NULL); n = 1; ghostline = MagickFalse; points[0] = point; bounds.x1 = point.x; bounds.x2 = point.x; edge++; } direction = next_direction; if (points == (PointInfo *) NULL) continue; if (n == (ssize_t) number_points) { number_points <<= 1; points = (PointInfo *) ResizeQuantumMemory(points, (size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return ((PolygonInfo *) NULL); } point = path_info[i].point; points[n] = point; if (point.x < bounds.x1) bounds.x1 = point.x; if (point.x > bounds.x2) bounds.x2 = point.x; n++; } if (points != (PointInfo *) NULL) { if (n < 2) points = (PointInfo *) RelinquishMagickMemory(points); else { if (edge == number_edges) { number_edges <<= 1; polygon_info->edges = (EdgeInfo *) ResizeQuantumMemory( polygon_info->edges, (size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return ((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points = (size_t) n; polygon_info->edges[edge].scanline = (-1.0); polygon_info->edges[edge].highwater = 0; polygon_info->edges[edge].ghostline = ghostline; polygon_info->edges[edge].direction = (ssize_t) (direction > 0); if (direction < 0) ReversePoints(points, (size_t) n); polygon_info->edges[edge].points = points; polygon_info->edges[edge].bounds = bounds; polygon_info->edges[edge].bounds.y1 = points[0].y; polygon_info->edges[edge].bounds.y2 = points[n - 1].y; ghostline = MagickFalse; edge++; } } polygon_info->number_edges = edge; qsort(polygon_info->edges, (size_t) polygon_info->number_edges, sizeof(*polygon_info->edges), DrawCompareEdges); if (IsEventLogging() != MagickFalse) LogPolygonInfo(polygon_info); return (polygon_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + C o n v e r t P r i m i t i v e T o P a t h * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ConvertPrimitiveToPath() converts a PrimitiveInfo structure into * a vector % path structure. 
% % The format of the ConvertPrimitiveToPath * method is: % % PathInfo *ConvertPrimitiveToPath(const DrawInfo * *draw_info, % const PrimitiveInfo *primitive_info) % % A * description of each parameter follows: % % o Method * ConvertPrimitiveToPath returns a vector path structure of type % * PathInfo. % % o draw_info: a structure of type DrawInfo. % % o * primitive_info: Specifies a pointer to an PrimitiveInfo structure. % % */ static void LogPathInfo(const PathInfo * path_info) { register const PathInfo * p; (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin vector-path"); for (p = path_info; p->code != EndCode; p++) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %g,%g %s", p->point.x, p->point.y, p->code == GhostlineCode ? "moveto ghostline" : p->code == OpenCode ? "moveto open" : p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" : "?"); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end vector-path"); } static PathInfo * ConvertPrimitiveToPath(const PrimitiveInfo * primitive_info) { MagickBooleanType closed_subpath; PathInfo * path_info; PathInfoCode code; PointInfo p, q; register ssize_t i, n; ssize_t coordinates, start; /* * Converts a PrimitiveInfo structure into a vector path structure. */ switch (primitive_info->primitive) { case AlphaPrimitive: case ColorPrimitive: case ImagePrimitive: case PointPrimitive: case TextPrimitive: return ((PathInfo *) NULL); default: break; } for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++); path_info = (PathInfo *) AcquireQuantumMemory((size_t) (3UL * i + 1UL), sizeof(*path_info)); if (path_info == (PathInfo *) NULL) return ((PathInfo *) NULL); coordinates = 0; closed_subpath = MagickFalse; n = 0; p.x = (-1.0); p.y = (-1.0); q.x = (-1.0); q.y = (-1.0); start = 0; for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++) { code = LineToCode; if (coordinates <= 0) { /* * New subpath. */ coordinates = (ssize_t) primitive_info[i].coordinates; p = primitive_info[i].point; start = n; code = MoveToCode; closed_subpath = primitive_info[i].closed_subpath; } coordinates--; if ((code == MoveToCode) || (coordinates <= 0) || (fabs(q.x - primitive_info[i].point.x) >= MagickEpsilon) || (fabs(q.y - primitive_info[i].point.y) >= MagickEpsilon)) { /* * Eliminate duplicate points. */ path_info[n].code = code; path_info[n].point = primitive_info[i].point; q = primitive_info[i].point; n++; } if (coordinates > 0) continue; /* next point in current subpath */ if (closed_subpath != MagickFalse) { closed_subpath = MagickFalse; continue; } /* * Mark the p point as open if the subpath is not closed. */ path_info[start].code = OpenCode; path_info[n].code = GhostlineCode; path_info[n].point = primitive_info[i].point; n++; path_info[n].code = LineToCode; path_info[n].point = p; n++; } path_info[n].code = EndCode; path_info[n].point.x = 0.0; path_info[n].point.y = 0.0; if (IsEventLogging() != MagickFalse) LogPathInfo(path_info); path_info = (PathInfo *) ResizeQuantumMemory(path_info, (size_t) (n + 1), sizeof(*path_info)); return (path_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D e s t r o y D r a w I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DestroyDrawInfo() deallocates memory associated with an DrawInfo * structure. 
%
%  The format of the DestroyDrawInfo method is:
%
%      DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *
DestroyDrawInfo(DrawInfo * draw_info)
{
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void)LogMagickEvent(TraceEvent, GetMagickModule(), "...");
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->id != (char *)NULL)
    draw_info->id = DestroyString(draw_info->id);
  if (draw_info->primitive != (char *)NULL)
    draw_info->primitive = DestroyString(draw_info->primitive);
  if (draw_info->text != (char *)NULL)
    draw_info->text = DestroyString(draw_info->text);
  if (draw_info->geometry != (char *)NULL)
    draw_info->geometry = DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern = DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern = DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *)NULL)
    draw_info->font = DestroyString(draw_info->font);
  if (draw_info->metrics != (char *)NULL)
    draw_info->metrics = DestroyString(draw_info->metrics);
  if (draw_info->family != (char *)NULL)
    draw_info->family = DestroyString(draw_info->family);
  if (draw_info->encoding != (char *)NULL)
    draw_info->encoding = DestroyString(draw_info->encoding);
  if (draw_info->density != (char *)NULL)
    draw_info->density = DestroyString(draw_info->density);
  if (draw_info->server_name != (char *)NULL)
    draw_info->server_name = (char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *)NULL)
    draw_info->dash_pattern = (double *)RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops = (StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *)NULL)
    draw_info->clip_mask = DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask = DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask = DestroyImage(draw_info->composite_mask);
  draw_info->signature = (~MagickCoreSignature);
  draw_info = (DrawInfo *) RelinquishMagickMemory(draw_info);
  return (draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y E d g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyEdge() destroys the specified polygon edge.
%
%  The format of the DestroyEdge method is:
%
%      size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
%    o edge: the polygon edge number to destroy.
% */ static size_t DestroyEdge(PolygonInfo * polygon_info, const size_t edge) { assert(edge < polygon_info->number_edges); polygon_info->edges[edge].points = (PointInfo *) RelinquishMagickMemory( polygon_info->edges[edge].points); polygon_info->number_edges--; if (edge < polygon_info->number_edges) (void)memmove(polygon_info->edges + edge, polygon_info->edges + edge + 1, (size_t) (polygon_info->number_edges - edge) * sizeof(*polygon_info->edges)); return (polygon_info->number_edges); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D e s t r o y P o l y g o n I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DestroyPolygonInfo() destroys the PolygonInfo data structure. % * % The format of the DestroyPolygonInfo method is: % % PolygonInfo * *DestroyPolygonInfo(PolygonInfo *polygon_info) % % A description of each * parameter follows: % % o polygon_info: Specifies a pointer to an * PolygonInfo structure. % */ static PolygonInfo * DestroyPolygonInfo(PolygonInfo * polygon_info) { register ssize_t i; if (polygon_info->edges != (EdgeInfo *) NULL) { for (i = 0; i < (ssize_t) polygon_info->number_edges; i++) polygon_info->edges[i].points = (PointInfo *) RelinquishMagickMemory(polygon_info->edges[i].points); polygon_info->edges = (EdgeInfo *) RelinquishMagickMemory( polygon_info->edges); } return ((PolygonInfo *) RelinquishMagickMemory(polygon_info)); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w A f f i n e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawAffineImage() composites the source over the destination * image as % dictated by the affine transform. % % The format of the * DrawAffineImage method is: % % MagickBooleanType * DrawAffineImage(Image *image,const Image *source, % const * AffineMatrix *affine,ExceptionInfo *exception) % % A description of each * parameter follows: % % o image: the image. % % o source: the source * image. % % o affine: the affine transform. % % o exception: return * any errors or warnings in this structure. % */ static SegmentInfo AffineEdge(const Image * image, const AffineMatrix * affine, const double y, const SegmentInfo * edge) { double intercept, z; register double x; SegmentInfo inverse_edge; /* * Determine left and right edges. */ inverse_edge.x1 = edge->x1; inverse_edge.y1 = edge->y1; inverse_edge.x2 = edge->x2; inverse_edge.y2 = edge->y2; z = affine->ry * y + affine->tx; if (affine->sx >= MagickEpsilon) { intercept = (-z / affine->sx); x = intercept; if (x > inverse_edge.x1) inverse_edge.x1 = x; intercept = (-z + (double)image->columns) / affine->sx; x = intercept; if (x < inverse_edge.x2) inverse_edge.x2 = x; } else if (affine->sx < -MagickEpsilon) { intercept = (-z + (double)image->columns) / affine->sx; x = intercept; if (x > inverse_edge.x1) inverse_edge.x1 = x; intercept = (-z / affine->sx); x = intercept; if (x < inverse_edge.x2) inverse_edge.x2 = x; } else if ((z < 0.0) || ((size_t) floor(z + 0.5) >= image->columns)) { inverse_edge.x2 = edge->x1; return (inverse_edge); } /* * Determine top and bottom edges. 
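* (Editorial note: same intercept trick as the left/right pass above, now
* solving rx*x + sy*y + ty against [0,rows): the admissible x interval is
* intersected into inverse_edge, so the scanline loop in DrawAffineImage()
* only visits destination pixels whose preimage falls inside the source
* image.)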
*/ z = affine->sy * y + affine->ty; if (affine->rx >= MagickEpsilon) { intercept = (-z / affine->rx); x = intercept; if (x > inverse_edge.x1) inverse_edge.x1 = x; intercept = (-z + (double)image->rows) / affine->rx; x = intercept; if (x < inverse_edge.x2) inverse_edge.x2 = x; } else if (affine->rx < -MagickEpsilon) { intercept = (-z + (double)image->rows) / affine->rx; x = intercept; if (x > inverse_edge.x1) inverse_edge.x1 = x; intercept = (-z / affine->rx); x = intercept; if (x < inverse_edge.x2) inverse_edge.x2 = x; } else if ((z < 0.0) || ((size_t) floor(z + 0.5) >= image->rows)) { inverse_edge.x2 = edge->x2; return (inverse_edge); } return (inverse_edge); } static AffineMatrix InverseAffineMatrix(const AffineMatrix * affine) { AffineMatrix inverse_affine; double determinant; determinant = PerceptibleReciprocal(affine->sx * affine->sy - affine->rx * affine->ry); inverse_affine.sx = determinant * affine->sy; inverse_affine.rx = determinant * (-affine->rx); inverse_affine.ry = determinant * (-affine->ry); inverse_affine.sy = determinant * affine->sx; inverse_affine.tx = (-affine->tx) * inverse_affine.sx - affine->ty * inverse_affine.ry; inverse_affine.ty = (-affine->tx) * inverse_affine.rx - affine->ty * inverse_affine.sy; return (inverse_affine); } MagickExport MagickBooleanType DrawAffineImage(Image * image, const Image * source, const AffineMatrix * affine, ExceptionInfo * exception) { AffineMatrix inverse_affine; CacheView * image_view, *source_view; MagickBooleanType status; PixelInfo zero; PointInfo extent[4], min, max; register ssize_t i; SegmentInfo edge; ssize_t start, stop, y; /* * Determine bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(source != (const Image *)NULL); assert(source->signature == MagickCoreSignature); assert(affine != (AffineMatrix *) NULL); extent[0].x = 0.0; extent[0].y = 0.0; extent[1].x = (double)source->columns - 1.0; extent[1].y = 0.0; extent[2].x = (double)source->columns - 1.0; extent[2].y = (double)source->rows - 1.0; extent[3].x = 0.0; extent[3].y = (double)source->rows - 1.0; for (i = 0; i < 4; i++) { PointInfo point; point = extent[i]; extent[i].x = point.x * affine->sx + point.y * affine->ry + affine->tx; extent[i].y = point.x * affine->rx + point.y * affine->sy + affine->ty; } min = extent[0]; max = extent[0]; for (i = 1; i < 4; i++) { if (min.x > extent[i].x) min.x = extent[i].x; if (min.y > extent[i].y) min.y = extent[i].y; if (max.x < extent[i].x) max.x = extent[i].x; if (max.y < extent[i].y) max.y = extent[i].y; } /* * Affine transform image. 
*/ if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); status = MagickTrue; edge.x1 = MagickMax(min.x, 0.0); edge.y1 = MagickMax(min.y, 0.0); edge.x2 = MagickMin(max.x, (double)image->columns - 1.0); edge.y2 = MagickMin(max.y, (double)image->rows - 1.0); inverse_affine = InverseAffineMatrix(affine); GetPixelInfo(image, &zero); start = (ssize_t) ceil(edge.y1 - 0.5); stop = (ssize_t) floor(edge.y2 + 0.5); source_view = AcquireVirtualCacheView(source, exception); image_view = AcquireAuthenticCacheView(image, exception); for (y = start; y <= stop; y++) { PixelInfo composite, pixel; PointInfo point; register ssize_t x; register Quantum * magick_restrict q; SegmentInfo inverse_edge; ssize_t x_offset; inverse_edge = AffineEdge(source, &inverse_affine, (double)y, &edge); if (inverse_edge.x2 < inverse_edge.x1) continue; q = GetCacheViewAuthenticPixels(image_view, (ssize_t) ceil(inverse_edge.x1 - 0.5), y, (size_t) (floor(inverse_edge.x2 + 0.5) - ceil(inverse_edge.x1 - 0.5) + 1), 1, exception); if (q == (Quantum *) NULL) continue; pixel = zero; composite = zero; x_offset = 0; for (x = (ssize_t) ceil(inverse_edge.x1 - 0.5); x <= (ssize_t) floor(inverse_edge.x2 + 0.5); x++) { point.x = (double)x *inverse_affine.sx + y * inverse_affine.ry + inverse_affine.tx; point.y = (double)x *inverse_affine.rx + y * inverse_affine.sy + inverse_affine.ty; status = InterpolatePixelInfo(source, source_view, UndefinedInterpolatePixel, point.x, point.y, &pixel, exception); if (status == MagickFalse) break; GetPixelInfoPixel(image, q, &composite); CompositePixelInfoOver(&pixel, pixel.alpha, &composite, composite.alpha, &composite); SetPixelViaPixelInfo(image, &composite, q); x_offset++; q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } source_view = DestroyCacheView(source_view); image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D r a w B o u n d i n g R e c t a n g l e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawBoundingRectangles() draws the bounding rectangles on the * image. This % is only useful for developers debugging the rendering * algorithm. % % The format of the DrawBoundingRectangles method is: % % * MagickBooleanType DrawBoundingRectangles(Image *image, % const * DrawInfo *draw_info,PolygonInfo *polygon_info, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o image: * the image. % % o draw_info: the draw info. % % o polygon_info: * Specifies a pointer to a PolygonInfo structure. % % o exception: return * any errors or warnings in this structure. 
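%
%  (Editorial note: as rendered below, an edge's rectangle is stroked red
%  when the edge runs down the image, green when it runs up, and the union
%  of all edge bounds is stroked blue.)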
% */ static inline double SaneStrokeWidth(const Image * image, const DrawInfo * draw_info) { return (MagickMin((double)draw_info->stroke_width, (2.0 * sqrt(2.0) + MagickEpsilon) * MagickMax(image->columns, image->rows))); } static MagickBooleanType DrawBoundingRectangles(Image * image, const DrawInfo * draw_info, const PolygonInfo * polygon_info, ExceptionInfo * exception) { double mid; DrawInfo * clone_info; MagickStatusType status; PointInfo end, resolution, start; PrimitiveInfo primitive_info[6]; register ssize_t i; SegmentInfo bounds; ssize_t coordinates; (void)memset(primitive_info, 0, sizeof(primitive_info)); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); status = QueryColorCompliance("#000F", AllCompliance, &clone_info->fill, exception); if (status == MagickFalse) { clone_info = DestroyDrawInfo(clone_info); return (MagickFalse); } resolution.x = 96.0; resolution.y = 96.0; if (clone_info->density != (char *)NULL) { GeometryInfo geometry_info; MagickStatusType flags; flags = ParseGeometry(clone_info->density, &geometry_info); resolution.x = geometry_info.rho; resolution.y = geometry_info.sigma; if ((flags & SigmaValue) == MagickFalse) resolution.y = resolution.x; } mid = (resolution.x / 96.0) * ExpandAffine(&clone_info->affine) * SaneStrokeWidth(image, clone_info) / 2.0; bounds.x1 = 0.0; bounds.y1 = 0.0; bounds.x2 = 0.0; bounds.y2 = 0.0; if (polygon_info != (PolygonInfo *) NULL) { bounds = polygon_info->edges[0].bounds; for (i = 1; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].bounds.x1 < (double)bounds.x1) bounds.x1 = polygon_info->edges[i].bounds.x1; if (polygon_info->edges[i].bounds.y1 < (double)bounds.y1) bounds.y1 = polygon_info->edges[i].bounds.y1; if (polygon_info->edges[i].bounds.x2 > (double)bounds.x2) bounds.x2 = polygon_info->edges[i].bounds.x2; if (polygon_info->edges[i].bounds.y2 > (double)bounds.y2) bounds.y2 = polygon_info->edges[i].bounds.y2; } bounds.x1 -= mid; bounds.x1 = bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns ? (double)image->columns - 1 : bounds.x1; bounds.y1 -= mid; bounds.y1 = bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows ? (double)image->rows - 1 : bounds.y1; bounds.x2 += mid; bounds.x2 = bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns ? (double)image->columns - 1 : bounds.x2; bounds.y2 += mid; bounds.y2 = bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows ? (double)image->rows - 1 : bounds.y2; for (i = 0; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].direction != 0) status = QueryColorCompliance("#f00", AllCompliance, &clone_info->stroke, exception); else status = QueryColorCompliance("#0f0", AllCompliance, &clone_info->stroke, exception); if (status == MagickFalse) break; start.x = (double)(polygon_info->edges[i].bounds.x1 - mid); start.y = (double)(polygon_info->edges[i].bounds.y1 - mid); end.x = (double)(polygon_info->edges[i].bounds.x2 + mid); end.y = (double)(polygon_info->edges[i].bounds.y2 + mid); primitive_info[0].primitive = RectanglePrimitive; status &= TraceRectangle(primitive_info, start, end); primitive_info[0].method = ReplaceMethod; coordinates = (ssize_t) primitive_info[0].coordinates; primitive_info[coordinates].primitive = UndefinedPrimitive; status = DrawPrimitive(image, clone_info, primitive_info, exception); if (status == MagickFalse) break; } if (i < (ssize_t) polygon_info->number_edges) { clone_info = DestroyDrawInfo(clone_info); return (status == 0 ? 
MagickFalse : MagickTrue); } } status = QueryColorCompliance("#00f", AllCompliance, &clone_info->stroke, exception); if (status == MagickFalse) { clone_info = DestroyDrawInfo(clone_info); return (MagickFalse); } start.x = (double)(bounds.x1 - mid); start.y = (double)(bounds.y1 - mid); end.x = (double)(bounds.x2 + mid); end.y = (double)(bounds.y2 + mid); primitive_info[0].primitive = RectanglePrimitive; status &= TraceRectangle(primitive_info, start, end); primitive_info[0].method = ReplaceMethod; coordinates = (ssize_t) primitive_info[0].coordinates; primitive_info[coordinates].primitive = UndefinedPrimitive; status = DrawPrimitive(image, clone_info, primitive_info, exception); clone_info = DestroyDrawInfo(clone_info); return (status == 0 ? MagickFalse : MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w C l i p P a t h * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawClipPath() draws the clip path on the image mask. % % The * format of the DrawClipPath method is: % % MagickBooleanType * DrawClipPath(Image *image,const DrawInfo *draw_info, % const char * *id,ExceptionInfo *exception) % % A description of each parameter * follows: % % o image: the image. % % o draw_info: the draw info. % % * o id: the clip path id. % % o exception: return any errors or warnings * in this structure. % */ MagickExport MagickBooleanType DrawClipPath(Image * image, const DrawInfo * draw_info, const char *id, ExceptionInfo * exception) { const char *clip_path; Image * clipping_mask; MagickBooleanType status; clip_path = GetImageArtifact(image, id); if (clip_path == (const char *)NULL) return (MagickFalse); clipping_mask = DrawClippingMask(image, draw_info, draw_info->clip_mask, clip_path, exception); if (clipping_mask == (Image *) NULL) return (MagickFalse); status = SetImageMask(image, WritePixelMask, clipping_mask, exception); clipping_mask = DestroyImage(clipping_mask); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w C l i p p i n g M a s k * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawClippingMask() draws the clip path and returns it as an * image clipping % mask. % % The format of the DrawClippingMask method is: * % % Image *DrawClippingMask(Image *image,const DrawInfo *draw_info, % * const char *id,const char *clip_path,ExceptionInfo *exception) % % A * description of each parameter follows: % % o image: the image. % % o * draw_info: the draw info. % % o id: the clip path id. % % o * clip_path: the clip path. % % o exception: return any errors or * warnings in this structure. % */ static Image * DrawClippingMask(Image * image, const DrawInfo * draw_info, const char *id, const char *clip_path, ExceptionInfo * exception) { DrawInfo * clone_info; Image * clip_mask, *separate_mask; MagickStatusType status; /* * Draw a clip path. 
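* (Editorial usage sketch for DrawClipPath() above; the artifact id is
* hypothetical, since in practice RenderMVGContent() registers it while
* parsing a clip-path definition:
*
*   (void) SetImageArtifact(image,"#my_clip","rectangle 10,10 90,90");
*   status = DrawClipPath(image,draw_info,"#my_clip",exception);
*
* The lookup is a plain GetImageArtifact(image,id), as the code shows.)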
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (const DrawInfo *)NULL); clip_mask = AcquireImage((const ImageInfo *)NULL, exception); status = SetImageExtent(clip_mask, image->columns, image->rows, exception); if (status == MagickFalse) return (DestroyImage(clip_mask)); status = SetImageMask(clip_mask, WritePixelMask, (Image *) NULL, exception); status = QueryColorCompliance("#0000", AllCompliance, &clip_mask->background_color, exception); clip_mask->background_color.alpha = (MagickRealType) TransparentAlpha; clip_mask->background_color.alpha_trait = BlendPixelTrait; status = SetImageBackgroundColor(clip_mask, exception); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "\nbegin clip-path %s", id); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); (void)CloneString(&clone_info->primitive, clip_path); status = QueryColorCompliance("#ffffff", AllCompliance, &clone_info->fill, exception); if (clone_info->clip_mask != (char *)NULL) clone_info->clip_mask = DestroyString(clone_info->clip_mask); status = QueryColorCompliance("#00000000", AllCompliance, &clone_info->stroke, exception); clone_info->stroke_width = 0.0; clone_info->alpha = OpaqueAlpha; clone_info->clip_path = MagickTrue; status = RenderMVGContent(clip_mask, clone_info, 0, exception); clone_info = DestroyDrawInfo(clone_info); separate_mask = SeparateImage(clip_mask, AlphaChannel, exception); if (separate_mask != (Image *) NULL) { clip_mask = DestroyImage(clip_mask); clip_mask = separate_mask; status = NegateImage(clip_mask, MagickFalse, exception); if (status == MagickFalse) clip_mask = DestroyImage(clip_mask); } if (status == MagickFalse) clip_mask = DestroyImage(clip_mask); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "end clip-path"); return (clip_mask); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w C o m p o s i t e M a s k * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawCompositeMask() draws the mask path and returns it as an * image mask. % % The format of the DrawCompositeMask method is: % % * Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info, % * const char *id,const char *mask_path,ExceptionInfo *exception) % % A * description of each parameter follows: % % o image: the image. % % o * draw_info: the draw info. % % o id: the mask path id. % % o * mask_path: the mask path. % % o exception: return any errors or * warnings in this structure. % */ static Image * DrawCompositeMask(Image * image, const DrawInfo * draw_info, const char *id, const char *mask_path, ExceptionInfo * exception) { Image * composite_mask, *separate_mask; DrawInfo * clone_info; MagickStatusType status; /* * Draw a mask path. 
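* (Editorial note: this routine mirrors DrawClippingMask() above, with the
* same scratch image, white fill, alpha separation, and negation, but the
* scratch image carries a CompositePixelMask instead of a WritePixelMask,
* and there is no clip_path special-casing.)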
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (const DrawInfo *)NULL); composite_mask = AcquireImage((const ImageInfo *)NULL, exception); status = SetImageExtent(composite_mask, image->columns, image->rows, exception); if (status == MagickFalse) return (DestroyImage(composite_mask)); status = SetImageMask(composite_mask, CompositePixelMask, (Image *) NULL, exception); status = QueryColorCompliance("#0000", AllCompliance, &composite_mask->background_color, exception); composite_mask->background_color.alpha = (MagickRealType) TransparentAlpha; composite_mask->background_color.alpha_trait = BlendPixelTrait; (void)SetImageBackgroundColor(composite_mask, exception); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "\nbegin mask-path %s", id); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); (void)CloneString(&clone_info->primitive, mask_path); status = QueryColorCompliance("#ffffff", AllCompliance, &clone_info->fill, exception); status = QueryColorCompliance("#00000000", AllCompliance, &clone_info->stroke, exception); clone_info->stroke_width = 0.0; clone_info->alpha = OpaqueAlpha; status = RenderMVGContent(composite_mask, clone_info, 0, exception); clone_info = DestroyDrawInfo(clone_info); separate_mask = SeparateImage(composite_mask, AlphaChannel, exception); if (separate_mask != (Image *) NULL) { composite_mask = DestroyImage(composite_mask); composite_mask = separate_mask; status = NegateImage(composite_mask, MagickFalse, exception); if (status == MagickFalse) composite_mask = DestroyImage(composite_mask); } if (status == MagickFalse) composite_mask = DestroyImage(composite_mask); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "end mask-path"); return (composite_mask); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D r a w D a s h P o l y g o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawDashPolygon() draws a dashed polygon (line, rectangle, * ellipse) on the % image while respecting the dash offset and dash pattern * attributes. % % The format of the DrawDashPolygon method is: % % * MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, % * const PrimitiveInfo *primitive_info,Image *image, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o * draw_info: the draw info. % % o primitive_info: Specifies a pointer to * a PrimitiveInfo structure. % % o image: the image. % % o exception: * return any errors or warnings in this structure. 
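%
%  Worked example (illustrative, assuming an affine scale of 1.0): with
%  stroke-dasharray 5,3 and stroke-dashoffset 6, the offset loop first
%  consumes the 5-unit dash (4.5 units after its -0.5 end adjustment,
%  leaving offset 1.5 and n=1), then stops inside the 3-unit gap with 2
%  units remaining, so the first visible dash begins 2 units along the
%  path.
%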
% */ static MagickBooleanType DrawDashPolygon(const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, Image * image, ExceptionInfo * exception) { double length, maximum_length, offset, scale, total_length; DrawInfo * clone_info; MagickStatusType status; PrimitiveInfo * dash_polygon; register double dx, dy; register ssize_t i; size_t number_vertices; ssize_t j, n; assert(draw_info != (const DrawInfo *)NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin draw-dash"); for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++); number_vertices = (size_t) i; dash_polygon = (PrimitiveInfo *) AcquireQuantumMemory((size_t) (2UL * number_vertices + 32UL), sizeof(*dash_polygon)); if (dash_polygon == (PrimitiveInfo *) NULL) return (MagickFalse); (void)memset(dash_polygon, 0, (2UL * number_vertices + 32UL) * sizeof(*dash_polygon)); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->miterlimit = 0; dash_polygon[0] = primitive_info[0]; scale = ExpandAffine(&draw_info->affine); length = scale * draw_info->dash_pattern[0]; offset = fabs(draw_info->dash_offset) >= MagickEpsilon ? scale * draw_info->dash_offset : 0.0; j = 1; for (n = 0; offset > 0.0; j = 0) { if (draw_info->dash_pattern[n] <= 0.0) break; length = scale * (draw_info->dash_pattern[n] + (n == 0 ? -0.5 : 0.5)); if (offset > length) { offset -= length; n++; length = scale * draw_info->dash_pattern[n]; continue; } if (offset < length) { length -= offset; offset = 0.0; break; } offset = 0.0; n++; } status = MagickTrue; maximum_length = 0.0; total_length = 0.0; for (i = 1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++) { dx = primitive_info[i].point.x - primitive_info[i - 1].point.x; dy = primitive_info[i].point.y - primitive_info[i - 1].point.y; maximum_length = hypot(dx, dy); if (maximum_length > MaxBezierCoordinates) break; if (fabs(length) < MagickEpsilon) { if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon) n++; if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon) n = 0; length = scale * draw_info->dash_pattern[n]; } for (total_length = 0.0; (length >= 0.0) && (maximum_length >= (total_length + length));) { total_length += length; if ((n & 0x01) != 0) { dash_polygon[0] = primitive_info[0]; dash_polygon[0].point.x = (double)(primitive_info[i - 1].point.x + dx * total_length * PerceptibleReciprocal(maximum_length)); dash_polygon[0].point.y = (double)(primitive_info[i - 1].point.y + dy * total_length * PerceptibleReciprocal(maximum_length)); j = 1; } else { if ((j + 1) > (ssize_t) number_vertices) break; dash_polygon[j] = primitive_info[i - 1]; dash_polygon[j].point.x = (double)(primitive_info[i - 1].point.x + dx * total_length * PerceptibleReciprocal(maximum_length)); dash_polygon[j].point.y = (double)(primitive_info[i - 1].point.y + dy * total_length * PerceptibleReciprocal(maximum_length)); dash_polygon[j].coordinates = 1; j++; dash_polygon[0].coordinates = (size_t) j; dash_polygon[j].primitive = UndefinedPrimitive; status &= DrawStrokePolygon(image, clone_info, dash_polygon, exception); if (status == MagickFalse) break; } if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon) n++; if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon) n = 0; length = scale * draw_info->dash_pattern[n]; } length -= (maximum_length - total_length); if ((n & 0x01) != 0) continue; dash_polygon[j] = primitive_info[i]; dash_polygon[j].coordinates = 1; j++; } if ((status != MagickFalse) && (total_length < maximum_length) && ((n & 0x01) == 0) && (j > 1)) { 
dash_polygon[j] = primitive_info[i - 1]; dash_polygon[j].point.x += MagickEpsilon; dash_polygon[j].point.y += MagickEpsilon; dash_polygon[j].coordinates = 1; j++; dash_polygon[0].coordinates = (size_t) j; dash_polygon[j].primitive = UndefinedPrimitive; status &= DrawStrokePolygon(image, clone_info, dash_polygon, exception); } dash_polygon = (PrimitiveInfo *) RelinquishMagickMemory(dash_polygon); clone_info = DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-dash"); return (status != 0 ? MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w G r a d i e n t I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawGradientImage() draws a linear gradient on the image. % % * The format of the DrawGradientImage method is: % % MagickBooleanType * DrawGradientImage(Image *image, % const DrawInfo * *draw_info,ExceptionInfo *exception) % % A description of each parameter * follows: % % o image: the image. % % o draw_info: the draw info. % % * o exception: return any errors or warnings in this structure. % */ static inline double GetStopColorOffset(const GradientInfo * gradient, const ssize_t x, const ssize_t y) { switch (gradient->type) { case UndefinedGradient: case LinearGradient: { double gamma, length, offset, scale; PointInfo p, q; const SegmentInfo * gradient_vector; gradient_vector = (&gradient->gradient_vector); p.x = gradient_vector->x2 - gradient_vector->x1; p.y = gradient_vector->y2 - gradient_vector->y1; q.x = (double)x - gradient_vector->x1; q.y = (double)y - gradient_vector->y1; length = sqrt(q.x * q.x + q.y * q.y); gamma = sqrt(p.x * p.x + p.y * p.y) * length; gamma = PerceptibleReciprocal(gamma); scale = p.x * q.x + p.y * q.y; offset = gamma * scale * length; return (offset); } case RadialGradient: { PointInfo v; if (gradient->spread == RepeatSpread) { v.x = (double)x - gradient->center.x; v.y = (double)y - gradient->center.y; return (sqrt(v.x * v.x + v.y * v.y)); } v.x = (double)(((x - gradient->center.x) * cos(DegreesToRadians( gradient->angle))) + ((y - gradient->center.y) * sin(DegreesToRadians( gradient->angle)))) * PerceptibleReciprocal(gradient->radii.x); v.y = (double)(((x - gradient->center.x) * sin(DegreesToRadians( gradient->angle))) - ((y - gradient->center.y) * cos(DegreesToRadians( gradient->angle)))) * PerceptibleReciprocal(gradient->radii.y); return (sqrt(v.x * v.x + v.y * v.y)); } } return (0.0); } static int StopInfoCompare(const void *x, const void *y) { StopInfo * stop_1, *stop_2; stop_1 = (StopInfo *) x; stop_2 = (StopInfo *) y; if (stop_1->offset > stop_2->offset) return (1); if (fabs(stop_1->offset - stop_2->offset) <= MagickEpsilon) return (0); return (-1); } MagickExport MagickBooleanType DrawGradientImage(Image * image, const DrawInfo * draw_info, ExceptionInfo * exception) { CacheView * image_view; const GradientInfo * gradient; const SegmentInfo * gradient_vector; double length; MagickBooleanType status; PixelInfo zero; PointInfo point; RectangleInfo bounding_box; ssize_t y; /* * Draw linear or radial gradient on image. 
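 *
 * Illustrative aside, not part of the original source: for a linear
 * gradient, GetStopColorOffset() projects each pixel onto the gradient
 * vector; after the PerceptibleReciprocal(length) scaling below, the
 * normalized offset is (p.q)/|p|^2, where p is the gradient vector and
 * q runs from its origin to the pixel.  For example, with a vector from
 * (0,0) to (100,0), the pixel (25,40) yields 2500/10000 = 0.25, i.e. a
 * quarter of the way between the first and last stops.
 *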
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (const DrawInfo *)NULL); gradient = (&draw_info->gradient); qsort(gradient->stops, gradient->number_stops, sizeof(StopInfo), StopInfoCompare); gradient_vector = (&gradient->gradient_vector); point.x = gradient_vector->x2 - gradient_vector->x1; point.y = gradient_vector->y2 - gradient_vector->y1; length = sqrt(point.x * point.x + point.y * point.y); bounding_box = gradient->bounding_box; status = MagickTrue; GetPixelInfo(image, &zero); image_view = AcquireAuthenticCacheView(image, exception); for (y = bounding_box.y; y < (ssize_t) bounding_box.height; y++) { double alpha, offset; PixelInfo composite, pixel; register Quantum * magick_restrict q; register ssize_t i, x; ssize_t j; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } pixel = zero; composite = zero; offset = GetStopColorOffset(gradient, 0, y); if (gradient->type != RadialGradient) offset *= PerceptibleReciprocal(length); for (x = bounding_box.x; x < (ssize_t) bounding_box.width; x++) { GetPixelInfoPixel(image, q, &pixel); switch (gradient->spread) { case UndefinedSpread: case PadSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1 - 0.5)) || (y != (ssize_t) ceil(gradient_vector->y1 - 0.5))) { offset = GetStopColorOffset(gradient, x, y); if (gradient->type != RadialGradient) offset *= PerceptibleReciprocal(length); } for (i = 0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if ((offset < 0.0) || (i == 0)) composite = gradient->stops[0].color; else if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops)) composite = gradient->stops[gradient->number_stops - 1].color; else { j = i; i--; alpha = (offset - gradient->stops[i].offset) / (gradient->stops[j].offset - gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color, 1.0 - alpha, &gradient->stops[j].color, alpha, &composite); } break; } case ReflectSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1 - 0.5)) || (y != (ssize_t) ceil(gradient_vector->y1 - 0.5))) { offset = GetStopColorOffset(gradient, x, y); if (gradient->type != RadialGradient) offset *= PerceptibleReciprocal(length); } if (offset < 0.0) offset = (-offset); if ((ssize_t) fmod(offset, 2.0) == 0) offset = fmod(offset, 1.0); else offset = 1.0 - fmod(offset, 1.0); for (i = 0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite = gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite = gradient->stops[gradient->number_stops - 1].color; else { j = i; i--; alpha = (offset - gradient->stops[i].offset) / (gradient->stops[j].offset - gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color, 1.0 - alpha, &gradient->stops[j].color, alpha, &composite); } break; } case RepeatSpread: { double repeat; MagickBooleanType antialias; antialias = MagickFalse; repeat = 0.0; if ((x != (ssize_t) ceil(gradient_vector->x1 - 0.5)) || (y != (ssize_t) ceil(gradient_vector->y1 - 0.5))) { offset = GetStopColorOffset(gradient, x, y); if (gradient->type == LinearGradient) { repeat = fmod(offset, length); if (repeat < 0.0) repeat = length - fmod(-repeat, length); else repeat = fmod(offset, length); antialias = (repeat < length) && 
((repeat + 1.0) > length) ? MagickTrue : MagickFalse; offset = PerceptibleReciprocal(length) * repeat; } else { repeat = fmod(offset, gradient->radius); if (repeat < 0.0) repeat = gradient->radius - fmod(-repeat, gradient->radius); else repeat = fmod(offset, gradient->radius); antialias = repeat + 1.0 > gradient->radius ? MagickTrue : MagickFalse; offset = repeat / gradient->radius; } } for (i = 0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite = gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite = gradient->stops[gradient->number_stops - 1].color; else { j = i; i--; alpha = (offset - gradient->stops[i].offset) / (gradient->stops[j].offset - gradient->stops[i].offset); if (antialias != MagickFalse) { if (gradient->type == LinearGradient) alpha = length - repeat; else alpha = gradient->radius - repeat; i = 0; j = (ssize_t) gradient->number_stops - 1L; } CompositePixelInfoBlend(&gradient->stops[i].color, 1.0 - alpha, &gradient->stops[j].color, alpha, &composite); } break; } } CompositePixelInfoOver(&composite, composite.alpha, &pixel, pixel.alpha, &pixel); SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawImage() draws a graphic primitive on your image. The * primitive % may be represented as a string or filename. Precede the * filename with an % "at" sign (@) and the contents of the file are drawn * on the image. You % can affect how text is drawn by setting one or more * members of the draw % info structure. % % The format of the DrawImage * method is: % % MagickBooleanType DrawImage(Image *image,const * DrawInfo *draw_info, % ExceptionInfo *exception) % % A description * of each parameter follows: % % o image: the image. % % o draw_info: * the draw info. % % o exception: return any errors or warnings in this * structure. % */ static MagickBooleanType CheckPrimitiveExtent(MVGInfo * mvg_info, const size_t pad) { double extent; size_t quantum; /* * Check if there is enough storage for drawing pimitives. */ extent = (double)mvg_info->offset + pad + PrimitiveExtentPad; quantum = sizeof(**mvg_info->primitive_info); if (((extent * quantum) < (double)SSIZE_MAX) && ((extent * quantum) < (double)GetMaxMemoryRequest())) { if (extent <= (double)*mvg_info->extent) return (MagickTrue); *mvg_info->primitive_info = (PrimitiveInfo *) ResizeQuantumMemory( *mvg_info->primitive_info, (size_t) extent, quantum); if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL) { register ssize_t i; *mvg_info->extent = (size_t) extent; for (i = mvg_info->offset + 1; i < (ssize_t) extent; i++) (*mvg_info->primitive_info)[i].primitive = UndefinedPrimitive; return (MagickTrue); } } /* * Reallocation failed, allocate a primitive to facilitate unwinding. 
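 *
 * Illustrative aside, not part of the original source: the guard above
 * does its size arithmetic in double precision so an oversized request
 * cannot wrap around before the comparison, roughly:
 *
 *   double extent = (double) offset + pad + PrimitiveExtentPad;
 *   if (((extent * sizeof(PrimitiveInfo)) < (double) SSIZE_MAX) &&
 *       ((extent * sizeof(PrimitiveInfo)) < (double) GetMaxMemoryRequest()))
 *     ...safe to call ResizeQuantumMemory()...
 *
 * On failure, the minimal PrimitiveExtentPad-sized buffer allocated
 * just below lets callers keep writing the UndefinedPrimitive sentinel
 * while the MVG loop unwinds, rather than touching a freed array.
 *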
*/ (void)ThrowMagickException(mvg_info->exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", ""); if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL) *mvg_info->primitive_info = (PrimitiveInfo *) RelinquishMagickMemory( *mvg_info->primitive_info); *mvg_info->primitive_info = (PrimitiveInfo *) AcquireCriticalMemory( PrimitiveExtentPad * quantum); (void)memset(*mvg_info->primitive_info, 0, PrimitiveExtentPad * quantum); *mvg_info->extent = 1; return (MagickFalse); } MagickExport int MVGMacroCompare(const void *target, const void *source) { const char *p, *q; p = (const char *)target; q = (const char *)source; return (strcmp(p, q)); } static SplayTreeInfo * GetMVGMacros(const char *primitive) { char *macro, *token; const char *q; size_t extent; SplayTreeInfo * macros; /* * Scan graphic primitives for definitions and classes. */ if (primitive == (const char *)NULL) return ((SplayTreeInfo *) NULL); macros = NewSplayTree(MVGMacroCompare, RelinquishMagickMemory, RelinquishMagickMemory); macro = AcquireString(primitive); token = AcquireString(primitive); extent = strlen(token) + MagickPathExtent; for (q = primitive; *q != '\0';) { if (GetNextToken(q, &q, extent, token) < 1) break; if (*token == '\0') break; if (LocaleCompare("push", token) == 0) { register const char *end, *start; (void)GetNextToken(q, &q, extent, token); if (*q == '"') { char name[MagickPathExtent]; const char *p; ssize_t n; /* * Named macro (e.g. push graphic-context "wheel"). */ (void)GetNextToken(q, &q, extent, token); start = q; end = q; (void)CopyMagickString(name, token, MagickPathExtent); n = 1; for (p = q; *p != '\0';) { if (GetNextToken(p, &p, extent, token) < 1) break; if (*token == '\0') break; if (LocaleCompare(token, "pop") == 0) { end = p - strlen(token) - 1; n--; } if (LocaleCompare(token, "push") == 0) n++; if ((n == 0) && (end > start)) { /* * Extract macro. */ (void)GetNextToken(p, &p, extent, token); (void)CopyMagickString(macro, start, (size_t) (end - start)); (void)AddValueToSplayTree(macros, ConstantString(name), ConstantString(macro)); break; } } } } } token = DestroyString(token); macro = DestroyString(macro); return (macros); } static inline MagickBooleanType IsPoint(const char *point) { char *p; double value; value = StringToDouble(point, &p); return ((fabs(value) < MagickEpsilon) && (p == point) ? 
MagickFalse : MagickTrue); } static inline MagickBooleanType TracePoint(PrimitiveInfo * primitive_info, const PointInfo point) { primitive_info->coordinates = 1; primitive_info->closed_subpath = MagickFalse; primitive_info->point = point; return (MagickTrue); } static MagickBooleanType RenderMVGContent(Image * image, const DrawInfo * draw_info, const size_t depth, ExceptionInfo * exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, coordinates, cursor, factor, primitive_extent; DrawInfo * clone_info, **graphic_context; MagickBooleanType proceed; MagickStatusType status; MVGInfo mvg_info; PointInfo point; PrimitiveInfo * primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_points, number_stops; SplayTreeInfo * macros; ssize_t defsDepth, j, k, n, symbolDepth; StopInfo * stops; TypeMetric metrics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); if (depth > MagickMaxRecursionDepth) ThrowBinaryException(DrawError, "VectorGraphicsNestedTooDeeply", image->filename); if ((draw_info->primitive == (char *)NULL) || (*draw_info->primitive == '\0')) return (MagickFalse); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "begin draw-image"); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) { status = SetImageAlphaChannel(image, OpaqueAlphaChannel, exception); if (status == MagickFalse) return (MagickFalse); } if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) && (*(draw_info->primitive + 1) != '-') && (depth == 0)) primitive = FileToString(draw_info->primitive + 1, ~0UL, exception); else primitive = AcquireString(draw_info->primitive); if (primitive == (char *)NULL) return (MagickFalse); primitive_extent = (double)strlen(primitive); (void)SetImageArtifact(image, "mvg:vector-graphics", primitive); n = 0; number_stops = 0; stops = (StopInfo *) NULL; /* * Allocate primitive info memory. 
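 *
 * Illustrative aside, not part of the original source: a typical entry
 * point into this renderer (error handling omitted, image and exception
 * assumed in scope):
 *
 *   DrawInfo *info = AcquireDrawInfo();
 *   (void) CloneString(&info->primitive,
 *     "push graphic-context stroke red fill none "
 *     "rectangle 10,10 90,60 pop graphic-context");
 *   (void) DrawImage(image, info, exception);
 *   info = DestroyDrawInfo(info);
 *
 * DrawImage() forwards to RenderMVGContent() with depth 0; recursion
 * through use and clip-path is capped by MagickMaxRecursionDepth.
 *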
*/ graphic_context = (DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive = DestroyString(primitive); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } number_points = PrimitiveExtentPad; primitive_info = (PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive = DestroyString(primitive); for (; n >= 0; n--) graphic_context[n] = DestroyDrawInfo(graphic_context[n]); graphic_context = (DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } (void)memset(primitive_info, 0, (size_t) number_points * sizeof(*primitive_info)); (void)memset(&mvg_info, 0, sizeof(mvg_info)); mvg_info.primitive_info = (&primitive_info); mvg_info.extent = (&number_points); mvg_info.exception = exception; graphic_context[n] = CloneDrawInfo((ImageInfo *) NULL, draw_info); graphic_context[n]->viewbox = image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width = image->columns; graphic_context[n]->viewbox.height = image->rows; } token = AcquireString(primitive); extent = strlen(token) + MagickPathExtent; defsDepth = 0; symbolDepth = 0; cursor = 0.0; macros = GetMVGMacros(primitive); status = MagickTrue; for (q = primitive; *q != '\0';) { /* * Interpret graphic primitive. */ if (GetNextToken(q, &q, MagickPathExtent, keyword) < 1) break; if (*keyword == '\0') break; if (*keyword == '#') { /* * Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p = q - strlen(keyword) - 1; primitive_type = UndefinedPrimitive; current = graphic_context[n]->affine; GetAffineMatrix(&affine); *token = '\0'; switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); affine.sx = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); affine.rx = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); affine.ry = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); affine.sy = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); affine.tx = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); affine.ty = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } if (LocaleCompare("alpha", keyword) == 0) { primitive_type = AlphaPrimitive; break; } if (LocaleCompare("arc", keyword) == 0) { primitive_type = ArcPrimitive; break; } status = MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier", keyword) == 0) { primitive_type = BezierPrimitive; break; } if (LocaleCompare("border-color", 
keyword) == 0) { (void)GetNextToken(q, &q, extent, token); status &= QueryColorCompliance(token, AllCompliance, &graphic_context[n]->border_color, exception); break; } status = MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("class", keyword) == 0) { const char *mvg_class; (void)GetNextToken(q, &q, extent, token); if (*token == '\0') { status = MagickFalse; break; } if (LocaleCompare(token, graphic_context[n]->id) == 0) break; mvg_class = (const char *)GetValueFromSplayTree(macros, token); if (mvg_class != (const char *)NULL) { char *elements; ssize_t offset; /* * Inject class elements in stream. */ offset = (ssize_t) (p - primitive); elements = AcquireString(primitive); elements[offset] = '\0'; (void)ConcatenateString(&elements, mvg_class); (void)ConcatenateString(&elements, "\n"); (void)ConcatenateString(&elements, q); primitive = DestroyString(primitive); primitive = elements; q = primitive + offset; } break; } if (LocaleCompare("clip-path", keyword) == 0) { const char *clip_path; /* * Take a node from within the MVG document, and * duplicate it here. */ (void)GetNextToken(q, &q, extent, token); if (*token == '\0') { status = MagickFalse; break; } (void)CloneString(&graphic_context[n]->clip_mask, token); clip_path = (const char *)GetValueFromSplayTree(macros, token); if (clip_path != (const char *)NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask = DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask = DrawClippingMask(image, graphic_context[n], token, clip_path, exception); if (graphic_context[n]->compliance != SVGCompliance) { clip_path = (const char *)GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *)NULL) (void)SetImageArtifact(image, graphic_context[n]->clip_mask, clip_path); status &= DrawClipPath(image, graphic_context[n], graphic_context[n]->clip_mask, exception); } } break; } if (LocaleCompare("clip-rule", keyword) == 0) { ssize_t fill_rule; (void)GetNextToken(q, &q, extent, token); fill_rule = ParseCommandOption(MagickFillRuleOptions, MagickFalse, token); if (fill_rule == -1) { status = MagickFalse; break; } graphic_context[n]->fill_rule = (FillRule) fill_rule; break; } if (LocaleCompare("clip-units", keyword) == 0) { ssize_t clip_units; (void)GetNextToken(q, &q, extent, token); clip_units = ParseCommandOption(MagickClipPathOptions, MagickFalse, token); if (clip_units == -1) { status = MagickFalse; break; } graphic_context[n]->clip_units = (ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx = draw_info->bounds.x2; affine.sy = draw_info->bounds.y2; affine.tx = draw_info->bounds.x1; affine.ty = draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle", keyword) == 0) { primitive_type = CirclePrimitive; break; } if (LocaleCompare("color", keyword) == 0) { primitive_type = ColorPrimitive; break; } if (LocaleCompare("compliance", keyword) == 0) { /* * MVG compliance associates a clipping mask with an * image; SVG compliance associates a clipping mask with * a graphics context. 
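 *
 * The token parsed below is matched against MagickComplianceOptions, so
 * an MVG stream can switch behavior explicitly, e.g. (illustrative):
 *
 *   compliance "SVG"
 *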
*/ (void)GetNextToken(q, &q, extent, token); graphic_context[n]->compliance = (ComplianceType) ParseCommandOption( MagickComplianceOptions, MagickFalse, token); break; } status = MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate", keyword) == 0) { ssize_t decorate; (void)GetNextToken(q, &q, extent, token); decorate = ParseCommandOption(MagickDecorateOptions, MagickFalse, token); if (decorate == -1) { status = MagickFalse; break; } graphic_context[n]->decorate = (DecorationType) decorate; break; } if (LocaleCompare("density", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->density, token); break; } if (LocaleCompare("direction", keyword) == 0) { ssize_t direction; (void)GetNextToken(q, &q, extent, token); direction = ParseCommandOption(MagickDirectionOptions, MagickFalse, token); if (direction == -1) status = MagickFalse; else graphic_context[n]->direction = (DirectionType) direction; break; } status = MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse", keyword) == 0) { primitive_type = EllipsePrimitive; break; } if (LocaleCompare("encoding", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->encoding, token); break; } status = MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); if (graphic_context[n]->clip_path != MagickFalse) break; (void)FormatLocaleString(pattern, MagickPathExtent, "%s", token); if (GetImageArtifact(image, pattern) != (const char *)NULL) (void)DrawPatternPath(image, draw_info, token, &graphic_context[n]->fill_pattern, exception); else { status &= QueryColorCompliance(token, AllCompliance, &graphic_context[n]->fill, exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha = graphic_context[n]->fill_alpha; } break; } if (LocaleCompare("fill-opacity", keyword) == 0) { double opacity; (void)GetNextToken(q, &q, extent, token); if (graphic_context[n]->clip_path != MagickFalse) break; factor = strchr(token, '%') != (char *)NULL ? 
0.01 : 1.0; opacity = MagickMin(MagickMax(factor * StringToDouble(token, &next_token), 0.0), 1.0); if (token == next_token) ThrowPointExpectedException(token, exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->fill_alpha *= opacity; else graphic_context[n]->fill_alpha = QuantumRange * opacity; if (graphic_context[n]->fill.alpha != TransparentAlpha) graphic_context[n]->fill.alpha = graphic_context[n]->fill_alpha; else graphic_context[n]->fill.alpha = (MagickRealType) ClampToQuantum(QuantumRange * (1.0 - opacity)); break; } if (LocaleCompare("fill-rule", keyword) == 0) { ssize_t fill_rule; (void)GetNextToken(q, &q, extent, token); fill_rule = ParseCommandOption(MagickFillRuleOptions, MagickFalse, token); if (fill_rule == -1) { status = MagickFalse; break; } graphic_context[n]->fill_rule = (FillRule) fill_rule; break; } if (LocaleCompare("font", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->font, token); if (LocaleCompare("none", token) == 0) graphic_context[n]->font = (char *)RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->family, token); break; } if (LocaleCompare("font-size", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->pointsize = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } if (LocaleCompare("font-stretch", keyword) == 0) { ssize_t stretch; (void)GetNextToken(q, &q, extent, token); stretch = ParseCommandOption(MagickStretchOptions, MagickFalse, token); if (stretch == -1) { status = MagickFalse; break; } graphic_context[n]->stretch = (StretchType) stretch; break; } if (LocaleCompare("font-style", keyword) == 0) { ssize_t style; (void)GetNextToken(q, &q, extent, token); style = ParseCommandOption(MagickStyleOptions, MagickFalse, token); if (style == -1) { status = MagickFalse; break; } graphic_context[n]->style = (StyleType) style; break; } if (LocaleCompare("font-weight", keyword) == 0) { ssize_t weight; (void)GetNextToken(q, &q, extent, token); weight = ParseCommandOption(MagickWeightOptions, MagickFalse, token); if (weight == -1) weight = (ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight = (size_t) weight; break; } status = MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("gravity", keyword) == 0) { ssize_t gravity; (void)GetNextToken(q, &q, extent, token); gravity = ParseCommandOption(MagickGravityOptions, MagickFalse, token); if (gravity == -1) { status = MagickFalse; break; } graphic_context[n]->gravity = (GravityType) gravity; break; } status = MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image", keyword) == 0) { ssize_t compose; primitive_type = ImagePrimitive; (void)GetNextToken(q, &q, extent, token); compose = ParseCommandOption(MagickComposeOptions, MagickFalse, token); if (compose == -1) { status = MagickFalse; break; } graphic_context[n]->compose = (CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->interline_spacing = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } if (LocaleCompare("interword-spacing", keyword) == 0) { 
(void)GetNextToken(q, &q, extent, token); graphic_context[n]->interword_spacing = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } status = MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->kerning = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } status = MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); if (IsPoint(token) == MagickFalse) break; clone_info = CloneDrawInfo((ImageInfo *) NULL, graphic_context[n]); clone_info->text = AcquireString(" "); status &= GetTypeMetrics(image, clone_info, &metrics, exception); graphic_context[n]->kerning = metrics.width * StringToDouble(token, &next_token); clone_info = DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(token, exception); break; } if (LocaleCompare("line", keyword) == 0) { primitive_type = LinePrimitive; break; } status = MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask", keyword) == 0) { const char *mask_path; /* * Take a node from within the MVG document, and * duplicate it here. */ (void)GetNextToken(q, &q, extent, token); mask_path = (const char *)GetValueFromSplayTree(macros, token); if (mask_path != (const char *)NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask = DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask = DrawCompositeMask(image, graphic_context[n], token, mask_path, exception); if (graphic_context[n]->compliance != SVGCompliance) status = SetImageMask(image, CompositePixelMask, graphic_context[n]->composite_mask, exception); } break; } break; } case 'o': case 'O': { if (LocaleCompare("offset", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("opacity", keyword) == 0) { double opacity; (void)GetNextToken(q, &q, extent, token); if (graphic_context[n]->clip_path != MagickFalse) break; factor = strchr(token, '%') != (char *)NULL ? 0.01 : 1.0; opacity = MagickMin(MagickMax(factor * StringToDouble(token, &next_token), 0.0), 1.0); if (token == next_token) ThrowPointExpectedException(token, exception); if (graphic_context[n]->compliance == SVGCompliance) { graphic_context[n]->fill_alpha *= opacity; graphic_context[n]->stroke_alpha *= opacity; } else { graphic_context[n]->fill_alpha = QuantumRange * opacity; graphic_context[n]->stroke_alpha = QuantumRange * opacity; } break; } status = MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path", keyword) == 0) { primitive_type = PathPrimitive; break; } if (LocaleCompare("point", keyword) == 0) { primitive_type = PointPrimitive; break; } if (LocaleCompare("polyline", keyword) == 0) { primitive_type = PolylinePrimitive; break; } if (LocaleCompare("polygon", keyword) == 0) { primitive_type = PolygonPrimitive; break; } if (LocaleCompare("pop", keyword) == 0) { if (GetNextToken(q, &q, extent, token) < 1) break; if (LocaleCompare("class", token) == 0) break; if (LocaleCompare("clip-path", token) == 0) break; if (LocaleCompare("defs", token) == 0) { defsDepth--; graphic_context[n]->render = defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient", token) == 0) break; if (LocaleCompare("graphic-context", token) == 0) { if (n <= 0) { (void)ThrowMagickException(exception, GetMagickModule(), DrawError, "UnbalancedGraphicContextPushPop", "`%s'", token); status = MagickFalse; n = 0; break; } if ((graphic_context[n]->clip_mask != (char *)NULL) && (graphic_context[n]->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n - 1]->clip_mask) != 0) status = SetImageMask(image, WritePixelMask, (Image *) NULL, exception); graphic_context[n] = DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask", token) == 0) break; if (LocaleCompare("pattern", token) == 0) break; if (LocaleCompare("symbol", token) == 0) { symbolDepth--; graphic_context[n]->render = symbolDepth > 0 ? MagickFalse : MagickTrue; break; } status = MagickFalse; break; } if (LocaleCompare("push", keyword) == 0) { if (GetNextToken(q, &q, extent, token) < 1) break; if (LocaleCompare("class", token) == 0) { /* * Class context. */ for (p = q; *q != '\0';) { if (GetNextToken(q, &q, extent, token) < 1) break; if (LocaleCompare(token, "pop") != 0) continue; (void)GetNextToken(q, (const char **)NULL, extent, token); if (LocaleCompare(token, "class") != 0) continue; break; } (void)GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("clip-path", token) == 0) { (void)GetNextToken(q, &q, extent, token); for (p = q; *q != '\0';) { if (GetNextToken(q, &q, extent, token) < 1) break; if (LocaleCompare(token, "pop") != 0) continue; (void)GetNextToken(q, (const char **)NULL, extent, token); if (LocaleCompare(token, "clip-path") != 0) continue; break; } if ((q == (char *)NULL) || (p == (char *)NULL) || ((q - 4) < p)) { status = MagickFalse; break; } (void)GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("defs", token) == 0) { defsDepth++; graphic_context[n]->render = defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient", token) == 0) { char key[2 * MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; (void)GetNextToken(q, &q, extent, token); (void)CopyMagickString(name, token, MagickPathExtent); (void)GetNextToken(q, &q, extent, token); (void)CopyMagickString(type, token, MagickPathExtent); (void)GetNextToken(q, &q, extent, token); segment.x1 = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); segment.y1 = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); segment.x2 = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); segment.y2 = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); if (LocaleCompare(type, "radial") == 0) { (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); } for (p = q; *q != '\0';) { if (GetNextToken(q, &q, extent, token) < 1) break; if (LocaleCompare(token, "pop") != 0) continue; (void)GetNextToken(q, (const char **)NULL, extent, token); if (LocaleCompare(token, "gradient") != 0) continue; break; } if ((q == (char *)NULL) || (p == (char *)NULL) || ((q - 4) < p)) { status = MagickFalse; break; } (void)CopyMagickString(token, p, (size_t) (q - p - 4 + 1)); bounds.x1 = graphic_context[n]->affine.sx * segment.x1 + graphic_context[n]->affine.ry * segment.y1 + graphic_context[n]->affine.tx; bounds.y1 = graphic_context[n]->affine.rx * segment.x1 + graphic_context[n]->affine.sy * segment.y1 + graphic_context[n]->affine.ty; bounds.x2 = graphic_context[n]->affine.sx * segment.x2 + graphic_context[n]->affine.ry * segment.y2 + graphic_context[n]->affine.tx; bounds.y2 = graphic_context[n]->affine.rx * segment.x2 + graphic_context[n]->affine.sy * segment.y2 + graphic_context[n]->affine.ty; (void)FormatLocaleString(key, MagickPathExtent, "%s", name); (void)SetImageArtifact(image, key, token); (void)FormatLocaleString(key, MagickPathExtent, "%s-type", name); (void)SetImageArtifact(image, key, type); (void)FormatLocaleString(key, MagickPathExtent, "%s-geometry", name); (void)FormatLocaleString(geometry, MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2 - bounds.x1 + 1.0), 1.0), MagickMax(fabs(bounds.y2 - bounds.y1 + 1.0), 1.0), bounds.x1, bounds.y1); (void)SetImageArtifact(image, key, geometry); (void)GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("graphic-context", token) == 0) { n++; graphic_context = (DrawInfo **) ResizeQuantumMemory( graphic_context, (size_t) (n + 1), sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); break; } graphic_context[n] = CloneDrawInfo((ImageInfo *) NULL, graphic_context[n - 1]); if (*q == '"') { (void)GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->id, token); } break; } if (LocaleCompare("mask", token) == 0) { (void)GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("pattern", 
token) == 0) { char key[2 * MagickPathExtent], name[MagickPathExtent]; RectangleInfo bounds; (void)GetNextToken(q, &q, extent, token); (void)CopyMagickString(name, token, MagickPathExtent); (void)GetNextToken(q, &q, extent, token); bounds.x = (ssize_t) ceil(StringToDouble(token, &next_token) - 0.5); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); bounds.y = (ssize_t) ceil(StringToDouble(token, &next_token) - 0.5); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); bounds.width = (size_t) floor(StringToDouble(token, &next_token) + 0.5); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); bounds.height = (size_t) floor(StringToDouble(token, &next_token) + 0.5); if (token == next_token) ThrowPointExpectedException(token, exception); for (p = q; *q != '\0';) { if (GetNextToken(q, &q, extent, token) < 1) break; if (LocaleCompare(token, "pop") != 0) continue; (void)GetNextToken(q, (const char **)NULL, extent, token); if (LocaleCompare(token, "pattern") != 0) continue; break; } if ((q == (char *)NULL) || (p == (char *)NULL) || ((q - 4) < p)) { status = MagickFalse; break; } (void)CopyMagickString(token, p, (size_t) (q - p - 4 + 1)); (void)FormatLocaleString(key, MagickPathExtent, "%s", name); (void)SetImageArtifact(image, key, token); (void)FormatLocaleString(key, MagickPathExtent, "%s-geometry", name); (void)FormatLocaleString(geometry, MagickPathExtent, "%.20gx%.20g%+.20g%+.20g", (double)bounds.width, (double) bounds.height, (double)bounds.x, (double)bounds.y); (void)SetImageArtifact(image, key, geometry); (void)GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("symbol", token) == 0) { symbolDepth++; graphic_context[n]->render = symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status = MagickFalse; break; } status = MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle", keyword) == 0) { primitive_type = RectanglePrimitive; break; } if (LocaleCompare("rotate", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); angle = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); affine.sx = cos(DegreesToRadians(fmod((double)angle, 360.0))); affine.rx = sin(DegreesToRadians(fmod((double)angle, 360.0))); affine.ry = (-sin(DegreesToRadians(fmod((double)angle, 360.0)))); affine.sy = cos(DegreesToRadians(fmod((double)angle, 360.0))); break; } if (LocaleCompare("roundRectangle", keyword) == 0) { primitive_type = RoundRectanglePrimitive; break; } status = MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); affine.sx = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); affine.sy = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } if (LocaleCompare("skewX", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); angle = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); affine.ry = sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); angle = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); affine.rx = (-tan(DegreesToRadians(angle) / 2.0)); break; } if (LocaleCompare("stop-color", keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops = (StopInfo *) AcquireQuantumMemory(2, sizeof(*stops)); else if (number_stops > 2) stops = (StopInfo *) ResizeQuantumMemory(stops, number_stops, sizeof(*stops)); if (stops == (StopInfo *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); break; } (void)GetNextToken(q, &q, extent, token); status &= QueryColorCompliance(token, AllCompliance, &stop_color, exception); stops[number_stops - 1].color = stop_color; (void)GetNextToken(q, &q, extent, token); factor = strchr(token, '%') != (char *)NULL ? 0.01 : 1.0; stops[number_stops - 1].offset = factor * StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } if (LocaleCompare("stroke", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); if (graphic_context[n]->clip_path != MagickFalse) break; (void)FormatLocaleString(pattern, MagickPathExtent, "%s", token); if (GetImageArtifact(image, pattern) != (const char *)NULL) (void)DrawPatternPath(image, draw_info, token, &graphic_context[n]->stroke_pattern, exception); else { status &= QueryColorCompliance(token, AllCompliance, &graphic_context[n]->stroke, exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha = graphic_context[n]->stroke_alpha; } break; } if (LocaleCompare("stroke-antialias", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->stroke_antialias = StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray", keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *)NULL) graphic_context[n]->dash_pattern = (double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r = q; (void)GetNextToken(r, &r, extent, token); if (*token == ',') (void)GetNextToken(r, &r, extent, token); for (x = 0; IsPoint(token) != MagickFalse; x++) { (void)GetNextToken(r, &r, extent, token); if (*token == ',') (void)GetNextToken(r, &r, extent, token); } graphic_context[n]->dash_pattern = (double *) AcquireQuantumMemory((size_t) (2 * x + 2), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *)NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); status = MagickFalse; break; } (void)memset(graphic_context[n]->dash_pattern, 0, (size_t) (2 * x + 2) * sizeof(*graphic_context[n]->dash_pattern)); for (j = 0; j < x; j++) { (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); graphic_context[n]->dash_pattern[j] = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); if (graphic_context[n]->dash_pattern[j] < 0.0) status = MagickFalse; } if ((x & 0x01) != 0) for (; j < (2 * x); j++) graphic_context[n]->dash_pattern[j] = graphic_context[n]->dash_pattern[j - x]; graphic_context[n]->dash_pattern[j] = 0.0; break; } (void)GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("stroke-dashoffset", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->dash_offset = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } if (LocaleCompare("stroke-linecap", keyword) == 0) { ssize_t linecap; (void)GetNextToken(q, &q, extent, token); linecap = ParseCommandOption(MagickLineCapOptions, MagickFalse, token); if (linecap == -1) { status = MagickFalse; break; } graphic_context[n]->linecap = (LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin", keyword) == 0) { ssize_t linejoin; (void)GetNextToken(q, &q, extent, token); linejoin = ParseCommandOption(MagickLineJoinOptions, MagickFalse, token); if (linejoin == -1) { status = MagickFalse; break; } graphic_context[n]->linejoin = (LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->miterlimit = StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity", keyword) == 0) { double opacity; (void)GetNextToken(q, &q, extent, token); if (graphic_context[n]->clip_path != MagickFalse) break; factor = strchr(token, '%') != (char *)NULL ? 
0.01 : 1.0; opacity = MagickMin(MagickMax(factor * StringToDouble(token, &next_token), 0.0), 1.0); if (token == next_token) ThrowPointExpectedException(token, exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->stroke_alpha *= opacity; else graphic_context[n]->stroke_alpha = QuantumRange * opacity; if (graphic_context[n]->stroke.alpha != TransparentAlpha) graphic_context[n]->stroke.alpha = graphic_context[n]->stroke_alpha; else graphic_context[n]->stroke.alpha = (MagickRealType) ClampToQuantum(QuantumRange * (1.0 - opacity)); break; } if (LocaleCompare("stroke-width", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } status = MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text", keyword) == 0) { primitive_type = TextPrimitive; cursor = 0.0; break; } if (LocaleCompare("text-align", keyword) == 0) { ssize_t align; (void)GetNextToken(q, &q, extent, token); align = ParseCommandOption(MagickAlignOptions, MagickFalse, token); if (align == -1) { status = MagickFalse; break; } graphic_context[n]->align = (AlignType) align; break; } if (LocaleCompare("text-anchor", keyword) == 0) { ssize_t align; (void)GetNextToken(q, &q, extent, token); align = ParseCommandOption(MagickAlignOptions, MagickFalse, token); if (align == -1) { status = MagickFalse; break; } graphic_context[n]->align = (AlignType) align; break; } if (LocaleCompare("text-antialias", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->text_antialias = StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); status &= QueryColorCompliance(token, AllCompliance, &graphic_context[n]->undercolor, exception); break; } if (LocaleCompare("translate", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); affine.tx = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); affine.ty = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); cursor = 0.0; break; } status = MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use", keyword) == 0) { const char *use; /* * Get a macro from the MVG document, and "use" it here. 
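 *
 * Illustrative aside, not part of the original source: macros are
 * collected by GetMVGMacros() from quoted push/pop scopes and replayed
 * here by name, e.g. (macro name assumed):
 *
 *   push defs
 *   push graphic-context "wheel"
 *   fill none stroke black circle 50,50 50,10
 *   pop graphic-context
 *   pop defs
 *   use "wheel"
 *
 * The replay clones the current graphic context and recurses through
 * RenderMVGContent() at depth+1.
 *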
*/ (void)GetNextToken(q, &q, extent, token); use = (const char *)GetValueFromSplayTree(macros, token); if (use != (const char *)NULL) { clone_info = CloneDrawInfo((ImageInfo *) NULL, graphic_context[n]); (void)CloneString(&clone_info->primitive, use); status = RenderMVGContent(image, clone_info, depth + 1, exception); clone_info = DestroyDrawInfo(clone_info); } break; } break; } case 'v': case 'V': { if (LocaleCompare("viewbox", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->viewbox.x = (ssize_t) ceil(StringToDouble(token, &next_token) - 0.5); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); graphic_context[n]->viewbox.y = (ssize_t) ceil(StringToDouble(token, &next_token) - 0.5); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); graphic_context[n]->viewbox.width = (size_t) floor(StringToDouble( token, &next_token) + 0.5); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); graphic_context[n]->viewbox.height = (size_t) floor(StringToDouble( token, &next_token) + 0.5); if (token == next_token) ThrowPointExpectedException(token, exception); break; } status = MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->interword_spacing = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } status = MagickFalse; break; } default: { status = MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx - 1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) || (fabs(affine.sy - 1.0) >= MagickEpsilon) || (fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx = current.sx * affine.sx + current.ry * affine.rx; graphic_context[n]->affine.rx = current.rx * affine.sx + current.sy * affine.rx; graphic_context[n]->affine.ry = current.sx * affine.ry + current.ry * affine.sy; graphic_context[n]->affine.sy = current.rx * affine.ry + current.sy * affine.sy; graphic_context[n]->affine.tx = current.sx * affine.tx + current.ry * affine.ty + current.tx; graphic_context[n]->affine.ty = current.rx * affine.tx + current.sy * affine.ty + current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type = LinearGradient; if (draw_info->gradient.type == RadialGradient) type = RadialGradient; (void)GradientImage(image, type, PadSpread, stops, number_stops, exception); } if (number_stops > 0) stops = (StopInfo *) RelinquishMagickMemory(stops); } if ((image->debug != MagickFalse) && (q > p)) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %.*s", (int) (q - p - 1), p); continue; } /* * Parse the primitive attributes. 
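 *
 * Illustrative aside, not part of the original source: the point-
 * parsing loop below reads coordinate pairs until a non-point token
 * appears, treating commas as optional separators, so both of these
 * parse identically:
 *
 *   line 10,20 30,40
 *   line 10 20 30 40
 *
 * Each pair becomes one PrimitiveInfo entry, and the pair count x is
 * stored as primitive_info[j].coordinates.
 *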
*/ for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *)NULL) primitive_info[i].text = DestroyString(primitive_info[i].text); i = 0; mvg_info.offset = i; j = 0; primitive_info[0].point.x = 0.0; primitive_info[0].point.y = 0.0; primitive_info[0].coordinates = 0; primitive_info[0].method = FloodfillMethod; primitive_info[0].closed_subpath = MagickFalse; for (x = 0; *q != '\0'; x++) { /* * Define points. */ if (IsPoint(q) == MagickFalse) break; (void)GetNextToken(q, &q, extent, token); point.x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); point.y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, (const char **)NULL, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); primitive_info[i].primitive = primitive_type; primitive_info[i].point = point; primitive_info[i].coordinates = 0; primitive_info[i].method = FloodfillMethod; primitive_info[i].closed_subpath = MagickFalse; i++; mvg_info.offset = i; if (i < (ssize_t) number_points) continue; status &= CheckPrimitiveExtent(&mvg_info, number_points); } if (status == MagickFalse) break; if ((primitive_info[j].primitive == TextPrimitive) || (primitive_info[j].primitive == ImagePrimitive)) if (primitive_info[j].text != (char *)NULL) primitive_info[j].text = DestroyString(primitive_info[j].text); primitive_info[j].primitive = primitive_type; primitive_info[j].coordinates = (size_t) x; primitive_info[j].method = FloodfillMethod; primitive_info[j].closed_subpath = MagickFalse; /* * Circumscribe primitive within a circle. */ bounds.x1 = primitive_info[j].point.x; bounds.y1 = primitive_info[j].point.y; bounds.x2 = primitive_info[j].point.x; bounds.y2 = primitive_info[j].point.y; for (k = 1; k < (ssize_t) primitive_info[j].coordinates; k++) { point = primitive_info[j + k].point; if (point.x < bounds.x1) bounds.x1 = point.x; if (point.y < bounds.y1) bounds.y1 = point.y; if (point.x > bounds.x2) bounds.x2 = point.x; if (point.y > bounds.y2) bounds.y2 = point.y; } /* * Speculate how many points our primitive might consume. 
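 *
 * Illustrative aside, not part of the original source: the estimates
 * below are deliberately generous.  For example, circle 50,50 80,50
 * spans bounds 30 units wide, so radius = hypot(30,0) = 30 and the
 * reserve is 2*ceil(pi*30) + 6*BezierQuantum + 360 = 190 + 1200 + 360 =
 * 1750 slots, far more than the ellipse trace will actually emit.
 *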
*/ coordinates = (double)primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates *= 5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha = bounds.x2 - bounds.x1; beta = bounds.y2 - bounds.y1; radius = hypot((double)alpha, (double)beta); coordinates *= 5.0; coordinates += 2.0 * ((size_t) ceil((double)MagickPI * radius)) + 6.0 * BezierQuantum + 360.0; break; } case BezierPrimitive: { coordinates = (double)(BezierQuantum * primitive_info[j].coordinates); if (primitive_info[j].coordinates > (107 * BezierQuantum)) { (void)ThrowMagickException(exception, GetMagickModule(), DrawError, "TooManyBezierCoordinates", "`%s'", token); status = MagickFalse; break; } break; } case PathPrimitive: { char *s, *t; (void)GetNextToken(q, &q, extent, token); coordinates = 1.0; t = token; for (s = token; *s != '\0'; s = t) { double value; value = StringToDouble(s, &t); (void)value; if (s == t) { t++; continue; } coordinates++; } for (s = token; *s != '\0'; s++) if (strspn(s, "AaCcQqSsTt") != 0) coordinates += (20.0 * BezierQuantum) + 360.0; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha = bounds.x2 - bounds.x1; beta = bounds.y2 - bounds.y1; radius = hypot(alpha, beta); coordinates = 2.0 * (ceil(MagickPI * radius)) + 6.0 * BezierQuantum + 360.0; break; } default: break; } if (status == MagickFalse) break; if (((size_t) (i + coordinates)) >= number_points) { /* * Resize based on speculative points required by primitive. */ number_points += coordinates + 1; if (number_points < (size_t) coordinates) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); break; } mvg_info.offset = i; status &= CheckPrimitiveExtent(&mvg_info, number_points); } status &= CheckPrimitiveExtent(&mvg_info, PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset = j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status = MagickFalse; break; } status &= TracePoint(primitive_info + j, primitive_info[j].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status = MagickFalse; break; } status &= TraceLine(primitive_info + j, primitive_info[j].point, primitive_info[j + 1].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status = MagickFalse; break; } status &= TraceRectangle(primitive_info + j, primitive_info[j].point, primitive_info[j + 1].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status = MagickFalse; break; } if ((primitive_info[j + 2].point.x < 0.0) || (primitive_info[j + 2].point.y < 0.0)) { status = MagickFalse; break; } if ((primitive_info[j + 1].point.x - primitive_info[j].point.x) < 0.0) { status = MagickFalse; break; } if ((primitive_info[j + 1].point.y - primitive_info[j].point.y) < 0.0) { status = MagickFalse; break; } status &= TraceRoundRectangle(&mvg_info, primitive_info[j].point, primitive_info[j + 1].point, primitive_info[j + 2].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { primitive_type = UndefinedPrimitive; break; } status &= TraceArc(&mvg_info, primitive_info[j].point, primitive_info[j + 1].point, 
primitive_info[j + 2].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status = MagickFalse; break; } if ((primitive_info[j + 1].point.x < 0.0) || (primitive_info[j + 1].point.y < 0.0)) { status = MagickFalse; break; } status &= TraceEllipse(&mvg_info, primitive_info[j].point, primitive_info[j + 1].point, primitive_info[j + 2].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status = MagickFalse; break; } status &= TraceCircle(&mvg_info, primitive_info[j].point, primitive_info[j + 1].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status = MagickFalse; break; } break; } case PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status = MagickFalse; break; } primitive_info[i] = primitive_info[j]; primitive_info[i].coordinates = 0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath = MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status = MagickFalse; break; } status &= TraceBezier(&mvg_info, primitive_info[j].coordinates); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates = (double)TracePath(&mvg_info, token, exception); if (coordinates < 0.0) { status = MagickFalse; break; } i = (ssize_t) (j + coordinates); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status = MagickFalse; break; } (void)GetNextToken(q, &q, extent, token); method = ParseCommandOption(MagickMethodOptions, MagickFalse, token); if (method == -1) { status = MagickFalse; break; } primitive_info[j].method = (PaintMethod) method; break; } case TextPrimitive: { char geometry[MagickPathExtent]; if (primitive_info[j].coordinates != 1) { status = MagickFalse; break; } if (*token != ',') (void)GetNextToken(q, &q, extent, token); (void)CloneString(&primitive_info[j].text, token); /* * Compute text cursor offset. */ clone_info = CloneDrawInfo((ImageInfo *) NULL, graphic_context[n]); if ((fabs(mvg_info.point.x - primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y - primitive_info->point.y) < MagickEpsilon)) { mvg_info.point = primitive_info->point; primitive_info->point.x += cursor; } else { mvg_info.point = primitive_info->point; cursor = 0.0; } (void)FormatLocaleString(geometry, MagickPathExtent, "%+f%+f", primitive_info->point.x, primitive_info->point.y); clone_info->render = MagickFalse; clone_info->text = AcquireString(token); status &= GetTypeMetrics(image, clone_info, &metrics, exception); clone_info = DestroyDrawInfo(clone_info); cursor += metrics.width; if (graphic_context[n]->compliance != SVGCompliance) cursor = 0.0; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status = MagickFalse; break; } (void)GetNextToken(q, &q, extent, token); (void)CloneString(&primitive_info[j].text, token); break; } } mvg_info.offset = i; if ((image->debug != MagickFalse) && (q > p)) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %.*s", (int)(q - p - 1), p); if (status == MagickFalse) break; primitive_info[i].primitive = UndefinedPrimitive; if (i == 0) continue; /* * Transform points. 
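 *
 * Each traced point is mapped through the current user-space affine
 * matrix:
 *
 *   x' = sx * x + ry * y + tx
 *   y' = rx * x + sy * y + ty
 *
 * and the graphic context's bounding box is widened to cover the result.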
*/ for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point = primitive_info[i].point; primitive_info[i].point.x = graphic_context[n]->affine.sx * point.x + graphic_context[n]->affine.ry * point.y + graphic_context[n]->affine.tx; primitive_info[i].point.y = graphic_context[n]->affine.rx * point.x + graphic_context[n]->affine.sy * point.y + graphic_context[n]->affine.ty; point = primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1 = point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1 = point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2 = point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2 = point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *)NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n - 1]->clip_mask) != 0)) { const char *clip_path; clip_path = (const char *)GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *)NULL) (void)SetImageArtifact(image, graphic_context[n]->clip_mask, clip_path); status &= DrawClipPath(image, graphic_context[n], graphic_context[n]->clip_mask, exception); } status &= DrawPrimitive(image, graphic_context[n], primitive_info, exception); } proceed = SetImageProgress(image, RenderImageTag, q - primitive, (MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "end draw-image"); /* * Relinquish resources. */ macros = DestroySplayTree(macros); token = DestroyString(token); if (primitive_info != (PrimitiveInfo *) NULL) { for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *)NULL) primitive_info[i].text = DestroyString(primitive_info[i].text); primitive_info = (PrimitiveInfo *) RelinquishMagickMemory(primitive_info); } primitive = DestroyString(primitive); if (stops != (StopInfo *) NULL) stops = (StopInfo *) RelinquishMagickMemory(stops); for (; n >= 0; n--) graphic_context[n] = DestroyDrawInfo(graphic_context[n]); graphic_context = (DrawInfo **) RelinquishMagickMemory(graphic_context); if (status == MagickFalse) ThrowBinaryException(DrawError, "NonconformingDrawingPrimitiveDefinition", keyword); return (status != 0 ? MagickTrue : MagickFalse); } MagickExport MagickBooleanType DrawImage(Image * image, const DrawInfo * draw_info, ExceptionInfo * exception) { return (RenderMVGContent(image, draw_info, 0, exception)); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w P a t t e r n P a t h * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawPatternPath() draws a pattern. % % The format of the * DrawPatternPath method is: % % MagickBooleanType * DrawPatternPath(Image *image,const DrawInfo *draw_info, % const * char *name,Image **pattern,ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. % % o draw_info: the * draw info. 
%
%    o name: the pattern name.
%
%    o pattern: the pattern image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image * image,
  const DrawInfo * draw_info, const char *name, Image ** pattern,
  ExceptionInfo * exception)
{
  char
    property[MagickPathExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename);
  assert(draw_info != (const DrawInfo *)NULL);
  assert(name != (const char *)NULL);
  (void)FormatLocaleString(property, MagickPathExtent, "%s", name);
  path = GetImageArtifact(image, property);
  if (path == (const char *)NULL)
    return (MagickFalse);
  (void)FormatLocaleString(property, MagickPathExtent, "%s-geometry", name);
  geometry = GetImageArtifact(image, property);
  if (geometry == (const char *)NULL)
    return (MagickFalse);
  if ((*pattern) != (Image *) NULL)
    *pattern = DestroyImage(*pattern);
  image_info = AcquireImageInfo();
  image_info->size = AcquireString(geometry);
  *pattern = AcquireImage(image_info, exception);
  image_info = DestroyImageInfo(image_info);
  (void)QueryColorCompliance("#00000000", AllCompliance,
    &(*pattern)->background_color, exception);
  (void)SetImageBackgroundColor(*pattern, exception);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(DrawEvent, GetMagickModule(),
      "begin pattern-path %s %s", name, geometry);
  clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info);
  clone_info->fill_pattern = NewImageList();
  clone_info->stroke_pattern = NewImageList();
  (void)FormatLocaleString(property, MagickPathExtent, "%s-type", name);
  type = GetImageArtifact(image, property);
  if (type != (const char *)NULL)
    clone_info->gradient.type = (GradientType) ParseCommandOption(
      MagickGradientOptions, MagickFalse, type);
  (void)CloneString(&clone_info->primitive, path);
  status = RenderMVGContent(*pattern, clone_info, 0, exception);
  clone_info = DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(DrawEvent, GetMagickModule(), "end pattern-path");
  return (status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w   P o l y g o n   P r i m i t i v e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPolygonPrimitive() draws a polygon on the image.
%
%  The format of the DrawPolygonPrimitive method is:
%
%      MagickBooleanType DrawPolygonPrimitive(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
% */ static PolygonInfo ** DestroyPolygonThreadSet(PolygonInfo ** polygon_info) { register ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i = 0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i] = DestroyPolygonInfo(polygon_info[i]); polygon_info = (PolygonInfo **) RelinquishMagickMemory(polygon_info); return (polygon_info); } static PolygonInfo ** AcquirePolygonThreadSet( const PrimitiveInfo * primitive_info) { PathInfo * magick_restrict path_info; PolygonInfo ** polygon_info; register ssize_t i; size_t number_threads; number_threads = (size_t) GetMagickResourceLimit(ThreadResource); polygon_info = (PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) return ((PolygonInfo **) NULL); (void)memset(polygon_info, 0, number_threads * sizeof(*polygon_info)); path_info = ConvertPrimitiveToPath(primitive_info); if (path_info == (PathInfo *) NULL) return (DestroyPolygonThreadSet(polygon_info)); for (i = 0; i < (ssize_t) number_threads; i++) { polygon_info[i] = ConvertPathToPolygon(path_info); if (polygon_info[i] == (PolygonInfo *) NULL) return (DestroyPolygonThreadSet(polygon_info)); } path_info = (PathInfo *) RelinquishMagickMemory(path_info); return (polygon_info); } static double GetFillAlpha(PolygonInfo * polygon_info, const double mid, const MagickBooleanType fill, const FillRule fill_rule, const ssize_t x, const ssize_t y, double *stroke_alpha) { double alpha, beta, distance, subpath_alpha; PointInfo delta; register const PointInfo * q; register EdgeInfo * p; register ssize_t i; ssize_t j, winding_number; /* * Compute fill & stroke opacity for this (x,y) point. */ *stroke_alpha = 0.0; subpath_alpha = 0.0; p = polygon_info->edges; for (j = 0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double)y <= (p->bounds.y1 - mid - 0.5)) break; if ((double)y > (p->bounds.y2 + mid + 0.5)) { (void)DestroyEdge(polygon_info, (size_t) j); continue; } if (((double)x <= (p->bounds.x1 - mid - 0.5)) || ((double)x > (p->bounds.x2 + mid + 0.5))) continue; i = (ssize_t) MagickMax((double)p->highwater, 1.0); for (; i < (ssize_t) p->number_points; i++) { if ((double)y <= (p->points[i - 1].y - mid - 0.5)) break; if ((double)y > (p->points[i].y + mid + 0.5)) continue; if (p->scanline != (double)y) { p->scanline = (double)y; p->highwater = (size_t) i; } /* * Compute distance between a point and an edge. */ q = p->points + i - 1; delta.x = (q + 1)->x - q->x; delta.y = (q + 1)->y - q->y; beta = delta.x * (x - q->x) + delta.y * (y - q->y); if (beta <= 0.0) { delta.x = (double)x - q->x; delta.y = (double)y - q->y; distance = delta.x * delta.x + delta.y * delta.y; } else { alpha = delta.x * delta.x + delta.y * delta.y; if (beta >= alpha) { delta.x = (double)x - (q + 1)->x; delta.y = (double)y - (q + 1)->y; distance = delta.x * delta.x + delta.y * delta.y; } else { alpha = PerceptibleReciprocal(alpha); beta = delta.x * (y - q->y) - delta.y * (x - q->x) + MagickEpsilon; distance = alpha * beta * beta; } } /* * Compute stroke & subpath opacity. 
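 *
 * "distance" above is the squared distance from the pixel to the nearest
 * edge segment.  A pixel is fully stroked when it lies within half the
 * stroke width (mid) of the edge; in a narrow band around that radius the
 * alpha falls off with distance, which is what antialiases the stroke.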
*/ beta = 0.0; if (p->ghostline == MagickFalse) { alpha = mid + 0.5; if ((*stroke_alpha < 1.0) && (distance <= ((alpha + 0.25) * (alpha + 0.25)))) { alpha = mid - 0.5; if (distance <= ((alpha + 0.25) * (alpha + 0.25))) *stroke_alpha = 1.0; else { beta = 1.0; if (fabs(distance - 1.0) >= MagickEpsilon) beta = sqrt((double)distance); alpha = beta - mid - 0.5; if (*stroke_alpha < ((alpha - 0.25) * (alpha - 0.25))) *stroke_alpha = (alpha - 0.25) * (alpha - 0.25); } } } if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0)) continue; if (distance <= 0.0) { subpath_alpha = 1.0; continue; } if (distance > 1.0) continue; if (fabs(beta) < MagickEpsilon) { beta = 1.0; if (fabs(distance - 1.0) >= MagickEpsilon) beta = sqrt(distance); } alpha = beta - 1.0; if (subpath_alpha < (alpha * alpha)) subpath_alpha = alpha * alpha; } } /* * Compute fill opacity. */ if (fill == MagickFalse) return (0.0); if (subpath_alpha >= 1.0) return (1.0); /* * Determine winding number. */ winding_number = 0; p = polygon_info->edges; for (j = 0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double)y <= p->bounds.y1) break; if (((double)y > p->bounds.y2) || ((double)x <= p->bounds.x1)) continue; if ((double)x > p->bounds.x2) { winding_number += p->direction ? 1 : -1; continue; } i = (ssize_t) MagickMax((double)p->highwater, 1.0); for (; i < (ssize_t) (p->number_points - 1); i++) if ((double)y <= p->points[i].y) break; q = p->points + i - 1; if ((((q + 1)->x - q->x) * (y - q->y)) <= (((q + 1)->y - q->y) * (x - q->x))) winding_number += p->direction ? 1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return (1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return (1.0); return (subpath_alpha); } static MagickBooleanType DrawPolygonPrimitive(Image * image, const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, ExceptionInfo * exception) { CacheView * image_view; MagickBooleanType fill, status; double mid; PolygonInfo ** magick_restrict polygon_info; register EdgeInfo * p; register ssize_t i; SegmentInfo bounds; ssize_t start_y, stop_y, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (primitive_info->coordinates <= 1) return (MagickTrue); /* * Compute bounding box. */ polygon_info = AcquirePolygonThreadSet(primitive_info); if (polygon_info == (PolygonInfo **) NULL) return (MagickFalse); DisableMSCWarning(4127) if (0) { status = DrawBoundingRectangles(image, draw_info, polygon_info[0], exception); if (status == MagickFalse) { polygon_info = DestroyPolygonThreadSet(polygon_info); return (status); } } RestoreMSCWarning if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin draw-polygon"); fill = (primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? 
MagickTrue : MagickFalse; mid = ExpandAffine(&draw_info->affine) * SaneStrokeWidth(image, draw_info) / 2.0; bounds = polygon_info[0]->edges[0].bounds; for (i = 1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p = polygon_info[0]->edges + i; if (p->bounds.x1 < bounds.x1) bounds.x1 = p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1 = p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2 = p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2 = p->bounds.y2; } bounds.x1 -= (mid + 1.0); bounds.y1 -= (mid + 1.0); bounds.x2 += (mid + 1.0); bounds.y2 += (mid + 1.0); if ((bounds.x1 >= (double)image->columns) || (bounds.y1 >= (double)image->rows) || (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0)) { polygon_info = DestroyPolygonThreadSet(polygon_info); return (MagickTrue); /* virtual polygon */ } bounds.x1 = bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)image->columns - 1.0 ? (double)image->columns - 1.0 : bounds.x1; bounds.y1 = bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)image->rows - 1.0 ? (double)image->rows - 1.0 : bounds.y1; bounds.x2 = bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)image->columns - 1.0 ? (double)image->columns - 1.0 : bounds.x2; bounds.y2 = bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)image->rows - 1.0 ? (double)image->rows - 1.0 : bounds.y2; status = MagickTrue; image_view = AcquireAuthenticCacheView(image, exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* * Draw point. */ start_y = (ssize_t) ceil(bounds.y1 - 0.5); stop_y = (ssize_t) floor(bounds.y2 + 0.5); for (y = start_y; y <= stop_y; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum * magick_restrict q; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x = (ssize_t) ceil(bounds.x1 - 0.5); stop_x = (ssize_t) floor(bounds.x2 + 0.5); x = start_x; q = GetCacheViewAuthenticPixels(image_view, x, y, (size_t) (stop_x - x + 1), 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } GetPixelInfo(image, &pixel); for (; x <= stop_x; x++) { if ((x == (ssize_t) ceil(primitive_info->point.x - 0.5)) && (y == (ssize_t) ceil(primitive_info->point.y - 0.5))) { GetFillColor(draw_info, x - start_x, y - start_y, &pixel, exception); SetPixelViaPixelInfo(image, &pixel, q); } q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); polygon_info = DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-polygon"); return (status); } /* * Draw polygon or line. */ start_y = (ssize_t) ceil(bounds.y1 - 0.5); stop_y = (ssize_t) floor(bounds.y2 + 0.5); for (y = start_y; y <= stop_y; y++) { const int id = GetOpenMPThreadId(); register Quantum * magick_restrict q; register ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x = (ssize_t) ceil(bounds.x1 - 0.5); stop_x = (ssize_t) floor(bounds.x2 + 0.5); q = GetCacheViewAuthenticPixels(image_view, start_x, y, (size_t) (stop_x - start_x + 1), 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = start_x; x <= stop_x; x++) { double fill_alpha, stroke_alpha; PixelInfo fill_color, stroke_color; /* * Fill and/or stroke. */ fill_alpha = GetFillAlpha(polygon_info[id], mid, fill, draw_info->fill_rule, x, y, &stroke_alpha); if (draw_info->stroke_antialias == MagickFalse) { fill_alpha = fill_alpha > 0.25 ? 
1.0 : 0.0;
          stroke_alpha = stroke_alpha > 0.25 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info, x - start_x, y - start_y, &fill_color,
        exception);
      CompositePixelOver(image, &fill_color, fill_alpha * fill_color.alpha, q,
        (double)GetPixelAlpha(image, q), q);
      GetStrokeColor(draw_info, x - start_x, y - start_y, &stroke_color,
        exception);
      CompositePixelOver(image, &stroke_color,
        stroke_alpha * stroke_color.alpha, q, (double)GetPixelAlpha(image, q),
        q);
      q += GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse)
      status = MagickFalse;
  }
  image_view = DestroyCacheView(image_view);
  polygon_info = DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-polygon");
  return (status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   P r i m i t i v e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
%  The format of the DrawPrimitive method is:
%
%      MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
%        PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double ConstrainCoordinate(double x)
{
  if (x < (double)-(SSIZE_MAX - 512))
    return ((double)-(SSIZE_MAX - 512));
  if (x > (double)(SSIZE_MAX - 512))
    return ((double)(SSIZE_MAX - 512));
  return (x);
}

static void LogPrimitiveInfo(const PrimitiveInfo * primitive_info)
{
  const char
    *methods[] =
  {
    "point", "replace", "floodfill", "filltoborder", "reset", "?"
}; PointInfo p, point, q; register ssize_t i, x; ssize_t coordinates, y; x = (ssize_t) ceil(primitive_info->point.x - 0.5); y = (ssize_t) ceil(primitive_info->point.y - 0.5); switch (primitive_info->primitive) { case AlphaPrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "AlphaPrimitive %.20g,%.20g %s", (double)x, (double)y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "ColorPrimitive %.20g,%.20g %s", (double)x, (double)y, methods[primitive_info->method]); return; } case ImagePrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "ImagePrimitive %.20g,%.20g", (double)x, (double)y); return; } case PointPrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "PointPrimitive %.20g,%.20g %s", (double)x, (double)y, methods[primitive_info->method]); return; } case TextPrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "TextPrimitive %.20g,%.20g", (double)x, (double)y); return; } default: break; } coordinates = 0; p = primitive_info[0].point; q.x = (-1.0); q.y = (-1.0); for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point = primitive_info[i].point; if (coordinates <= 0) { coordinates = (ssize_t) primitive_info[i].coordinates; (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin open (%.20g)", (double)coordinates); p = point; } point = primitive_info[i].point; if ((fabs(q.x - point.x) >= MagickEpsilon) || (fabs(q.y - point.y) >= MagickEpsilon)) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %.20g: %.18g,%.18g", (double)coordinates, point.x, point.y); else (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %.20g: %g %g (duplicate)", (double)coordinates, point.x, point.y); q = point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x - point.x) >= MagickEpsilon) || (fabs(p.y - point.y) >= MagickEpsilon)) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end last (%.20g)", (double)coordinates); else (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end open (%.20g)", (double)coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image * image, const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, ExceptionInfo * exception) { CacheView * image_view; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin draw-primitive"); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g", draw_info->affine.sx, draw_info->affine.rx, draw_info->affine.ry, draw_info->affine.sy, draw_info->affine.tx, draw_info->affine.ty); } status = MagickTrue; if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) || (IsPixelInfoGray(&draw_info->stroke) == MagickFalse))) status &= SetImageColorspace(image, sRGBColorspace, exception); if (draw_info->compliance == SVGCompliance) { status &= SetImageMask(image, WritePixelMask, draw_info->clipping_mask, exception); status &= SetImageMask(image, CompositePixelMask, draw_info->composite_mask, exception); } x = (ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x - 0.5)); y = (ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y - 0.5)); image_view = AcquireAuthenticCacheView(image, exception); switch (primitive_info->primitive) { case AlphaPrimitive: { if (image->alpha_trait == UndefinedPixelTrait) status &= SetImageAlphaChannel(image, OpaqueAlphaChannel, exception); switch (primitive_info->method) { case 
PointMethod: default: { PixelInfo pixel; register Quantum * q; q = GetCacheViewAuthenticPixels(image_view, x, y, 1, 1, exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info, x, y, &pixel, exception); SetPixelAlpha(image, ClampToQuantum(pixel.alpha), q); status &= SyncCacheViewAuthenticPixels(image_view, exception); break; } case ReplaceMethod: { PixelInfo pixel, target; status &= GetOneCacheViewVirtualPixelInfo(image_view, x, y, &target, exception); GetPixelInfo(image, &pixel); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image, q, &pixel); if (IsFuzzyEquivalencePixelInfo(&pixel, &target) == MagickFalse) { q += GetPixelChannels(image); continue; } GetFillColor(draw_info, x, y, &pixel, exception); SetPixelAlpha(image, ClampToQuantum(pixel.alpha), q); q += GetPixelChannels(image); } status &= SyncCacheViewAuthenticPixels(image_view, exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { ChannelType channel_mask; PixelInfo target; status &= GetOneVirtualPixelInfo(image, TileVirtualPixelMethod, x, y, &target, exception); if (primitive_info->method == FillToBorderMethod) { target.red = (double)draw_info->border_color.red; target.green = (double)draw_info->border_color.green; target.blue = (double)draw_info->border_color.blue; } channel_mask = SetImageChannelMask(image, AlphaChannel); status &= FloodfillPaintImage(image, draw_info, &target, x, y, primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue, exception); (void)SetImageChannelMask(image, channel_mask); break; } case ResetMethod: { PixelInfo pixel; for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info, x, y, &pixel, exception); SetPixelAlpha(image, ClampToQuantum(pixel.alpha), q); q += GetPixelChannels(image); } status &= SyncCacheViewAuthenticPixels(image_view, exception); if (status == MagickFalse) break; } break; } } break; } case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum * q; q = GetCacheViewAuthenticPixels(image_view, x, y, 1, 1, exception); if (q == (Quantum *) NULL) break; GetPixelInfo(image, &pixel); GetFillColor(draw_info, x, y, &pixel, exception); SetPixelViaPixelInfo(image, &pixel, q); status &= SyncCacheViewAuthenticPixels(image_view, exception); break; } case ReplaceMethod: { PixelInfo pixel, target; status &= GetOneCacheViewVirtualPixelInfo(image_view, x, y, &target, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image, q, &pixel); if (IsFuzzyEquivalencePixelInfo(&pixel, &target) == MagickFalse) { q += GetPixelChannels(image); continue; } GetFillColor(draw_info, x, y, &pixel, exception); SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } status &= SyncCacheViewAuthenticPixels(image_view, exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { 
PixelInfo target; status &= GetOneVirtualPixelInfo(image, TileVirtualPixelMethod, x, y, &target, exception); if (primitive_info->method == FillToBorderMethod) { target.red = (double)draw_info->border_color.red; target.green = (double)draw_info->border_color.green; target.blue = (double)draw_info->border_color.blue; } status &= FloodfillPaintImage(image, draw_info, &target, x, y, primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue, exception); break; } case ResetMethod: { PixelInfo pixel; GetPixelInfo(image, &pixel); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info, x, y, &pixel, exception); SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } status &= SyncCacheViewAuthenticPixels(image_view, exception); if (status == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MagickPathExtent]; Image * composite_image, *composite_images; ImageInfo * clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *)NULL) break; clone_info = AcquireImageInfo(); composite_images = (Image *) NULL; if (LocaleNCompare(primitive_info->text, "data:", 5) == 0) composite_images = ReadInlineImage(clone_info, primitive_info->text, exception); else if (*primitive_info->text != '\0') { (void)CopyMagickString(clone_info->filename, primitive_info->text, MagickPathExtent); composite_images = ReadImage(clone_info, exception); } clone_info = DestroyImageInfo(clone_info); if (composite_images == (Image *) NULL) { status = MagickFalse; break; } composite_image = RemoveFirstImageFromList(&composite_images); composite_images = DestroyImageList(composite_images); (void)SetImageProgressMonitor(composite_image, (MagickProgressMonitor) NULL, (void *)NULL); x1 = (ssize_t) ceil(primitive_info[1].point.x - 0.5); y1 = (ssize_t) ceil(primitive_info[1].point.y - 0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { /* * Resize image. 
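 *
 * The '!' flag in the "%gx%g!" geometry below forces the composite image
 * to the exact width and height given by the second primitive point,
 * ignoring the source image's aspect ratio.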
*/ (void)FormatLocaleString(composite_geometry, MagickPathExtent, "%gx%g!", primitive_info[1].point.x, primitive_info[1].point.y); composite_image->filter = image->filter; status &= TransformImage(&composite_image, (char *)NULL, composite_geometry, exception); } if (composite_image->alpha_trait == UndefinedPixelTrait) status &= SetImageAlphaChannel(composite_image, OpaqueAlphaChannel, exception); if (draw_info->alpha != OpaqueAlpha) status &= SetImageAlpha(composite_image, draw_info->alpha, exception); SetGeometry(image, &geometry); image->gravity = draw_info->gravity; geometry.x = x; geometry.y = y; (void)FormatLocaleString(composite_geometry, MagickPathExtent, "%.20gx%.20g%+.20g%+.20g", (double)composite_image->columns, (double) composite_image->rows, (double)geometry.x, (double)geometry.y); (void)ParseGravityGeometry(image, composite_geometry, &geometry, exception); affine = draw_info->affine; affine.tx = (double)geometry.x; affine.ty = (double)geometry.y; composite_image->interpolate = image->interpolate; if ((draw_info->compose == OverCompositeOp) || (draw_info->compose == SrcOverCompositeOp)) status &= DrawAffineImage(image, composite_image, &affine, exception); else status &= CompositeImage(image, composite_image, draw_info->compose, MagickTrue, geometry.x, geometry.y, exception); composite_image = DestroyImage(composite_image); break; } case PointPrimitive: { PixelInfo fill_color; register Quantum * q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q = GetCacheViewAuthenticPixels(image_view, x, y, 1, 1, exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info, x, y, &fill_color, exception); CompositePixelOver(image, &fill_color, (double)fill_color.alpha, q, (double) GetPixelAlpha(image, q), q); status &= SyncCacheViewAuthenticPixels(image_view, exception); break; } case TextPrimitive: { char geometry[MagickPathExtent]; DrawInfo * clone_info; if (primitive_info->text == (char *)NULL) break; clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); (void)CloneString(&clone_info->text, primitive_info->text); (void)FormatLocaleString(geometry, MagickPathExtent, "%+f%+f", primitive_info->point.x, primitive_info->point.y); (void)CloneString(&clone_info->geometry, geometry); status &= AnnotateImage(image, clone_info, exception); clone_info = DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo * clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale = ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *)NULL) && (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) && (fabs(scale * draw_info->stroke_width) >= MagickEpsilon) && (draw_info->stroke.alpha != (Quantum) TransparentAlpha)) { /* * Draw dash polygon. */ clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->stroke_width = 0.0; clone_info->stroke.alpha = (MagickRealType) TransparentAlpha; status &= DrawPolygonPrimitive(image, clone_info, primitive_info, exception); clone_info = DestroyDrawInfo(clone_info); if (status != MagickFalse) status &= DrawDashPolygon(draw_info, primitive_info, image, exception); break; } mid = ExpandAffine(&draw_info->affine) * SaneStrokeWidth(image, draw_info) / 2.0; if ((mid > 1.0) && ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) || (draw_info->stroke_pattern != (Image *) NULL))) { double x, y; MagickBooleanType closed_path; /* * Draw strokes while respecting line cap/join attributes. 
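 *
 * Round joins (paired with round caps or a closed path) and multi-subpath
 * primitives rasterize correctly as plain polygons, so they go straight to
 * DrawPolygonPrimitive().  Everything else is rendered in two passes: the
 * fill with a transparent zero-width stroke, then the stroke itself via
 * DrawStrokePolygon(), which honors the cap and join attributes.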
*/ closed_path = primitive_info[0].closed_subpath; i = (ssize_t) primitive_info[0].coordinates; x = fabs(primitive_info[i - 1].point.x - primitive_info[0].point.x); y = fabs(primitive_info[i - 1].point.y - primitive_info[0].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) closed_path = MagickTrue; if ((((draw_info->linecap == RoundCap) || (closed_path != MagickFalse)) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { status &= DrawPolygonPrimitive(image, draw_info, primitive_info, exception); break; } clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->stroke_width = 0.0; clone_info->stroke.alpha = (MagickRealType) TransparentAlpha; status &= DrawPolygonPrimitive(image, clone_info, primitive_info, exception); clone_info = DestroyDrawInfo(clone_info); if (status != MagickFalse) status &= DrawStrokePolygon(image, draw_info, primitive_info, exception); break; } status &= DrawPolygonPrimitive(image, draw_info, primitive_info, exception); break; } } image_view = DestroyCacheView(image_view); if (draw_info->compliance == SVGCompliance) { status &= SetImageMask(image, WritePixelMask, (Image *) NULL, exception); status &= SetImageMask(image, CompositePixelMask, (Image *) NULL, exception); } if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-primitive"); return (status != 0 ? MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D r a w S t r o k e P o l y g o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, * ellipse) on % the image while respecting the line cap and join * attributes. % % The format of the DrawStrokePolygon method is: % % * MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo * *draw_info,const PrimitiveInfo *primitive_info) % % A description of each * parameter follows: % % o image: the image. % % o draw_info: the draw * info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo * structure. % % */ static MagickBooleanType DrawRoundLinecap(Image * image, const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, ExceptionInfo * exception) { PrimitiveInfo linecap[5]; register ssize_t i; for (i = 0; i < 4; i++) linecap[i] = (*primitive_info); linecap[0].coordinates = 4; linecap[1].point.x += 2.0 * MagickEpsilon; linecap[2].point.x += 2.0 * MagickEpsilon; linecap[2].point.y += 2.0 * MagickEpsilon; linecap[3].point.y += 2.0 * MagickEpsilon; linecap[4].primitive = UndefinedPrimitive; return (DrawPolygonPrimitive(image, draw_info, linecap, exception)); } static MagickBooleanType DrawStrokePolygon(Image * image, const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, ExceptionInfo * exception) { DrawInfo * clone_info; MagickBooleanType closed_path; MagickStatusType status; PrimitiveInfo * stroke_polygon; register const PrimitiveInfo * p, *q; /* * Draw stroked polygon. 
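 *
 * The stroke is drawn by converting each subpath into a closed polygon
 * that outlines the stroked region (TraceStrokePolygon) and filling it
 * with the stroke color; open subpaths with round caps get an explicit cap
 * polygon at each end (DrawRoundLinecap).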
*/ if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin draw-stroke-polygon"); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->fill = draw_info->stroke; if (clone_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern = DestroyImage(clone_info->fill_pattern); if (clone_info->stroke_pattern != (Image *) NULL) clone_info->fill_pattern = CloneImage(clone_info->stroke_pattern, 0, 0, MagickTrue, exception); clone_info->stroke.alpha = (MagickRealType) TransparentAlpha; clone_info->stroke_width = 0.0; clone_info->fill_rule = NonZeroRule; status = MagickTrue; for (p = primitive_info; p->primitive != UndefinedPrimitive; p += p->coordinates) { if (p->coordinates == 1) continue; stroke_polygon = TraceStrokePolygon(image, draw_info, p); if (stroke_polygon == (PrimitiveInfo *) NULL) { status = 0; break; } status &= DrawPolygonPrimitive(image, clone_info, stroke_polygon, exception); stroke_polygon = (PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon); if (status == 0) break; q = p + p->coordinates - 1; closed_path = p->closed_subpath; if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse)) { status &= DrawRoundLinecap(image, draw_info, p, exception); status &= DrawRoundLinecap(image, draw_info, q, exception); } } clone_info = DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-stroke-polygon"); return (status != 0 ? MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t A f f i n e M a t r i x * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetAffineMatrix() returns an AffineMatrix initialized to the * identity % matrix. % % The format of the GetAffineMatrix method is: % % * void GetAffineMatrix(AffineMatrix *affine_matrix) % % A description of * each parameter follows: % % o affine_matrix: the affine matrix. % */ MagickExport void GetAffineMatrix(AffineMatrix * affine_matrix) { (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(affine_matrix != (AffineMatrix *) NULL); (void)memset(affine_matrix, 0, sizeof(*affine_matrix)); affine_matrix->sx = 1.0; affine_matrix->sy = 1.0; } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + G e t D r a w I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetDrawInfo() initializes draw_info to default values from * image_info. % % The format of the GetDrawInfo method is: % % void * GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info) % % A * description of each parameter follows: % % o image_info: the image * info.. % % o draw_info: the draw info. % */ MagickExport void GetDrawInfo(const ImageInfo * image_info, DrawInfo * draw_info) { char *next_token; const char *option; ExceptionInfo * exception; ImageInfo * clone_info; /* * Initialize draw attributes. 
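 *
 * Defaults: black fill, transparent stroke of width 1, even-odd fill rule,
 * butt caps, miter joins with limit 10, 12-point text.  A minimal usage
 * sketch (image and exception are assumed to exist elsewhere):
 *
 *   DrawInfo *draw_info = AcquireDrawInfo();
 *   (void)CloneString(&draw_info->primitive, "circle 60,60 60,20");
 *   (void)DrawImage(image, draw_info, exception);
 *   draw_info = DestroyDrawInfo(draw_info);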
*/ (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(draw_info != (DrawInfo *) NULL); (void)memset(draw_info, 0, sizeof(*draw_info)); clone_info = CloneImageInfo(image_info); GetAffineMatrix(&draw_info->affine); exception = AcquireExceptionInfo(); (void)QueryColorCompliance("#000F", AllCompliance, &draw_info->fill, exception); (void)QueryColorCompliance("#FFF0", AllCompliance, &draw_info->stroke, exception); draw_info->stroke_antialias = clone_info->antialias; draw_info->stroke_width = 1.0; draw_info->fill_rule = EvenOddRule; draw_info->alpha = OpaqueAlpha; draw_info->fill_alpha = OpaqueAlpha; draw_info->stroke_alpha = OpaqueAlpha; draw_info->linecap = ButtCap; draw_info->linejoin = MiterJoin; draw_info->miterlimit = 10; draw_info->decorate = NoDecoration; draw_info->pointsize = 12.0; draw_info->undercolor.alpha = (MagickRealType) TransparentAlpha; draw_info->compose = OverCompositeOp; draw_info->render = MagickTrue; draw_info->clip_path = MagickFalse; draw_info->debug = IsEventLogging(); if (clone_info->font != (char *)NULL) draw_info->font = AcquireString(clone_info->font); if (clone_info->density != (char *)NULL) draw_info->density = AcquireString(clone_info->density); draw_info->text_antialias = clone_info->antialias; if (fabs(clone_info->pointsize) >= MagickEpsilon) draw_info->pointsize = clone_info->pointsize; draw_info->border_color = clone_info->border_color; if (clone_info->server_name != (char *)NULL) draw_info->server_name = AcquireString(clone_info->server_name); option = GetImageOption(clone_info, "direction"); if (option != (const char *)NULL) draw_info->direction = (DirectionType) ParseCommandOption( MagickDirectionOptions, MagickFalse, option); else draw_info->direction = UndefinedDirection; option = GetImageOption(clone_info, "encoding"); if (option != (const char *)NULL) (void)CloneString(&draw_info->encoding, option); option = GetImageOption(clone_info, "family"); if (option != (const char *)NULL) (void)CloneString(&draw_info->family, option); option = GetImageOption(clone_info, "fill"); if (option != (const char *)NULL) (void)QueryColorCompliance(option, AllCompliance, &draw_info->fill, exception); option = GetImageOption(clone_info, "gravity"); if (option != (const char *)NULL) draw_info->gravity = (GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse, option); option = GetImageOption(clone_info, "interline-spacing"); if (option != (const char *)NULL) draw_info->interline_spacing = StringToDouble(option, &next_token); option = GetImageOption(clone_info, "interword-spacing"); if (option != (const char *)NULL) draw_info->interword_spacing = StringToDouble(option, &next_token); option = GetImageOption(clone_info, "kerning"); if (option != (const char *)NULL) draw_info->kerning = StringToDouble(option, &next_token); option = GetImageOption(clone_info, "stroke"); if (option != (const char *)NULL) (void)QueryColorCompliance(option, AllCompliance, &draw_info->stroke, exception); option = GetImageOption(clone_info, "strokewidth"); if (option != (const char *)NULL) draw_info->stroke_width = StringToDouble(option, &next_token); option = GetImageOption(clone_info, "style"); if (option != (const char *)NULL) draw_info->style = (StyleType) ParseCommandOption(MagickStyleOptions, MagickFalse, option); option = GetImageOption(clone_info, "undercolor"); if (option != (const char *)NULL) (void)QueryColorCompliance(option, AllCompliance, &draw_info->undercolor, exception); option = GetImageOption(clone_info, "weight"); if (option != (const char *)NULL) { 
ssize_t weight; weight = ParseCommandOption(MagickWeightOptions, MagickFalse, option); if (weight == -1) weight = (ssize_t) StringToUnsignedLong(option); draw_info->weight = (size_t) weight; } exception = DestroyExceptionInfo(exception); draw_info->signature = MagickCoreSignature; clone_info = DestroyImageInfo(clone_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + P e r m u t a t e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % Permutate() returns the permuation of the (n,k). % % The format * of the Permutate method is: % % void Permutate(ssize_t n,ssize_t k) % * % A description of each parameter follows: % % o n: % % o k: % % */ static inline double Permutate(const ssize_t n, const ssize_t k) { double r; register ssize_t i; r = 1.0; for (i = k + 1; i <= n; i++) r *= i; for (i = 1; i <= (n - k); i++) r /= i; return (r); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + T r a c e P r i m i t i v e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % TracePrimitive is a collection of methods for generating graphic * % primitives such as arcs, ellipses, paths, etc. % */ static MagickBooleanType TraceArc(MVGInfo * mvg_info, const PointInfo start, const PointInfo end, const PointInfo degrees) { PointInfo center, radius; center.x = 0.5 * (end.x + start.x); center.y = 0.5 * (end.y + start.y); radius.x = fabs(center.x - start.x); radius.y = fabs(center.y - start.y); return (TraceEllipse(mvg_info, center, radius, degrees)); } static MagickBooleanType TraceArcPath(MVGInfo * mvg_info, const PointInfo start, const PointInfo end, const PointInfo arc, const double angle, const MagickBooleanType large_arc, const MagickBooleanType sweep) { double alpha, beta, delta, factor, gamma, theta; MagickStatusType status; PointInfo center, points[3], radii; register double cosine, sine; PrimitiveInfo * primitive_info; register PrimitiveInfo * p; register ssize_t i; size_t arc_segments; ssize_t offset; offset = mvg_info->offset; primitive_info = (*mvg_info->primitive_info) + mvg_info->offset; primitive_info->coordinates = 0; if ((fabs(start.x - end.x) < MagickEpsilon) && (fabs(start.y - end.y) < MagickEpsilon)) return (TracePoint(primitive_info, end)); radii.x = fabs(arc.x); radii.y = fabs(arc.y); if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon)) return (TraceLine(primitive_info, start, end)); cosine = cos(DegreesToRadians(fmod((double)angle, 360.0))); sine = sin(DegreesToRadians(fmod((double)angle, 360.0))); center.x = (double)(cosine * (end.x - start.x) / 2 + sine * (end.y - start.y) / 2); center.y = (double)(cosine * (end.y - start.y) / 2 - sine * (end.x - start.x) / 2); delta = (center.x * center.x) / (radii.x * radii.x) + (center.y * center.y) / (radii.y * radii.y); if (delta < MagickEpsilon) return (TraceLine(primitive_info, start, end)); if (delta > 1.0) { radii.x *= sqrt((double)delta); radii.y *= sqrt((double)delta); } points[0].x = (double)(cosine * start.x / radii.x + sine * start.y / radii.x); points[0].y = (double)(cosine * start.y / radii.y - sine * start.x / radii.y); points[1].x = (double)(cosine * end.x / radii.x + sine * end.y / radii.x); points[1].y = (double)(cosine * end.y / radii.y - sine * end.x / radii.y); alpha = points[1].x - points[0].x; beta = points[1].y - points[0].y; if (fabs(alpha * alpha + beta * beta) < MagickEpsilon) 
return (TraceLine(primitive_info, start, end)); factor = PerceptibleReciprocal(alpha * alpha + beta * beta) - 0.25; if (factor <= 0.0) factor = 0.0; else { factor = sqrt((double)factor); if (sweep == large_arc) factor = (-factor); } center.x = (double)((points[0].x + points[1].x) / 2 - factor * beta); center.y = (double)((points[0].y + points[1].y) / 2 + factor * alpha); alpha = atan2(points[0].y - center.y, points[0].x - center.x); theta = atan2(points[1].y - center.y, points[1].x - center.x) - alpha; if ((theta < 0.0) && (sweep != MagickFalse)) theta += 2.0 * MagickPI; else if ((theta > 0.0) && (sweep == MagickFalse)) theta -= 2.0 * MagickPI; arc_segments = (size_t) ceil(fabs((double)(theta / (0.5 * MagickPI + MagickEpsilon)))); status = MagickTrue; p = primitive_info; for (i = 0; i < (ssize_t) arc_segments; i++) { beta = 0.5 * ((alpha + (i + 1) * theta / arc_segments) - (alpha + i * theta / arc_segments)); gamma = (8.0 / 3.0) * sin(fmod((double)(0.5 * beta), DegreesToRadians(360.0))) * sin(fmod((double)(0.5 * beta), DegreesToRadians(360.0))) / sin(fmod((double)beta, DegreesToRadians(360.0))); points[0].x = (double)(center.x + cos(fmod((double)(alpha + (double)i * theta / arc_segments), DegreesToRadians(360.0))) - gamma * sin(fmod((double)(alpha + (double)i * theta / arc_segments), DegreesToRadians(360.0)))); points[0].y = (double)(center.y + sin(fmod((double)(alpha + (double)i * theta / arc_segments), DegreesToRadians(360.0))) + gamma * cos(fmod((double)(alpha + (double)i * theta / arc_segments), DegreesToRadians(360.0)))); points[2].x = (double)(center.x + cos(fmod((double)(alpha + (double)(i + 1) * theta / arc_segments), DegreesToRadians(360.0)))); points[2].y = (double)(center.y + sin(fmod((double)(alpha + (double)(i + 1) * theta / arc_segments), DegreesToRadians(360.0)))); points[1].x = (double)(points[2].x + gamma * sin(fmod((double)(alpha + (double) (i + 1) * theta / arc_segments), DegreesToRadians(360.0)))); points[1].y = (double)(points[2].y - gamma * cos(fmod((double)(alpha + (double) (i + 1) * theta / arc_segments), DegreesToRadians(360.0)))); p->point.x = (p == primitive_info) ? start.x : (p - 1)->point.x; p->point.y = (p == primitive_info) ? 
start.y : (p - 1)->point.y; (p + 1)->point.x = (double)(cosine * radii.x * points[0].x - sine * radii.y * points[0].y); (p + 1)->point.y = (double)(sine * radii.x * points[0].x + cosine * radii.y * points[0].y); (p + 2)->point.x = (double)(cosine * radii.x * points[1].x - sine * radii.y * points[1].y); (p + 2)->point.y = (double)(sine * radii.x * points[1].x + cosine * radii.y * points[1].y); (p + 3)->point.x = (double)(cosine * radii.x * points[2].x - sine * radii.y * points[2].y); (p + 3)->point.y = (double)(sine * radii.x * points[2].x + cosine * radii.y * points[2].y); if (i == (ssize_t) (arc_segments - 1)) (p + 3)->point = end; status &= TraceBezier(mvg_info, 4); if (status == 0) break; p = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += p->coordinates; p += p->coordinates; } if (status == 0) return (MagickFalse); mvg_info->offset = offset; primitive_info = (*mvg_info->primitive_info) + mvg_info->offset; primitive_info->coordinates = (size_t) (p - primitive_info); primitive_info->closed_subpath = MagickFalse; for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } return (MagickTrue); } static MagickBooleanType TraceBezier(MVGInfo * mvg_info, const size_t number_coordinates) { double alpha, *coefficients, weight; PointInfo end, point, *points; PrimitiveInfo * primitive_info; register PrimitiveInfo * p; register ssize_t i, j; size_t control_points, quantum; /* * Allocate coefficients. */ primitive_info = (*mvg_info->primitive_info) + mvg_info->offset; quantum = number_coordinates; for (i = 0; i < (ssize_t) number_coordinates; i++) { for (j = i + 1; j < (ssize_t) number_coordinates; j++) { alpha = fabs(primitive_info[j].point.x - primitive_info[i].point.x); if (alpha > (double)SSIZE_MAX) { (void)ThrowMagickException(mvg_info->exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", ""); return (MagickFalse); } if (alpha > (double)quantum) quantum = (size_t) alpha; alpha = fabs(primitive_info[j].point.y - primitive_info[i].point.y); if (alpha > (double)SSIZE_MAX) { (void)ThrowMagickException(mvg_info->exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", ""); return (MagickFalse); } if (alpha > (double)quantum) quantum = (size_t) alpha; } } primitive_info = (*mvg_info->primitive_info) + mvg_info->offset; quantum = MagickMin(quantum / number_coordinates, BezierQuantum); coefficients = (double *)AcquireQuantumMemory(number_coordinates, sizeof(*coefficients)); points = (PointInfo *) AcquireQuantumMemory(quantum, number_coordinates * sizeof(*points)); if ((coefficients == (double *)NULL) || (points == (PointInfo *) NULL)) { if (points != (PointInfo *) NULL) points = (PointInfo *) RelinquishMagickMemory(points); if (coefficients != (double *)NULL) coefficients = (double *)RelinquishMagickMemory(coefficients); (void)ThrowMagickException(mvg_info->exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", ""); return (MagickFalse); } control_points = quantum * number_coordinates; if (CheckPrimitiveExtent(mvg_info, control_points + 1) == MagickFalse) { points = (PointInfo *) RelinquishMagickMemory(points); coefficients = (double *)RelinquishMagickMemory(coefficients); return (MagickFalse); } primitive_info = (*mvg_info->primitive_info) + mvg_info->offset; /* * Compute bezier points. 
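 *
 * The curve is evaluated in Bernstein form: with n = number_coordinates
 * control points P_j and binomial coefficients C(n-1,j) from Permutate(),
 *
 *   B(t) = sum_{j=0}^{n-1} C(n-1,j) * (1-t)^(n-1-j) * t^j * P_j
 *
 * The loop below builds each term incrementally by multiplying the running
 * alpha by t/(1-t) rather than calling pow() per term.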
*/ end = primitive_info[number_coordinates - 1].point; for (i = 0; i < (ssize_t) number_coordinates; i++) coefficients[i] = Permutate((ssize_t) number_coordinates - 1, i); weight = 0.0; for (i = 0; i < (ssize_t) control_points; i++) { p = primitive_info; point.x = 0.0; point.y = 0.0; alpha = pow((double)(1.0 - weight), (double)number_coordinates - 1.0); for (j = 0; j < (ssize_t) number_coordinates; j++) { point.x += alpha * coefficients[j] * p->point.x; point.y += alpha * coefficients[j] * p->point.y; alpha *= weight / (1.0 - weight); p++; } points[i] = point; weight += 1.0 / control_points; } /* * Bezier curves are just short segmented polys. */ p = primitive_info; for (i = 0; i < (ssize_t) control_points; i++) { if (TracePoint(p, points[i]) == MagickFalse) { points = (PointInfo *) RelinquishMagickMemory(points); coefficients = (double *)RelinquishMagickMemory(coefficients); return (MagickFalse); } p += p->coordinates; } if (TracePoint(p, end) == MagickFalse) { points = (PointInfo *) RelinquishMagickMemory(points); coefficients = (double *)RelinquishMagickMemory(coefficients); return (MagickFalse); } p += p->coordinates; primitive_info->coordinates = (size_t) (p - primitive_info); primitive_info->closed_subpath = MagickFalse; for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } points = (PointInfo *) RelinquishMagickMemory(points); coefficients = (double *)RelinquishMagickMemory(coefficients); return (MagickTrue); } static MagickBooleanType TraceCircle(MVGInfo * mvg_info, const PointInfo start, const PointInfo end) { double alpha, beta, radius; PointInfo offset, degrees; alpha = end.x - start.x; beta = end.y - start.y; radius = hypot((double)alpha, (double)beta); offset.x = (double)radius; offset.y = (double)radius; degrees.x = 0.0; degrees.y = 360.0; return (TraceEllipse(mvg_info, start, offset, degrees)); } static MagickBooleanType TraceEllipse(MVGInfo * mvg_info, const PointInfo center, const PointInfo radii, const PointInfo arc) { double coordinates, delta, step, x, y; PointInfo angle, point; PrimitiveInfo * primitive_info; register PrimitiveInfo * p; register ssize_t i; /* * Ellipses are just short segmented polys. 
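 *
 * The arc from angle arc.x to arc.y (degrees) is sampled at a fixed
 * angular step, and each sample becomes a polygon vertex:
 *
 *   point.x = cos(theta) * radii.x + center.x
 *   point.y = sin(theta) * radii.y + center.y
 *
 * The step shrinks as the larger radius grows, so bigger ellipses get more
 * segments and stay visually smooth.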
*/ primitive_info = (*mvg_info->primitive_info) + mvg_info->offset; primitive_info->coordinates = 0; if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon)) return (MagickTrue); delta = 2.0 * PerceptibleReciprocal(MagickMax(radii.x, radii.y)); step = MagickPI / 8.0; if ((delta >= 0.0) && (delta < (MagickPI / 8.0))) step = MagickPI / 4.0 / (MagickPI * PerceptibleReciprocal(delta) / 2.0); angle.x = DegreesToRadians(arc.x); y = arc.y; while (y < arc.x) y += 360.0; angle.y = DegreesToRadians(y); coordinates = ceil((angle.y - angle.x) / step + 1.0); if (coordinates > (double)SSIZE_MAX) { (void)ThrowMagickException(mvg_info->exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", ""); return (MagickFalse); } if (CheckPrimitiveExtent(mvg_info, (size_t) coordinates) == MagickFalse) return (MagickFalse); primitive_info = (*mvg_info->primitive_info) + mvg_info->offset; for (p = primitive_info; angle.x < angle.y; angle.x += step) { point.x = cos(fmod(angle.x, DegreesToRadians(360.0))) * radii.x + center.x; point.y = sin(fmod(angle.x, DegreesToRadians(360.0))) * radii.y + center.y; if (TracePoint(p, point) == MagickFalse) return (MagickFalse); p += p->coordinates; } point.x = cos(fmod(angle.y, DegreesToRadians(360.0))) * radii.x + center.x; point.y = sin(fmod(angle.y, DegreesToRadians(360.0))) * radii.y + center.y; if (TracePoint(p, point) == MagickFalse) return (MagickFalse); p += p->coordinates; primitive_info->coordinates = (size_t) (p - primitive_info); primitive_info->closed_subpath = MagickFalse; x = fabs(primitive_info[0].point.x - primitive_info[primitive_info->coordinates - 1].point.x); y = fabs(primitive_info[0].point.y - primitive_info[primitive_info->coordinates - 1].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) primitive_info->closed_subpath = MagickTrue; for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } return (MagickTrue); } static MagickBooleanType TraceLine(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo end) { if (TracePoint(primitive_info, start) == MagickFalse) return (MagickFalse); if ((fabs(start.x - end.x) < MagickEpsilon) && (fabs(start.y - end.y) < MagickEpsilon)) { primitive_info->primitive = PointPrimitive; primitive_info->coordinates = 1; return (MagickTrue); } if (TracePoint(primitive_info + 1, end) == MagickFalse) return (MagickFalse); (primitive_info + 1)->primitive = primitive_info->primitive; primitive_info->coordinates = 2; primitive_info->closed_subpath = MagickFalse; return (MagickTrue); } static ssize_t TracePath(MVGInfo * mvg_info, const char *path, ExceptionInfo * exception) { char *next_token, token[MagickPathExtent]; const char *p; double x, y; int attribute, last_attribute; MagickBooleanType status; PointInfo end = { 0.0, 0.0 }, points[4] = { { 0.0, 0.0 }, { 0.0, 0.0 }, { 0.0, 0.0 }, { 0.0, 0.0 } }, point = { 0.0, 0.0 }, start = { 0.0, 0.0 }; PrimitiveInfo * primitive_info; PrimitiveType primitive_type; register PrimitiveInfo * q; register ssize_t i; size_t number_coordinates, z_count; ssize_t subpath_offset; subpath_offset = mvg_info->offset; primitive_info = (*mvg_info->primitive_info) + mvg_info->offset; status = MagickTrue; attribute = 0; number_coordinates = 0; z_count = 0; primitive_type = primitive_info->primitive; q = primitive_info; for (p = path; *p != '\0';) { if (status == MagickFalse) break; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == '\0') break; last_attribute = attribute; attribute = 
(int)(*p++); switch (attribute) { case 'a': case 'A': { double angle = 0.0; MagickBooleanType large_arc = MagickFalse, sweep = MagickFalse; PointInfo arc = { 0.0, 0.0 }; /* * Elliptical arc. */ do { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); arc.x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); arc.y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); angle = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); large_arc = StringToLong(token) != 0 ? MagickTrue : MagickFalse; (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); sweep = StringToLong(token) != 0 ? MagickTrue : MagickFalse; if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); end.x = (double)(attribute == (int)'A' ? x : point.x + x); end.y = (double)(attribute == (int)'A' ? y : point.y + y); if (TraceArcPath(mvg_info, point, end, arc, angle, large_arc, sweep) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += q->coordinates; q += q->coordinates; point = end; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* * Cubic Bézier curve. */ do { points[0] = point; for (i = 1; i < 4; i++) { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); end.x = (double)(attribute == (int)'C' ? x : point.x + x); end.y = (double)(attribute == (int)'C' ? 
y : point.y + y); points[i] = end; } for (i = 0; i < 4; i++) (q + i)->point = points[i]; if (TraceBezier(mvg_info, 4) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += q->coordinates; q += q->coordinates; point = end; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); point.x = (double)(attribute == (int)'H' ? x : point.x + x); if (CheckPrimitiveExtent(mvg_info, PrimitiveExtentPad) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; if (TracePoint(q, point) == MagickFalse) return (-1); mvg_info->offset += q->coordinates; q += q->coordinates; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { /* * Line to. */ do { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); point.x = (double)(attribute == (int)'L' ? x : point.x + x); point.y = (double)(attribute == (int)'L' ? y : point.y + y); if (CheckPrimitiveExtent(mvg_info, PrimitiveExtentPad) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; if (TracePoint(q, point) == MagickFalse) return (-1); mvg_info->offset += q->coordinates; q += q->coordinates; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { /* * Move to. */ if (mvg_info->offset != subpath_offset) { primitive_info = (*mvg_info->primitive_info) + subpath_offset; primitive_info->coordinates = (size_t) (q - primitive_info); number_coordinates += primitive_info->coordinates; primitive_info = q; subpath_offset = mvg_info->offset; } i = 0; do { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); point.x = (double)(attribute == (int)'M' ? x : point.x + x); point.y = (double)(attribute == (int)'M' ? y : point.y + y); if (i == 0) start = point; i++; if (CheckPrimitiveExtent(mvg_info, PrimitiveExtentPad) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; if (TracePoint(q, point) == MagickFalse) return (-1); mvg_info->offset += q->coordinates; q += q->coordinates; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* * Quadratic Bézier curve. 
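* A quadratic segment carries a single control point, so TraceBezier() is
* handed three coordinates: the current point, the control point, and the
* endpoint.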
*/ do { points[0] = point; for (i = 1; i < 3; i++) { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); if (*p == ',') p++; end.x = (double)(attribute == (int)'Q' ? x : point.x + x); end.y = (double)(attribute == (int)'Q' ? y : point.y + y); points[i] = end; } for (i = 0; i < 3; i++) (q + i)->point = points[i]; if (TraceBezier(mvg_info, 3) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += q->coordinates; q += q->coordinates; point = end; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* * Cubic Bézier curve. */ do { points[0] = points[3]; points[1].x = 2.0 * points[3].x - points[2].x; points[1].y = 2.0 * points[3].y - points[2].y; for (i = 2; i < 4; i++) { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); if (*p == ',') p++; end.x = (double)(attribute == (int)'S' ? x : point.x + x); end.y = (double)(attribute == (int)'S' ? y : point.y + y); points[i] = end; } if (strchr("CcSs", last_attribute) == (char *)NULL) { points[0] = point; points[1] = point; } for (i = 0; i < 4; i++) (q + i)->point = points[i]; if (TraceBezier(mvg_info, 4) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += q->coordinates; q += q->coordinates; point = end; last_attribute = attribute; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* * Quadratic Bézier curve. */ do { points[0] = points[2]; points[1].x = 2.0 * points[2].x - points[1].x; points[1].y = 2.0 * points[2].y - points[1].y; for (i = 2; i < 3; i++) { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); end.x = (double)(attribute == (int)'T' ? x : point.x + x); end.y = (double)(attribute == (int)'T' ? 
y : point.y + y); points[i] = end; } if (status == MagickFalse) break; if (strchr("QqTt", last_attribute) == (char *)NULL) { points[0] = point; points[1] = point; } for (i = 0; i < 3; i++) (q + i)->point = points[i]; if (TraceBezier(mvg_info, 3) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += q->coordinates; q += q->coordinates; point = end; last_attribute = attribute; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { /* * Line to. */ do { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); point.y = (double)(attribute == (int)'V' ? y : point.y + y); if (CheckPrimitiveExtent(mvg_info, PrimitiveExtentPad) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; if (TracePoint(q, point) == MagickFalse) return (-1); mvg_info->offset += q->coordinates; q += q->coordinates; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { /* * Close path. */ point = start; if (CheckPrimitiveExtent(mvg_info, PrimitiveExtentPad) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; if (TracePoint(q, point) == MagickFalse) return (-1); mvg_info->offset += q->coordinates; q += q->coordinates; primitive_info = (*mvg_info->primitive_info) + subpath_offset; primitive_info->coordinates = (size_t) (q - primitive_info); primitive_info->closed_subpath = MagickTrue; number_coordinates += primitive_info->coordinates; primitive_info = q; subpath_offset = mvg_info->offset; z_count++; break; } default: { ThrowPointExpectedException(token, exception); break; } } } if (status == MagickFalse) return (-1); primitive_info = (*mvg_info->primitive_info) + subpath_offset; primitive_info->coordinates = (size_t) (q - primitive_info); number_coordinates += primitive_info->coordinates; for (i = 0; i < (ssize_t) number_coordinates; i++) { q--; q->primitive = primitive_type; if (z_count > 1) q->method = FillToBorderMethod; } q = primitive_info; return ((ssize_t) number_coordinates); } static MagickBooleanType TraceRectangle(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo end) { PointInfo point; register PrimitiveInfo * p; register ssize_t i; p = primitive_info; if (TracePoint(p, start) == MagickFalse) return (MagickFalse); p += p->coordinates; point.x = start.x; point.y = end.y; if (TracePoint(p, point) == MagickFalse) return (MagickFalse); p += p->coordinates; if (TracePoint(p, end) == MagickFalse) return (MagickFalse); p += p->coordinates; point.x = end.x; point.y = start.y; if (TracePoint(p, point) == MagickFalse) return (MagickFalse); p += p->coordinates; if (TracePoint(p, start) == MagickFalse) return (MagickFalse); p += p->coordinates; primitive_info->coordinates = (size_t) (p - primitive_info); primitive_info->closed_subpath = MagickTrue; for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } return (MagickTrue); } static MagickBooleanType TraceRoundRectangle(MVGInfo * mvg_info, const PointInfo start, const PointInfo end, PointInfo arc) { PointInfo degrees, point, segment; PrimitiveInfo * primitive_info; register PrimitiveInfo * p; register ssize_t i; ssize_t offset; offset = mvg_info->offset; 
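/*
 * Trace four quarter-ellipses (270-360, 0-90, 90-180, and 180-270 degrees),
 * clamping the corner radii to half the rectangle extent, then emit one
 * final point to close the subpath; this serves e.g. the MVG primitive
 * "roundrectangle 10,10 120,70 8,8".
 */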
segment.x = fabs(end.x - start.x); segment.y = fabs(end.y - start.y); if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon)) { (*mvg_info->primitive_info + mvg_info->offset)->coordinates = 0; return (MagickTrue); } if (arc.x > (0.5 * segment.x)) arc.x = 0.5 * segment.x; if (arc.y > (0.5 * segment.y)) arc.y = 0.5 * segment.y; point.x = start.x + segment.x - arc.x; point.y = start.y + arc.y; degrees.x = 270.0; degrees.y = 360.0; if (TraceEllipse(mvg_info, point, arc, degrees) == MagickFalse) return (MagickFalse); p = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += p->coordinates; point.x = start.x + segment.x - arc.x; point.y = start.y + segment.y - arc.y; degrees.x = 0.0; degrees.y = 90.0; if (TraceEllipse(mvg_info, point, arc, degrees) == MagickFalse) return (MagickFalse); p = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += p->coordinates; point.x = start.x + arc.x; point.y = start.y + segment.y - arc.y; degrees.x = 90.0; degrees.y = 180.0; if (TraceEllipse(mvg_info, point, arc, degrees) == MagickFalse) return (MagickFalse); p = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += p->coordinates; point.x = start.x + arc.x; point.y = start.y + arc.y; degrees.x = 180.0; degrees.y = 270.0; if (TraceEllipse(mvg_info, point, arc, degrees) == MagickFalse) return (MagickFalse); p = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += p->coordinates; if (CheckPrimitiveExtent(mvg_info, PrimitiveExtentPad) == MagickFalse) return (MagickFalse); p = (*mvg_info->primitive_info) + mvg_info->offset; if (TracePoint(p, (*mvg_info->primitive_info + offset)->point) == MagickFalse) return (MagickFalse); p += p->coordinates; mvg_info->offset = offset; primitive_info = (*mvg_info->primitive_info) + offset; primitive_info->coordinates = (size_t) (p - primitive_info); primitive_info->closed_subpath = MagickTrue; for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } return (MagickTrue); } static MagickBooleanType TraceSquareLinecap(PrimitiveInfo * primitive_info, const size_t number_vertices, const double offset) { double distance; register double dx, dy; register ssize_t i; ssize_t j; dx = 0.0; dy = 0.0; for (i = 1; i < (ssize_t) number_vertices; i++) { dx = primitive_info[0].point.x - primitive_info[i].point.x; dy = primitive_info[0].point.y - primitive_info[i].point.y; if ((fabs((double)dx) >= MagickEpsilon) || (fabs((double)dy) >= MagickEpsilon)) break; } if (i == (ssize_t) number_vertices) i = (ssize_t) number_vertices - 1L; distance = hypot((double)dx, (double)dy); primitive_info[0].point.x = (double)(primitive_info[i].point.x + dx * (distance + offset) / distance); primitive_info[0].point.y = (double)(primitive_info[i].point.y + dy * (distance + offset) / distance); for (j = (ssize_t) number_vertices - 2; j >= 0; j--) { dx = primitive_info[number_vertices - 1].point.x - primitive_info[j].point.x; dy = primitive_info[number_vertices - 1].point.y - primitive_info[j].point.y; if ((fabs((double)dx) >= MagickEpsilon) || (fabs((double)dy) >= MagickEpsilon)) break; } distance = hypot((double)dx, (double)dy); primitive_info[number_vertices - 1].point.x = (double)(primitive_info[j].point.x + dx * (distance + offset) / distance); primitive_info[number_vertices - 1].point.y = (double)(primitive_info[j].point.y + dy * (distance + offset) / distance); return (MagickTrue); } static PrimitiveInfo * TraceStrokePolygon(const Image * image, const DrawInfo * draw_info, const PrimitiveInfo * 
primitive_info) { #define MaxStrokePad (6*BezierQuantum+360) #define CheckPathExtent(pad_p,pad_q) \ { \ if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \ { \ if (~extent_p < (pad_p)) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ else \ { \ extent_p+=(pad_p); \ stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \ MaxStrokePad,sizeof(*stroke_p)); \ } \ } \ if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \ { \ if (~extent_q < (pad_q)) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ else \ { \ extent_q+=(pad_q); \ stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \ MaxStrokePad,sizeof(*stroke_q)); \ } \ } \ if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \ { \ if (stroke_p != (PointInfo *) NULL) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ if (stroke_q != (PointInfo *) NULL) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ polygon_primitive=(PrimitiveInfo *) \ RelinquishMagickMemory(polygon_primitive); \ return((PrimitiveInfo *) NULL); \ } \ } typedef struct _StrokeSegment { double p, q; } StrokeSegment; double delta_theta, dot_product, mid, miterlimit; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *stroke_p, *stroke_q; PrimitiveInfo * polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, extent_p, extent_q, number_vertices; ssize_t j, n, p, q; StrokeSegment dx = { 0.0, 0.0 }, dy = { 0.0, 0.0 }, inverse_slope = { 0.0, 0.0 }, slope = { 0.0, 0.0 }, theta = { 0.0, 0.0 }; /* * Allocate paths. */ number_vertices = primitive_info->coordinates; polygon_primitive = (PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices + 2UL, sizeof(*polygon_primitive)); if (polygon_primitive == (PrimitiveInfo *) NULL) return ((PrimitiveInfo *) NULL); (void)memcpy(polygon_primitive, primitive_info, (size_t) number_vertices * sizeof(*polygon_primitive)); offset.x = primitive_info[number_vertices - 1].point.x - primitive_info[0].point.x; offset.y = primitive_info[number_vertices - 1].point.y - primitive_info[0].point.y; closed_path = (fabs(offset.x) < MagickEpsilon) && (fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices] = primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive = UndefinedPrimitive; /* * Compute the slope for the first line segment, p. */ dx.p = 0.0; dy.p = 0.0; for (n = 1; n < (ssize_t) number_vertices; n++) { dx.p = polygon_primitive[n].point.x - polygon_primitive[0].point.x; dy.p = polygon_primitive[n].point.y - polygon_primitive[0].point.y; if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon)) break; } if (n == (ssize_t) number_vertices) { if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse)) { /* * Zero length subpath. 
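* A zero-length subpath produces an empty stroke polygon unless the linecap
* is round and the path is open, in which case control falls through and
* the cap is still rendered as a dot.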
*/ stroke_polygon = (PrimitiveInfo *) AcquireCriticalMemory( sizeof(*stroke_polygon)); stroke_polygon[0] = polygon_primitive[0]; stroke_polygon[0].coordinates = 0; polygon_primitive = (PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return (stroke_polygon); } n = (ssize_t) number_vertices - 1L; } extent_p = 2 * number_vertices; extent_q = 2 * number_vertices; stroke_p = (PointInfo *) AcquireQuantumMemory((size_t) extent_p + MaxStrokePad, sizeof(*stroke_p)); stroke_q = (PointInfo *) AcquireQuantumMemory((size_t) extent_q + MaxStrokePad, sizeof(*stroke_q)); if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) { if (stroke_p != (PointInfo *) NULL) stroke_p = (PointInfo *) RelinquishMagickMemory(stroke_p); if (stroke_q != (PointInfo *) NULL) stroke_q = (PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive = (PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return ((PrimitiveInfo *) NULL); } slope.p = 0.0; inverse_slope.p = 0.0; if (fabs(dx.p) < MagickEpsilon) { if (dx.p >= 0.0) slope.p = dy.p < 0.0 ? -1.0 / MagickEpsilon : 1.0 / MagickEpsilon; else slope.p = dy.p < 0.0 ? 1.0 / MagickEpsilon : -1.0 / MagickEpsilon; } else if (fabs(dy.p) < MagickEpsilon) { if (dy.p >= 0.0) inverse_slope.p = dx.p < 0.0 ? -1.0 / MagickEpsilon : 1.0 / MagickEpsilon; else inverse_slope.p = dx.p < 0.0 ? 1.0 / MagickEpsilon : -1.0 / MagickEpsilon; } else { slope.p = dy.p / dx.p; inverse_slope.p = (-1.0 / slope.p); } mid = ExpandAffine(&draw_info->affine) * SaneStrokeWidth(image, draw_info) / 2.0; miterlimit = (double)(draw_info->miterlimit * draw_info->miterlimit * mid * mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) (void)TraceSquareLinecap(polygon_primitive, number_vertices, mid); offset.x = sqrt((double)(mid * mid / (inverse_slope.p * inverse_slope.p + 1.0))); offset.y = (double)(offset.x * inverse_slope.p); if ((dy.p * offset.x - dx.p * offset.y) > 0.0) { box_p[0].x = polygon_primitive[0].point.x - offset.x; box_p[0].y = polygon_primitive[0].point.y - offset.x * inverse_slope.p; box_p[1].x = polygon_primitive[n].point.x - offset.x; box_p[1].y = polygon_primitive[n].point.y - offset.x * inverse_slope.p; box_q[0].x = polygon_primitive[0].point.x + offset.x; box_q[0].y = polygon_primitive[0].point.y + offset.x * inverse_slope.p; box_q[1].x = polygon_primitive[n].point.x + offset.x; box_q[1].y = polygon_primitive[n].point.y + offset.x * inverse_slope.p; } else { box_p[0].x = polygon_primitive[0].point.x + offset.x; box_p[0].y = polygon_primitive[0].point.y + offset.y; box_p[1].x = polygon_primitive[n].point.x + offset.x; box_p[1].y = polygon_primitive[n].point.y + offset.y; box_q[0].x = polygon_primitive[0].point.x - offset.x; box_q[0].y = polygon_primitive[0].point.y - offset.y; box_q[1].x = polygon_primitive[n].point.x - offset.x; box_q[1].y = polygon_primitive[n].point.y - offset.y; } /* * Create strokes for the line join attribute: bevel, miter, round. */ p = 0; q = 0; stroke_q[p++] = box_q[0]; stroke_p[q++] = box_p[0]; for (i = (ssize_t) n + 1; i < (ssize_t) number_vertices; i++) { /* * Compute the slope for this line segment, q. */ dx.q = polygon_primitive[i].point.x - polygon_primitive[n].point.x; dy.q = polygon_primitive[i].point.y - polygon_primitive[n].point.y; dot_product = dx.q * dx.q + dy.q * dy.q; if (dot_product < 0.25) continue; slope.q = 0.0; inverse_slope.q = 0.0; if (fabs(dx.q) < MagickEpsilon) { if (dx.q >= 0.0) slope.q = dy.q < 0.0 ? -1.0 / MagickEpsilon : 1.0 / MagickEpsilon; else slope.q = dy.q < 0.0 ? 
1.0 / MagickEpsilon : -1.0 / MagickEpsilon; } else if (fabs(dy.q) < MagickEpsilon) { if (dy.q >= 0.0) inverse_slope.q = dx.q < 0.0 ? -1.0 / MagickEpsilon : 1.0 / MagickEpsilon; else inverse_slope.q = dx.q < 0.0 ? 1.0 / MagickEpsilon : -1.0 / MagickEpsilon; } else { slope.q = dy.q / dx.q; inverse_slope.q = (-1.0 / slope.q); } offset.x = sqrt((double)(mid * mid / (inverse_slope.q * inverse_slope.q + 1.0))); offset.y = (double)(offset.x * inverse_slope.q); dot_product = dy.q * offset.x - dx.q * offset.y; if (dot_product > 0.0) { box_p[2].x = polygon_primitive[n].point.x - offset.x; box_p[2].y = polygon_primitive[n].point.y - offset.y; box_p[3].x = polygon_primitive[i].point.x - offset.x; box_p[3].y = polygon_primitive[i].point.y - offset.y; box_q[2].x = polygon_primitive[n].point.x + offset.x; box_q[2].y = polygon_primitive[n].point.y + offset.y; box_q[3].x = polygon_primitive[i].point.x + offset.x; box_q[3].y = polygon_primitive[i].point.y + offset.y; } else { box_p[2].x = polygon_primitive[n].point.x + offset.x; box_p[2].y = polygon_primitive[n].point.y + offset.y; box_p[3].x = polygon_primitive[i].point.x + offset.x; box_p[3].y = polygon_primitive[i].point.y + offset.y; box_q[2].x = polygon_primitive[n].point.x - offset.x; box_q[2].y = polygon_primitive[n].point.y - offset.y; box_q[3].x = polygon_primitive[i].point.x - offset.x; box_q[3].y = polygon_primitive[i].point.y - offset.y; } if (fabs((double)(slope.p - slope.q)) < MagickEpsilon) { box_p[4] = box_p[1]; box_q[4] = box_q[1]; } else { box_p[4].x = (double)((slope.p * box_p[0].x - box_p[0].y - slope.q * box_p[3].x + box_p[3].y) / (slope.p - slope.q)); box_p[4].y = (double)(slope.p * (box_p[4].x - box_p[0].x) + box_p[0].y); box_q[4].x = (double)((slope.p * box_q[0].x - box_q[0].y - slope.q * box_q[3].x + box_q[3].y) / (slope.p - slope.q)); box_q[4].y = (double)(slope.p * (box_q[4].x - box_q[0].x) + box_q[0].y); } CheckPathExtent(MaxStrokePad, MaxStrokePad); dot_product = dx.q * dy.p - dx.p * dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { stroke_q[q++] = box_q[1]; stroke_q[q++] = box_q[2]; dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++] = box_p[4]; else { stroke_p[p++] = box_p[1]; stroke_p[p++] = box_p[2]; } break; } case MiterJoin: { dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++] = box_q[4]; stroke_p[p++] = box_p[4]; } else { stroke_q[q++] = box_q[1]; stroke_q[q++] = box_q[2]; stroke_p[p++] = box_p[1]; stroke_p[p++] = box_p[2]; } break; } case RoundJoin: { dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++] = box_p[4]; else { stroke_p[p++] = box_p[1]; stroke_p[p++] = box_p[2]; } center = polygon_primitive[n].point; theta.p = atan2(box_q[1].y - center.y, box_q[1].x - center.x); theta.q = atan2(box_q[2].y - center.y, box_q[2].x - center.x); if (theta.q < theta.p) theta.q += 2.0 * MagickPI; arc_segments = (size_t) ceil((double)((theta.q - theta.p) / (2.0 * sqrt((double)(1.0 / mid))))); CheckPathExtent(MaxStrokePad, arc_segments + MaxStrokePad); stroke_q[q].x = box_q[1].x; stroke_q[q].y = box_q[1].y; q++; for (j = 1; j < (ssize_t) arc_segments; j++) { delta_theta = (double)(j * (theta.q - theta.p) / arc_segments); stroke_q[q].x = (double)(center.x + 
mid * cos(fmod((double) (theta.p + delta_theta), DegreesToRadians(360.0)))); stroke_q[q].y = (double)(center.y + mid * sin(fmod((double) (theta.p + delta_theta), DegreesToRadians(360.0)))); q++; } stroke_q[q++] = box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { stroke_p[p++] = box_p[1]; stroke_p[p++] = box_p[2]; dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++] = box_q[4]; else { stroke_q[q++] = box_q[1]; stroke_q[q++] = box_q[2]; } break; } case MiterJoin: { dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++] = box_q[4]; stroke_p[p++] = box_p[4]; } else { stroke_q[q++] = box_q[1]; stroke_q[q++] = box_q[2]; stroke_p[p++] = box_p[1]; stroke_p[p++] = box_p[2]; } break; } case RoundJoin: { dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++] = box_q[4]; else { stroke_q[q++] = box_q[1]; stroke_q[q++] = box_q[2]; } center = polygon_primitive[n].point; theta.p = atan2(box_p[1].y - center.y, box_p[1].x - center.x); theta.q = atan2(box_p[2].y - center.y, box_p[2].x - center.x); if (theta.p < theta.q) theta.p += 2.0 * MagickPI; arc_segments = (size_t) ceil((double)((theta.p - theta.q) / (2.0 * sqrt((double)(1.0 / mid))))); CheckPathExtent(arc_segments + MaxStrokePad, MaxStrokePad); stroke_p[p++] = box_p[1]; for (j = 1; j < (ssize_t) arc_segments; j++) { delta_theta = (double)(j * (theta.q - theta.p) / arc_segments); stroke_p[p].x = (double)(center.x + mid * cos(fmod((double) (theta.p + delta_theta), DegreesToRadians(360.0)))); stroke_p[p].y = (double)(center.y + mid * sin(fmod((double) (theta.p + delta_theta), DegreesToRadians(360.0)))); p++; } stroke_p[p++] = box_p[2]; break; } default: break; } slope.p = slope.q; inverse_slope.p = inverse_slope.q; box_p[0] = box_p[2]; box_p[1] = box_p[3]; box_q[0] = box_q[2]; box_q[1] = box_q[3]; dx.p = dx.q; dy.p = dy.q; n = i; } stroke_p[p++] = box_p[1]; stroke_q[q++] = box_q[1]; /* * Trace stroked polygon. */ stroke_polygon = (PrimitiveInfo *) AcquireQuantumMemory((size_t) (p + q + 2UL * closed_path + 2UL), sizeof(*stroke_polygon)); if (stroke_polygon != (PrimitiveInfo *) NULL) { for (i = 0; i < (ssize_t) p; i++) { stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = stroke_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = stroke_polygon[0].point; i++; } for (; i < (ssize_t) (p + q + closed_path); i++) { stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = stroke_q[p + q + closed_path - (i + 1)]; } if (closed_path != MagickFalse) { stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = stroke_polygon[p + closed_path].point; i++; } stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = stroke_polygon[0].point; i++; stroke_polygon[i].primitive = UndefinedPrimitive; stroke_polygon[0].coordinates = (size_t) (p + q + 2 * closed_path + 1); } stroke_p = (PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q = (PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive = (PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return (stroke_polygon); }
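/*
 * A minimal usage sketch (not part of draw.c): drive the tracing code above
 * through the public DrawImage() API by stroking a cubic Bezier path.  The
 * canvas size, colors, and output filename are illustrative; build against
 * MagickCore 7, e.g. `cc demo.c $(pkg-config --cflags --libs MagickCore)'.
 */
#include "MagickCore/MagickCore.h"

int main(int argc, char **argv)
{
  DrawInfo *draw_info;
  ExceptionInfo *exception;
  Image *image;
  ImageInfo *image_info;

  MagickCoreGenesis(argc > 0 ? argv[0] : "demo", MagickFalse);
  exception = AcquireExceptionInfo();
  image_info = AcquireImageInfo();
  (void) CloneString(&image_info->size, "200x120");  /* canvas extent */
  (void) CopyMagickString(image_info->filename, "xc:white", MagickPathExtent);
  image = ReadImage(image_info, exception);  /* solid white canvas */
  if (image != (Image *) NULL)
    {
      draw_info = AcquireDrawInfo();
      /*
       * This MVG content exercises TracePath(), TraceBezier(), and
       * TraceStrokePolygon() above with a round linejoin.
       */
      (void) CloneString(&draw_info->primitive,
        "stroke blue stroke-width 8 fill none stroke-linejoin round "
        "path 'M 20,90 C 20,20 180,20 180,90'");
      (void) DrawImage(image, draw_info, exception);
      (void) CopyMagickString(image->filename, "stroke_demo.png",
        MagickPathExtent);
      (void) WriteImage(image_info, image, exception);
      draw_info = DestroyDrawInfo(draw_info);
      image = DestroyImage(image);
    }
  image_info = DestroyImageInfo(image_info);
  exception = DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return (0);
}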
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* * Define declarations. */ #define BezierQuantum 200 #define PrimitiveExtentPad 2048 #define MaxBezierCoordinates 67108864 #define ThrowPointExpectedException(token,exception) \ { \ (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \ "NonconformingDrawingPrimitiveDefinition","`%s'",token); \ status=MagickFalse; \ break; \ } /* * Typedef declarations. */ typedef struct _EdgeInfo { SegmentInfo bounds; double scanline; PointInfo * points; size_t number_points; ssize_t direction; MagickBooleanType ghostline; size_t highwater; } EdgeInfo; typedef struct _ElementInfo { double cx, cy, major, minor, angle; } ElementInfo; typedef struct _MVGInfo { PrimitiveInfo ** primitive_info; size_t * extent; ssize_t offset; PointInfo point; ExceptionInfo * exception; } MVGInfo; typedef struct _PolygonInfo { EdgeInfo * edges; size_t number_edges; } PolygonInfo; typedef enum { MoveToCode, OpenCode, GhostlineCode, LineToCode, EndCode } PathInfoCode; typedef struct _PathInfo { PointInfo point; PathInfoCode code; } PathInfo; /* * Forward declarations. 
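* The Trace* helpers each append the point list for one primitive and
* return MagickFalse when memory or coordinate limits are exceeded.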
*/ static Image * DrawClippingMask(Image *, const DrawInfo *, const char *, const char *, ExceptionInfo *); static MagickBooleanType DrawStrokePolygon(Image *, const DrawInfo *, const PrimitiveInfo *, ExceptionInfo *), RenderMVGContent(Image *, const DrawInfo *, const size_t, ExceptionInfo *), TraceArc(MVGInfo *, const PointInfo, const PointInfo, const PointInfo), TraceArcPath(MVGInfo *, const PointInfo, const PointInfo, const PointInfo, const double, const MagickBooleanType, const MagickBooleanType), TraceBezier(MVGInfo *, const size_t), TraceCircle(MVGInfo *, const PointInfo, const PointInfo), TraceEllipse(MVGInfo *, const PointInfo, const PointInfo, const PointInfo), TraceLine(PrimitiveInfo *, const PointInfo, const PointInfo), TraceRectangle(PrimitiveInfo *, const PointInfo, const PointInfo), TraceRoundRectangle(MVGInfo *, const PointInfo, const PointInfo, PointInfo), TraceSquareLinecap(PrimitiveInfo *, const size_t, const double); static PrimitiveInfo * TraceStrokePolygon(const Image *, const DrawInfo *, const PrimitiveInfo *); static ssize_t TracePath(MVGInfo *, const char *, ExceptionInfo *); /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % A c q u i r e D r a w I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % AcquireDrawInfo() returns a DrawInfo structure properly * initialized. % % The format of the AcquireDrawInfo method is: % % * DrawInfo *AcquireDrawInfo(void) % */ MagickExport DrawInfo * AcquireDrawInfo(void) { DrawInfo * draw_info; draw_info = (DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info)); GetDrawInfo((ImageInfo *) NULL, draw_info); return (draw_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C l o n e D r a w I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CloneDrawInfo() makes a copy of the given draw_info structure. * If NULL % is specified, a new DrawInfo structure is created initialized * to default % values. % % The format of the CloneDrawInfo method is: % % * DrawInfo *CloneDrawInfo(const ImageInfo *image_info, % const * DrawInfo *draw_info) % % A description of each parameter follows: % % * o image_info: the image info. % % o draw_info: the draw info. 
% */ MagickExport DrawInfo * CloneDrawInfo(const ImageInfo * image_info, const DrawInfo * draw_info) { DrawInfo * clone_info; ExceptionInfo * exception; clone_info = (DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info)); GetDrawInfo(image_info, clone_info); if (draw_info == (DrawInfo *) NULL) return (clone_info); exception = AcquireExceptionInfo(); if (draw_info->id != (char *)NULL) (void)CloneString(&clone_info->id, draw_info->id); if (draw_info->primitive != (char *)NULL) (void)CloneString(&clone_info->primitive, draw_info->primitive); if (draw_info->geometry != (char *)NULL) (void)CloneString(&clone_info->geometry, draw_info->geometry); clone_info->compliance = draw_info->compliance; clone_info->viewbox = draw_info->viewbox; clone_info->affine = draw_info->affine; clone_info->gravity = draw_info->gravity; clone_info->fill = draw_info->fill; clone_info->stroke = draw_info->stroke; clone_info->stroke_width = draw_info->stroke_width; if (draw_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern = CloneImage(draw_info->fill_pattern, 0, 0, MagickTrue, exception); if (draw_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern = CloneImage(draw_info->stroke_pattern, 0, 0, MagickTrue, exception); clone_info->stroke_antialias = draw_info->stroke_antialias; clone_info->text_antialias = draw_info->text_antialias; clone_info->fill_rule = draw_info->fill_rule; clone_info->linecap = draw_info->linecap; clone_info->linejoin = draw_info->linejoin; clone_info->miterlimit = draw_info->miterlimit; clone_info->dash_offset = draw_info->dash_offset; clone_info->decorate = draw_info->decorate; clone_info->compose = draw_info->compose; if (draw_info->text != (char *)NULL) (void)CloneString(&clone_info->text, draw_info->text); if (draw_info->font != (char *)NULL) (void)CloneString(&clone_info->font, draw_info->font); if (draw_info->metrics != (char *)NULL) (void)CloneString(&clone_info->metrics, draw_info->metrics); if (draw_info->family != (char *)NULL) (void)CloneString(&clone_info->family, draw_info->family); clone_info->style = draw_info->style; clone_info->stretch = draw_info->stretch; clone_info->weight = draw_info->weight; if (draw_info->encoding != (char *)NULL) (void)CloneString(&clone_info->encoding, draw_info->encoding); clone_info->pointsize = draw_info->pointsize; clone_info->kerning = draw_info->kerning; clone_info->interline_spacing = draw_info->interline_spacing; clone_info->interword_spacing = draw_info->interword_spacing; clone_info->direction = draw_info->direction; if (draw_info->density != (char *)NULL) (void)CloneString(&clone_info->density, draw_info->density); clone_info->align = draw_info->align; clone_info->undercolor = draw_info->undercolor; clone_info->border_color = draw_info->border_color; if (draw_info->server_name != (char *)NULL) (void)CloneString(&clone_info->server_name, draw_info->server_name); if (draw_info->dash_pattern != (double *)NULL) { register ssize_t x; for (x = 0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++); clone_info->dash_pattern = (double *)AcquireQuantumMemory((size_t) (2 * x + 2), sizeof(*clone_info->dash_pattern)); if (clone_info->dash_pattern == (double *)NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void)memset(clone_info->dash_pattern, 0, (size_t) (2 * x + 2) * sizeof(*clone_info->dash_pattern)); (void)memcpy(clone_info->dash_pattern, draw_info->dash_pattern, (size_t) (x + 1) * sizeof(*clone_info->dash_pattern)); } clone_info->gradient = draw_info->gradient; if 
(draw_info->gradient.stops != (StopInfo *) NULL) { size_t number_stops; number_stops = clone_info->gradient.number_stops; clone_info->gradient.stops = (StopInfo *) AcquireQuantumMemory((size_t) number_stops, sizeof(*clone_info->gradient.stops)); if (clone_info->gradient.stops == (StopInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void)memcpy(clone_info->gradient.stops, draw_info->gradient.stops, (size_t) number_stops * sizeof(*clone_info->gradient.stops)); } clone_info->bounds = draw_info->bounds; clone_info->fill_alpha = draw_info->fill_alpha; clone_info->stroke_alpha = draw_info->stroke_alpha; clone_info->element_reference = draw_info->element_reference; clone_info->clip_path = draw_info->clip_path; clone_info->clip_units = draw_info->clip_units; if (draw_info->clip_mask != (char *)NULL) (void)CloneString(&clone_info->clip_mask, draw_info->clip_mask); if (draw_info->clipping_mask != (Image *) NULL) clone_info->clipping_mask = CloneImage(draw_info->clipping_mask, 0, 0, MagickTrue, exception); if (draw_info->composite_mask != (Image *) NULL) clone_info->composite_mask = CloneImage(draw_info->composite_mask, 0, 0, MagickTrue, exception); clone_info->render = draw_info->render; clone_info->debug = IsEventLogging(); exception = DestroyExceptionInfo(exception); return (clone_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + C o n v e r t P a t h T o P o l y g o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ConvertPathToPolygon() converts a path to the more efficient * sorted % rendering form. % % The format of the ConvertPathToPolygon * method is: % % PolygonInfo *ConvertPathToPolygon(const PathInfo * *path_info) % % A description of each parameter follows: % % o Method * ConvertPathToPolygon returns the path in a more efficient sorted % * rendering form of type PolygonInfo. % % o draw_info: Specifies a * pointer to an DrawInfo structure. % % o path_info: Specifies a pointer * to an PathInfo structure. % % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int DrawCompareEdges(const void *p_edge, const void *q_edge) { #define DrawCompareEdge(p,q) \ { \ if (((p)-(q)) < 0.0) \ return(-1); \ if (((p)-(q)) > 0.0) \ return(1); \ } register const PointInfo * p, *q; /* * Edge sorting for right-handed coordinate system. */ p = ((const EdgeInfo *)p_edge)->points; q = ((const EdgeInfo *)q_edge)->points; DrawCompareEdge(p[0].y, q[0].y); DrawCompareEdge(p[0].x, q[0].x); DrawCompareEdge((p[1].x - p[0].x) * (q[1].y - q[0].y), (p[1].y - p[0].y) * (q[1].x - q[0].x)); DrawCompareEdge(p[1].y, q[1].y); DrawCompareEdge(p[1].x, q[1].x); return (0); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static void LogPolygonInfo(const PolygonInfo * polygon_info) { register EdgeInfo * p; register ssize_t i, j; (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin active-edge"); p = polygon_info->edges; for (i = 0; i < (ssize_t) polygon_info->number_edges; i++) { (void)LogMagickEvent(DrawEvent, GetMagickModule(), " edge %.20g:", (double)i); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " direction: %s", p->direction != MagickFalse ? "down" : "up"); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " ghostline: %s", p->ghostline != MagickFalse ? 
"transparent" : "opaque"); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " bounds: %g,%g - %g,%g", p->bounds.x1, p->bounds.y1, p->bounds.x2, p->bounds.y2); for (j = 0; j < (ssize_t) p->number_points; j++) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %g,%g", p->points[j].x, p->points[j].y); p++; } (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end active-edge"); } static void ReversePoints(PointInfo * points, const size_t number_points) { PointInfo point; register ssize_t i; for (i = 0; i < (ssize_t) (number_points >> 1); i++) { point = points[i]; points[i] = points[number_points - (i + 1)]; points[number_points - (i + 1)] = point; } } static PolygonInfo * ConvertPathToPolygon(const PathInfo * path_info) { long direction, next_direction; PointInfo point, *points; PolygonInfo * polygon_info; SegmentInfo bounds; register ssize_t i, n; MagickBooleanType ghostline; size_t edge, number_edges, number_points; /* * Convert a path to the more efficient sorted rendering form. */ polygon_info = (PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info)); if (polygon_info == (PolygonInfo *) NULL) return ((PolygonInfo *) NULL); number_edges = 16; polygon_info->edges = (EdgeInfo *) AcquireQuantumMemory(number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return ((PolygonInfo *) NULL); (void)memset(polygon_info->edges, 0, number_edges * sizeof(*polygon_info->edges)); direction = 0; edge = 0; ghostline = MagickFalse; n = 0; number_points = 0; points = (PointInfo *) NULL; (void)memset(&point, 0, sizeof(point)); (void)memset(&bounds, 0, sizeof(bounds)); polygon_info->edges[edge].number_points = (size_t) n; polygon_info->edges[edge].scanline = 0.0; polygon_info->edges[edge].highwater = 0; polygon_info->edges[edge].ghostline = ghostline; polygon_info->edges[edge].direction = (ssize_t) direction; polygon_info->edges[edge].points = points; polygon_info->edges[edge].bounds = bounds; polygon_info->number_edges = 0; for (i = 0; path_info[i].code != EndCode; i++) { if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) || (path_info[i].code == GhostlineCode)) { /* * Move to. */ if ((points != (PointInfo *) NULL) && (n >= 2)) { if (edge == number_edges) { number_edges <<= 1; polygon_info->edges = (EdgeInfo *) ResizeQuantumMemory( polygon_info->edges, (size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return ((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points = (size_t) n; polygon_info->edges[edge].scanline = (-1.0); polygon_info->edges[edge].highwater = 0; polygon_info->edges[edge].ghostline = ghostline; polygon_info->edges[edge].direction = (ssize_t) (direction > 0); if (direction < 0) ReversePoints(points, (size_t) n); polygon_info->edges[edge].points = points; polygon_info->edges[edge].bounds = bounds; polygon_info->edges[edge].bounds.y1 = points[0].y; polygon_info->edges[edge].bounds.y2 = points[n - 1].y; points = (PointInfo *) NULL; ghostline = MagickFalse; edge++; } if (points == (PointInfo *) NULL) { number_points = 16; points = (PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return ((PolygonInfo *) NULL); } ghostline = path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse; point = path_info[i].point; points[0] = point; bounds.x1 = point.x; bounds.x2 = point.x; direction = 0; n = 1; continue; } /* * Line to. 
*/ next_direction = ((path_info[i].point.y > point.y) || ((fabs(path_info[i].point.y - point.y) < MagickEpsilon) && (path_info[i].point.x > point.x))) ? 1 : -1; if ((points != (PointInfo *) NULL) && (direction != 0) && (direction != next_direction)) { /* * New edge. */ point = points[n - 1]; if (edge == number_edges) { number_edges <<= 1; polygon_info->edges = (EdgeInfo *) ResizeQuantumMemory( polygon_info->edges, (size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return ((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points = (size_t) n; polygon_info->edges[edge].scanline = (-1.0); polygon_info->edges[edge].highwater = 0; polygon_info->edges[edge].ghostline = ghostline; polygon_info->edges[edge].direction = (ssize_t) (direction > 0); if (direction < 0) ReversePoints(points, (size_t) n); polygon_info->edges[edge].points = points; polygon_info->edges[edge].bounds = bounds; polygon_info->edges[edge].bounds.y1 = points[0].y; polygon_info->edges[edge].bounds.y2 = points[n - 1].y; number_points = 16; points = (PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return ((PolygonInfo *) NULL); n = 1; ghostline = MagickFalse; points[0] = point; bounds.x1 = point.x; bounds.x2 = point.x; edge++; } direction = next_direction; if (points == (PointInfo *) NULL) continue; if (n == (ssize_t) number_points) { number_points <<= 1; points = (PointInfo *) ResizeQuantumMemory(points, (size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return ((PolygonInfo *) NULL); } point = path_info[i].point; points[n] = point; if (point.x < bounds.x1) bounds.x1 = point.x; if (point.x > bounds.x2) bounds.x2 = point.x; n++; } if (points != (PointInfo *) NULL) { if (n < 2) points = (PointInfo *) RelinquishMagickMemory(points); else { if (edge == number_edges) { number_edges <<= 1; polygon_info->edges = (EdgeInfo *) ResizeQuantumMemory( polygon_info->edges, (size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return ((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points = (size_t) n; polygon_info->edges[edge].scanline = (-1.0); polygon_info->edges[edge].highwater = 0; polygon_info->edges[edge].ghostline = ghostline; polygon_info->edges[edge].direction = (ssize_t) (direction > 0); if (direction < 0) ReversePoints(points, (size_t) n); polygon_info->edges[edge].points = points; polygon_info->edges[edge].bounds = bounds; polygon_info->edges[edge].bounds.y1 = points[0].y; polygon_info->edges[edge].bounds.y2 = points[n - 1].y; ghostline = MagickFalse; edge++; } } polygon_info->number_edges = edge; qsort(polygon_info->edges, (size_t) polygon_info->number_edges, sizeof(*polygon_info->edges), DrawCompareEdges); if (IsEventLogging() != MagickFalse) LogPolygonInfo(polygon_info); return (polygon_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + C o n v e r t P r i m i t i v e T o P a t h * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ConvertPrimitiveToPath() converts a PrimitiveInfo structure into * a vector % path structure. 
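% (Open subpaths are implicitly closed with a ghostline segment so that the
% scanline fill always sees a closed contour.)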
% % The format of the ConvertPrimitiveToPath * method is: % % PathInfo *ConvertPrimitiveToPath(const DrawInfo * *draw_info, % const PrimitiveInfo *primitive_info) % % A * description of each parameter follows: % % o Method * ConvertPrimitiveToPath returns a vector path structure of type % * PathInfo. % % o draw_info: a structure of type DrawInfo. % % o * primitive_info: Specifies a pointer to an PrimitiveInfo structure. % % */ static void LogPathInfo(const PathInfo * path_info) { register const PathInfo * p; (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin vector-path"); for (p = path_info; p->code != EndCode; p++) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %g,%g %s", p->point.x, p->point.y, p->code == GhostlineCode ? "moveto ghostline" : p->code == OpenCode ? "moveto open" : p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" : "?"); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end vector-path"); } static PathInfo * ConvertPrimitiveToPath(const PrimitiveInfo * primitive_info) { MagickBooleanType closed_subpath; PathInfo * path_info; PathInfoCode code; PointInfo p, q; register ssize_t i, n; ssize_t coordinates, start; /* * Converts a PrimitiveInfo structure into a vector path structure. */ switch (primitive_info->primitive) { case AlphaPrimitive: case ColorPrimitive: case ImagePrimitive: case PointPrimitive: case TextPrimitive: return ((PathInfo *) NULL); default: break; } for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++); path_info = (PathInfo *) AcquireQuantumMemory((size_t) (3UL * i + 1UL), sizeof(*path_info)); if (path_info == (PathInfo *) NULL) return ((PathInfo *) NULL); coordinates = 0; closed_subpath = MagickFalse; n = 0; p.x = (-1.0); p.y = (-1.0); q.x = (-1.0); q.y = (-1.0); start = 0; for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++) { code = LineToCode; if (coordinates <= 0) { /* * New subpath. */ coordinates = (ssize_t) primitive_info[i].coordinates; p = primitive_info[i].point; start = n; code = MoveToCode; closed_subpath = primitive_info[i].closed_subpath; } coordinates--; if ((code == MoveToCode) || (coordinates <= 0) || (fabs(q.x - primitive_info[i].point.x) >= MagickEpsilon) || (fabs(q.y - primitive_info[i].point.y) >= MagickEpsilon)) { /* * Eliminate duplicate points. */ path_info[n].code = code; path_info[n].point = primitive_info[i].point; q = primitive_info[i].point; n++; } if (coordinates > 0) continue; /* next point in current subpath */ if (closed_subpath != MagickFalse) { closed_subpath = MagickFalse; continue; } /* * Mark the p point as open if the subpath is not closed. */ path_info[start].code = OpenCode; path_info[n].code = GhostlineCode; path_info[n].point = primitive_info[i].point; n++; path_info[n].code = LineToCode; path_info[n].point = p; n++; } path_info[n].code = EndCode; path_info[n].point.x = 0.0; path_info[n].point.y = 0.0; if (IsEventLogging() != MagickFalse) LogPathInfo(path_info); path_info = (PathInfo *) ResizeQuantumMemory(path_info, (size_t) (n + 1), sizeof(*path_info)); return (path_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D e s t r o y D r a w I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DestroyDrawInfo() deallocates memory associated with an DrawInfo * structure. 
% % The format of the DestroyDrawInfo method is: % % * DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) % % A description of each * parameter follows: % % o draw_info: the draw info. % */ MagickExport DrawInfo * DestroyDrawInfo(DrawInfo * draw_info) { assert(draw_info != (DrawInfo *) NULL); if (draw_info->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(draw_info->signature == MagickCoreSignature); if (draw_info->id != (char *)NULL) draw_info->id = DestroyString(draw_info->id); if (draw_info->primitive != (char *)NULL) draw_info->primitive = DestroyString(draw_info->primitive); if (draw_info->text != (char *)NULL) draw_info->text = DestroyString(draw_info->text); if (draw_info->geometry != (char *)NULL) draw_info->geometry = DestroyString(draw_info->geometry); if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern = DestroyImage(draw_info->fill_pattern); if (draw_info->stroke_pattern != (Image *) NULL) draw_info->stroke_pattern = DestroyImage(draw_info->stroke_pattern); if (draw_info->font != (char *)NULL) draw_info->font = DestroyString(draw_info->font); if (draw_info->metrics != (char *)NULL) draw_info->metrics = DestroyString(draw_info->metrics); if (draw_info->family != (char *)NULL) draw_info->family = DestroyString(draw_info->family); if (draw_info->encoding != (char *)NULL) draw_info->encoding = DestroyString(draw_info->encoding); if (draw_info->density != (char *)NULL) draw_info->density = DestroyString(draw_info->density); if (draw_info->server_name != (char *)NULL) draw_info->server_name = (char *) RelinquishMagickMemory(draw_info->server_name); if (draw_info->dash_pattern != (double *)NULL) draw_info->dash_pattern = (double *)RelinquishMagickMemory( draw_info->dash_pattern); if (draw_info->gradient.stops != (StopInfo *) NULL) draw_info->gradient.stops = (StopInfo *) RelinquishMagickMemory( draw_info->gradient.stops); if (draw_info->clip_mask != (char *)NULL) draw_info->clip_mask = DestroyString(draw_info->clip_mask); if (draw_info->clipping_mask != (Image *) NULL) draw_info->clipping_mask = DestroyImage(draw_info->clipping_mask); if (draw_info->composite_mask != (Image *) NULL) draw_info->composite_mask = DestroyImage(draw_info->composite_mask); draw_info->signature = (~MagickCoreSignature); draw_info = (DrawInfo *) RelinquishMagickMemory(draw_info); return (draw_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D e s t r o y E d g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DestroyEdge() destroys the specified polygon edge. % % The * format of the DestroyEdge method is: % % ssize_t * DestroyEdge(PolygonInfo *polygon_info,const int edge) % % A description * of each parameter follows: % % o polygon_info: Specifies a pointer to * an PolygonInfo structure. % % o edge: the polygon edge number to * destroy. 
static size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
{
  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  if (edge < polygon_info->number_edges)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y P o l y g o n I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
%  The format of the DestroyPolygonInfo method is:
%
%      PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register ssize_t
    i;

  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
        polygon_info->edges[i].points=(PointInfo *) RelinquishMagickMemory(
          polygon_info->edges[i].points);
      polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
        polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w A f f i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawAffineImage() composites the source over the destination image as
%  dictated by the affine transform.
%
%  The format of the DrawAffineImage method is:
%
%      MagickBooleanType DrawAffineImage(Image *image,const Image *source,
%        const AffineMatrix *affine,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o source: the source image.
%
%    o affine: the affine transform.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  register double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
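    For the vertical clip the algebra mirrors the horizontal case above:
    with z=sy*y+ty, the source row struck by destination column x is
    rx*x+z, so requiring 0 <= rx*x+z < rows bounds x by -z/rx and
    (rows-z)/rx, the two bounds exchanging roles when rx is negative.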
*/ z = affine->sy * y + affine->ty; if (affine->rx >= MagickEpsilon) { intercept = (-z / affine->rx); x = intercept; if (x > inverse_edge.x1) inverse_edge.x1 = x; intercept = (-z + (double)image->rows) / affine->rx; x = intercept; if (x < inverse_edge.x2) inverse_edge.x2 = x; } else if (affine->rx < -MagickEpsilon) { intercept = (-z + (double)image->rows) / affine->rx; x = intercept; if (x > inverse_edge.x1) inverse_edge.x1 = x; intercept = (-z / affine->rx); x = intercept; if (x < inverse_edge.x2) inverse_edge.x2 = x; } else if ((z < 0.0) || ((size_t) floor(z + 0.5) >= image->rows)) { inverse_edge.x2 = edge->x2; return (inverse_edge); } return (inverse_edge); } static AffineMatrix InverseAffineMatrix(const AffineMatrix * affine) { AffineMatrix inverse_affine; double determinant; determinant = PerceptibleReciprocal(affine->sx * affine->sy - affine->rx * affine->ry); inverse_affine.sx = determinant * affine->sy; inverse_affine.rx = determinant * (-affine->rx); inverse_affine.ry = determinant * (-affine->ry); inverse_affine.sy = determinant * affine->sx; inverse_affine.tx = (-affine->tx) * inverse_affine.sx - affine->ty * inverse_affine.ry; inverse_affine.ty = (-affine->tx) * inverse_affine.rx - affine->ty * inverse_affine.sy; return (inverse_affine); } MagickExport MagickBooleanType DrawAffineImage(Image * image, const Image * source, const AffineMatrix * affine, ExceptionInfo * exception) { AffineMatrix inverse_affine; CacheView * image_view, *source_view; MagickBooleanType status; PixelInfo zero; PointInfo extent[4], min, max; register ssize_t i; SegmentInfo edge; ssize_t start, stop, y; /* * Determine bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(source != (const Image *)NULL); assert(source->signature == MagickCoreSignature); assert(affine != (AffineMatrix *) NULL); extent[0].x = 0.0; extent[0].y = 0.0; extent[1].x = (double)source->columns - 1.0; extent[1].y = 0.0; extent[2].x = (double)source->columns - 1.0; extent[2].y = (double)source->rows - 1.0; extent[3].x = 0.0; extent[3].y = (double)source->rows - 1.0; for (i = 0; i < 4; i++) { PointInfo point; point = extent[i]; extent[i].x = point.x * affine->sx + point.y * affine->ry + affine->tx; extent[i].y = point.x * affine->rx + point.y * affine->sy + affine->ty; } min = extent[0]; max = extent[0]; for (i = 1; i < 4; i++) { if (min.x > extent[i].x) min.x = extent[i].x; if (min.y > extent[i].y) min.y = extent[i].y; if (max.x < extent[i].x) max.x = extent[i].x; if (max.y < extent[i].y) max.y = extent[i].y; } /* * Affine transform image. 
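 * Each scanline y of the clipped destination is processed independently:
 * AffineEdge() finds the span of columns whose inverse-mapped points fall
 * inside the source, and each pixel in that span is inverse-transformed,
 * interpolated from the source, and composited over the destination.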
*/ if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); status = MagickTrue; edge.x1 = MagickMax(min.x, 0.0); edge.y1 = MagickMax(min.y, 0.0); edge.x2 = MagickMin(max.x, (double)image->columns - 1.0); edge.y2 = MagickMin(max.y, (double)image->rows - 1.0); inverse_affine = InverseAffineMatrix(affine); GetPixelInfo(image, &zero); start = (ssize_t) ceil(edge.y1 - 0.5); stop = (ssize_t) floor(edge.y2 + 0.5); source_view = AcquireVirtualCacheView(source, exception); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source,image,stop-start,1) #endif for (y = start; y <= stop; y++) { PixelInfo composite, pixel; PointInfo point; register ssize_t x; register Quantum * magick_restrict q; SegmentInfo inverse_edge; ssize_t x_offset; inverse_edge = AffineEdge(source, &inverse_affine, (double)y, &edge); if (inverse_edge.x2 < inverse_edge.x1) continue; q = GetCacheViewAuthenticPixels(image_view, (ssize_t) ceil(inverse_edge.x1 - 0.5), y, (size_t) (floor(inverse_edge.x2 + 0.5) - ceil(inverse_edge.x1 - 0.5) + 1), 1, exception); if (q == (Quantum *) NULL) continue; pixel = zero; composite = zero; x_offset = 0; for (x = (ssize_t) ceil(inverse_edge.x1 - 0.5); x <= (ssize_t) floor(inverse_edge.x2 + 0.5); x++) { point.x = (double)x *inverse_affine.sx + y * inverse_affine.ry + inverse_affine.tx; point.y = (double)x *inverse_affine.rx + y * inverse_affine.sy + inverse_affine.ty; status = InterpolatePixelInfo(source, source_view, UndefinedInterpolatePixel, point.x, point.y, &pixel, exception); if (status == MagickFalse) break; GetPixelInfoPixel(image, q, &composite); CompositePixelInfoOver(&pixel, pixel.alpha, &composite, composite.alpha, &composite); SetPixelViaPixelInfo(image, &composite, q); x_offset++; q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } source_view = DestroyCacheView(source_view); image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D r a w B o u n d i n g R e c t a n g l e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawBoundingRectangles() draws the bounding rectangles on the * image. This % is only useful for developers debugging the rendering * algorithm. % % The format of the DrawBoundingRectangles method is: % % * MagickBooleanType DrawBoundingRectangles(Image *image, % const * DrawInfo *draw_info,PolygonInfo *polygon_info, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o image: * the image. % % o draw_info: the draw info. % % o polygon_info: * Specifies a pointer to a PolygonInfo structure. % % o exception: return * any errors or warnings in this structure. 
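%
*/

/*
  Illustrative sketch (not part of the original source): a quick sanity
  check that InverseAffineMatrix() above inverts the linear part of an
  affine transform, i.e. that the composition yields the 2x2 identity to
  within MagickEpsilon.  This hypothetical helper is unused by the renderer.
*/
static MagickBooleanType VerifyInverseAffine(const AffineMatrix *affine)
{
  AffineMatrix
    inverse;

  inverse=InverseAffineMatrix(affine);
  /* rows of affine times columns of inverse must give the 2x2 identity */
  if ((fabs(affine->sx*inverse.sx+affine->ry*inverse.rx-1.0) >
       MagickEpsilon) ||
      (fabs(affine->sx*inverse.ry+affine->ry*inverse.sy) > MagickEpsilon) ||
      (fabs(affine->rx*inverse.sx+affine->sy*inverse.rx) > MagickEpsilon) ||
      (fabs(affine->rx*inverse.ry+affine->sy*inverse.sy-1.0) >
       MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}

/*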
% */ static inline double SaneStrokeWidth(const Image * image, const DrawInfo * draw_info) { return (MagickMin((double)draw_info->stroke_width, (2.0 * sqrt(2.0) + MagickEpsilon) * MagickMax(image->columns, image->rows))); } static MagickBooleanType DrawBoundingRectangles(Image * image, const DrawInfo * draw_info, const PolygonInfo * polygon_info, ExceptionInfo * exception) { double mid; DrawInfo * clone_info; MagickStatusType status; PointInfo end, resolution, start; PrimitiveInfo primitive_info[6]; register ssize_t i; SegmentInfo bounds; ssize_t coordinates; (void)memset(primitive_info, 0, sizeof(primitive_info)); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); status = QueryColorCompliance("#000F", AllCompliance, &clone_info->fill, exception); if (status == MagickFalse) { clone_info = DestroyDrawInfo(clone_info); return (MagickFalse); } resolution.x = 96.0; resolution.y = 96.0; if (clone_info->density != (char *)NULL) { GeometryInfo geometry_info; MagickStatusType flags; flags = ParseGeometry(clone_info->density, &geometry_info); resolution.x = geometry_info.rho; resolution.y = geometry_info.sigma; if ((flags & SigmaValue) == MagickFalse) resolution.y = resolution.x; } mid = (resolution.x / 96.0) * ExpandAffine(&clone_info->affine) * SaneStrokeWidth(image, clone_info) / 2.0; bounds.x1 = 0.0; bounds.y1 = 0.0; bounds.x2 = 0.0; bounds.y2 = 0.0; if (polygon_info != (PolygonInfo *) NULL) { bounds = polygon_info->edges[0].bounds; for (i = 1; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].bounds.x1 < (double)bounds.x1) bounds.x1 = polygon_info->edges[i].bounds.x1; if (polygon_info->edges[i].bounds.y1 < (double)bounds.y1) bounds.y1 = polygon_info->edges[i].bounds.y1; if (polygon_info->edges[i].bounds.x2 > (double)bounds.x2) bounds.x2 = polygon_info->edges[i].bounds.x2; if (polygon_info->edges[i].bounds.y2 > (double)bounds.y2) bounds.y2 = polygon_info->edges[i].bounds.y2; } bounds.x1 -= mid; bounds.x1 = bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns ? (double)image->columns - 1 : bounds.x1; bounds.y1 -= mid; bounds.y1 = bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows ? (double)image->rows - 1 : bounds.y1; bounds.x2 += mid; bounds.x2 = bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns ? (double)image->columns - 1 : bounds.x2; bounds.y2 += mid; bounds.y2 = bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows ? (double)image->rows - 1 : bounds.y2; for (i = 0; i < (ssize_t) polygon_info->number_edges; i++) { if (polygon_info->edges[i].direction != 0) status = QueryColorCompliance("#f00", AllCompliance, &clone_info->stroke, exception); else status = QueryColorCompliance("#0f0", AllCompliance, &clone_info->stroke, exception); if (status == MagickFalse) break; start.x = (double)(polygon_info->edges[i].bounds.x1 - mid); start.y = (double)(polygon_info->edges[i].bounds.y1 - mid); end.x = (double)(polygon_info->edges[i].bounds.x2 + mid); end.y = (double)(polygon_info->edges[i].bounds.y2 + mid); primitive_info[0].primitive = RectanglePrimitive; status &= TraceRectangle(primitive_info, start, end); primitive_info[0].method = ReplaceMethod; coordinates = (ssize_t) primitive_info[0].coordinates; primitive_info[coordinates].primitive = UndefinedPrimitive; status = DrawPrimitive(image, clone_info, primitive_info, exception); if (status == MagickFalse) break; } if (i < (ssize_t) polygon_info->number_edges) { clone_info = DestroyDrawInfo(clone_info); return (status == 0 ? 
MagickFalse : MagickTrue); } } status = QueryColorCompliance("#00f", AllCompliance, &clone_info->stroke, exception); if (status == MagickFalse) { clone_info = DestroyDrawInfo(clone_info); return (MagickFalse); } start.x = (double)(bounds.x1 - mid); start.y = (double)(bounds.y1 - mid); end.x = (double)(bounds.x2 + mid); end.y = (double)(bounds.y2 + mid); primitive_info[0].primitive = RectanglePrimitive; status &= TraceRectangle(primitive_info, start, end); primitive_info[0].method = ReplaceMethod; coordinates = (ssize_t) primitive_info[0].coordinates; primitive_info[coordinates].primitive = UndefinedPrimitive; status = DrawPrimitive(image, clone_info, primitive_info, exception); clone_info = DestroyDrawInfo(clone_info); return (status == 0 ? MagickFalse : MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w C l i p P a t h * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawClipPath() draws the clip path on the image mask. % % The * format of the DrawClipPath method is: % % MagickBooleanType * DrawClipPath(Image *image,const DrawInfo *draw_info, % const char * *id,ExceptionInfo *exception) % % A description of each parameter * follows: % % o image: the image. % % o draw_info: the draw info. % % * o id: the clip path id. % % o exception: return any errors or warnings * in this structure. % */ MagickExport MagickBooleanType DrawClipPath(Image * image, const DrawInfo * draw_info, const char *id, ExceptionInfo * exception) { const char *clip_path; Image * clipping_mask; MagickBooleanType status; clip_path = GetImageArtifact(image, id); if (clip_path == (const char *)NULL) return (MagickFalse); clipping_mask = DrawClippingMask(image, draw_info, draw_info->clip_mask, clip_path, exception); if (clipping_mask == (Image *) NULL) return (MagickFalse); status = SetImageMask(image, WritePixelMask, clipping_mask, exception); clipping_mask = DestroyImage(clipping_mask); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w C l i p p i n g M a s k * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawClippingMask() draws the clip path and returns it as an * image clipping % mask. % % The format of the DrawClippingMask method is: * % % Image *DrawClippingMask(Image *image,const DrawInfo *draw_info, % * const char *id,const char *clip_path,ExceptionInfo *exception) % % A * description of each parameter follows: % % o image: the image. % % o * draw_info: the draw info. % % o id: the clip path id. % % o * clip_path: the clip path. % % o exception: return any errors or * warnings in this structure. % */ static Image * DrawClippingMask(Image * image, const DrawInfo * draw_info, const char *id, const char *clip_path, ExceptionInfo * exception) { DrawInfo * clone_info; Image * clip_mask, *separate_mask; MagickStatusType status; /* * Draw a clip path. 
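 * The clip path is rendered onto a fully transparent canvas with an opaque
 * white fill and no stroke; the alpha channel is then separated and negated
 * to yield the write mask that DrawClipPath() attaches to the image.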
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (const DrawInfo *)NULL); clip_mask = AcquireImage((const ImageInfo *)NULL, exception); status = SetImageExtent(clip_mask, image->columns, image->rows, exception); if (status == MagickFalse) return (DestroyImage(clip_mask)); status = SetImageMask(clip_mask, WritePixelMask, (Image *) NULL, exception); status = QueryColorCompliance("#0000", AllCompliance, &clip_mask->background_color, exception); clip_mask->background_color.alpha = (MagickRealType) TransparentAlpha; clip_mask->background_color.alpha_trait = BlendPixelTrait; status = SetImageBackgroundColor(clip_mask, exception); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "\nbegin clip-path %s", id); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); (void)CloneString(&clone_info->primitive, clip_path); status = QueryColorCompliance("#ffffff", AllCompliance, &clone_info->fill, exception); if (clone_info->clip_mask != (char *)NULL) clone_info->clip_mask = DestroyString(clone_info->clip_mask); status = QueryColorCompliance("#00000000", AllCompliance, &clone_info->stroke, exception); clone_info->stroke_width = 0.0; clone_info->alpha = OpaqueAlpha; clone_info->clip_path = MagickTrue; status = RenderMVGContent(clip_mask, clone_info, 0, exception); clone_info = DestroyDrawInfo(clone_info); separate_mask = SeparateImage(clip_mask, AlphaChannel, exception); if (separate_mask != (Image *) NULL) { clip_mask = DestroyImage(clip_mask); clip_mask = separate_mask; status = NegateImage(clip_mask, MagickFalse, exception); if (status == MagickFalse) clip_mask = DestroyImage(clip_mask); } if (status == MagickFalse) clip_mask = DestroyImage(clip_mask); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "end clip-path"); return (clip_mask); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w C o m p o s i t e M a s k * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawCompositeMask() draws the mask path and returns it as an * image mask. % % The format of the DrawCompositeMask method is: % % * Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info, % * const char *id,const char *mask_path,ExceptionInfo *exception) % % A * description of each parameter follows: % % o image: the image. % % o * draw_info: the draw info. % % o id: the mask path id. % % o * mask_path: the mask path. % % o exception: return any errors or * warnings in this structure. % */ static Image * DrawCompositeMask(Image * image, const DrawInfo * draw_info, const char *id, const char *mask_path, ExceptionInfo * exception) { Image * composite_mask, *separate_mask; DrawInfo * clone_info; MagickStatusType status; /* * Draw a mask path. 
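 * This mirrors DrawClippingMask() above, but the canvas carries a
 * CompositePixelMask and the caller installs the result as a composite
 * mask rather than a write mask.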
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (const DrawInfo *)NULL); composite_mask = AcquireImage((const ImageInfo *)NULL, exception); status = SetImageExtent(composite_mask, image->columns, image->rows, exception); if (status == MagickFalse) return (DestroyImage(composite_mask)); status = SetImageMask(composite_mask, CompositePixelMask, (Image *) NULL, exception); status = QueryColorCompliance("#0000", AllCompliance, &composite_mask->background_color, exception); composite_mask->background_color.alpha = (MagickRealType) TransparentAlpha; composite_mask->background_color.alpha_trait = BlendPixelTrait; (void)SetImageBackgroundColor(composite_mask, exception); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "\nbegin mask-path %s", id); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); (void)CloneString(&clone_info->primitive, mask_path); status = QueryColorCompliance("#ffffff", AllCompliance, &clone_info->fill, exception); status = QueryColorCompliance("#00000000", AllCompliance, &clone_info->stroke, exception); clone_info->stroke_width = 0.0; clone_info->alpha = OpaqueAlpha; status = RenderMVGContent(composite_mask, clone_info, 0, exception); clone_info = DestroyDrawInfo(clone_info); separate_mask = SeparateImage(composite_mask, AlphaChannel, exception); if (separate_mask != (Image *) NULL) { composite_mask = DestroyImage(composite_mask); composite_mask = separate_mask; status = NegateImage(composite_mask, MagickFalse, exception); if (status == MagickFalse) composite_mask = DestroyImage(composite_mask); } if (status == MagickFalse) composite_mask = DestroyImage(composite_mask); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "end mask-path"); return (composite_mask); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D r a w D a s h P o l y g o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawDashPolygon() draws a dashed polygon (line, rectangle, * ellipse) on the % image while respecting the dash offset and dash pattern * attributes. % % The format of the DrawDashPolygon method is: % % * MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info, % * const PrimitiveInfo *primitive_info,Image *image, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o * draw_info: the draw info. % % o primitive_info: Specifies a pointer to * a PrimitiveInfo structure. % % o image: the image. % % o exception: * return any errors or warnings in this structure. 
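%
*/

/*
  Illustrative sketch (not part of the original source): a stripped-down
  model of the dash walk DrawDashPolygon() performs below.  Given a segment
  of the given length and a repeating on/off pattern, this hypothetical
  helper records each "on" run as an offset pair in intervals[]; scaling,
  dash offsets, and vertex bookkeeping are omitted.  It returns the number
  of runs written.
*/
static size_t DashIntervals(const double length,const double *pattern,
  const size_t number_entries,double *intervals,const size_t max_intervals)
{
  double
    position;

  size_t
    i,
    n;

  position=0.0;
  n=0;
  for (i=0; (position < length) && ((2*n+1) < max_intervals); i++)
  {
    double
      span;

    span=pattern[i % number_entries];
    if ((i % 2) == 0)
      {
        /* even pattern entries are "on": clip the run to the segment */
        intervals[2*n]=position;
        intervals[2*n+1]=MagickMin(position+span,length);
        n++;
      }
    position+=span;
  }
  return(n);
}

/*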
% */ static MagickBooleanType DrawDashPolygon(const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, Image * image, ExceptionInfo * exception) { double length, maximum_length, offset, scale, total_length; DrawInfo * clone_info; MagickStatusType status; PrimitiveInfo * dash_polygon; register double dx, dy; register ssize_t i; size_t number_vertices; ssize_t j, n; assert(draw_info != (const DrawInfo *)NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin draw-dash"); for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++); number_vertices = (size_t) i; dash_polygon = (PrimitiveInfo *) AcquireQuantumMemory((size_t) (2UL * number_vertices + 32UL), sizeof(*dash_polygon)); if (dash_polygon == (PrimitiveInfo *) NULL) return (MagickFalse); (void)memset(dash_polygon, 0, (2UL * number_vertices + 32UL) * sizeof(*dash_polygon)); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->miterlimit = 0; dash_polygon[0] = primitive_info[0]; scale = ExpandAffine(&draw_info->affine); length = scale * draw_info->dash_pattern[0]; offset = fabs(draw_info->dash_offset) >= MagickEpsilon ? scale * draw_info->dash_offset : 0.0; j = 1; for (n = 0; offset > 0.0; j = 0) { if (draw_info->dash_pattern[n] <= 0.0) break; length = scale * (draw_info->dash_pattern[n] + (n == 0 ? -0.5 : 0.5)); if (offset > length) { offset -= length; n++; length = scale * draw_info->dash_pattern[n]; continue; } if (offset < length) { length -= offset; offset = 0.0; break; } offset = 0.0; n++; } status = MagickTrue; maximum_length = 0.0; total_length = 0.0; for (i = 1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++) { dx = primitive_info[i].point.x - primitive_info[i - 1].point.x; dy = primitive_info[i].point.y - primitive_info[i - 1].point.y; maximum_length = hypot(dx, dy); if (maximum_length > MaxBezierCoordinates) break; if (fabs(length) < MagickEpsilon) { if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon) n++; if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon) n = 0; length = scale * draw_info->dash_pattern[n]; } for (total_length = 0.0; (length >= 0.0) && (maximum_length >= (total_length + length));) { total_length += length; if ((n & 0x01) != 0) { dash_polygon[0] = primitive_info[0]; dash_polygon[0].point.x = (double)(primitive_info[i - 1].point.x + dx * total_length * PerceptibleReciprocal(maximum_length)); dash_polygon[0].point.y = (double)(primitive_info[i - 1].point.y + dy * total_length * PerceptibleReciprocal(maximum_length)); j = 1; } else { if ((j + 1) > (ssize_t) number_vertices) break; dash_polygon[j] = primitive_info[i - 1]; dash_polygon[j].point.x = (double)(primitive_info[i - 1].point.x + dx * total_length * PerceptibleReciprocal(maximum_length)); dash_polygon[j].point.y = (double)(primitive_info[i - 1].point.y + dy * total_length * PerceptibleReciprocal(maximum_length)); dash_polygon[j].coordinates = 1; j++; dash_polygon[0].coordinates = (size_t) j; dash_polygon[j].primitive = UndefinedPrimitive; status &= DrawStrokePolygon(image, clone_info, dash_polygon, exception); if (status == MagickFalse) break; } if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon) n++; if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon) n = 0; length = scale * draw_info->dash_pattern[n]; } length -= (maximum_length - total_length); if ((n & 0x01) != 0) continue; dash_polygon[j] = primitive_info[i]; dash_polygon[j].coordinates = 1; j++; } if ((status != MagickFalse) && (total_length < maximum_length) && ((n & 0x01) == 0) && (j > 1)) { 
dash_polygon[j] = primitive_info[i - 1]; dash_polygon[j].point.x += MagickEpsilon; dash_polygon[j].point.y += MagickEpsilon; dash_polygon[j].coordinates = 1; j++; dash_polygon[0].coordinates = (size_t) j; dash_polygon[j].primitive = UndefinedPrimitive; status &= DrawStrokePolygon(image, clone_info, dash_polygon, exception); } dash_polygon = (PrimitiveInfo *) RelinquishMagickMemory(dash_polygon); clone_info = DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-dash"); return (status != 0 ? MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w G r a d i e n t I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawGradientImage() draws a linear gradient on the image. % % * The format of the DrawGradientImage method is: % % MagickBooleanType * DrawGradientImage(Image *image, % const DrawInfo * *draw_info,ExceptionInfo *exception) % % A description of each parameter * follows: % % o image: the image. % % o draw_info: the draw info. % % * o exception: return any errors or warnings in this structure. % */ static inline double GetStopColorOffset(const GradientInfo * gradient, const ssize_t x, const ssize_t y) { switch (gradient->type) { case UndefinedGradient: case LinearGradient: { double gamma, length, offset, scale; PointInfo p, q; const SegmentInfo * gradient_vector; gradient_vector = (&gradient->gradient_vector); p.x = gradient_vector->x2 - gradient_vector->x1; p.y = gradient_vector->y2 - gradient_vector->y1; q.x = (double)x - gradient_vector->x1; q.y = (double)y - gradient_vector->y1; length = sqrt(q.x * q.x + q.y * q.y); gamma = sqrt(p.x * p.x + p.y * p.y) * length; gamma = PerceptibleReciprocal(gamma); scale = p.x * q.x + p.y * q.y; offset = gamma * scale * length; return (offset); } case RadialGradient: { PointInfo v; if (gradient->spread == RepeatSpread) { v.x = (double)x - gradient->center.x; v.y = (double)y - gradient->center.y; return (sqrt(v.x * v.x + v.y * v.y)); } v.x = (double)(((x - gradient->center.x) * cos(DegreesToRadians( gradient->angle))) + ((y - gradient->center.y) * sin(DegreesToRadians( gradient->angle)))) * PerceptibleReciprocal(gradient->radii.x); v.y = (double)(((x - gradient->center.x) * sin(DegreesToRadians( gradient->angle))) - ((y - gradient->center.y) * cos(DegreesToRadians( gradient->angle)))) * PerceptibleReciprocal(gradient->radii.y); return (sqrt(v.x * v.x + v.y * v.y)); } } return (0.0); } static int StopInfoCompare(const void *x, const void *y) { StopInfo * stop_1, *stop_2; stop_1 = (StopInfo *) x; stop_2 = (StopInfo *) y; if (stop_1->offset > stop_2->offset) return (1); if (fabs(stop_1->offset - stop_2->offset) <= MagickEpsilon) return (0); return (-1); } MagickExport MagickBooleanType DrawGradientImage(Image * image, const DrawInfo * draw_info, ExceptionInfo * exception) { CacheView * image_view; const GradientInfo * gradient; const SegmentInfo * gradient_vector; double length; MagickBooleanType status; PixelInfo zero; PointInfo point; RectangleInfo bounding_box; ssize_t y; /* * Draw linear or radial gradient on image. 
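 * The color stops are sorted by offset first.  For each pixel the
 * normalized gradient offset selects the bracketing stops i < j, which are
 * blended with weight alpha=(offset-stop[i])/(stop[j]-stop[i]); with stops
 * at 0.0 (black) and 1.0 (white), for example, a pixel at offset 0.25 gets
 * three quarters black and one quarter white.  The spread method pads,
 * reflects, or repeats offsets that fall outside [0,1].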
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (const DrawInfo *)NULL); gradient = (&draw_info->gradient); qsort(gradient->stops, gradient->number_stops, sizeof(StopInfo), StopInfoCompare); gradient_vector = (&gradient->gradient_vector); point.x = gradient_vector->x2 - gradient_vector->x1; point.y = gradient_vector->y2 - gradient_vector->y1; length = sqrt(point.x * point.x + point.y * point.y); bounding_box = gradient->bounding_box; status = MagickTrue; GetPixelInfo(image, &zero); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,bounding_box.height-bounding_box.y,1) #endif for (y = bounding_box.y; y < (ssize_t) bounding_box.height; y++) { double alpha, offset; PixelInfo composite, pixel; register Quantum * magick_restrict q; register ssize_t i, x; ssize_t j; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } pixel = zero; composite = zero; offset = GetStopColorOffset(gradient, 0, y); if (gradient->type != RadialGradient) offset *= PerceptibleReciprocal(length); for (x = bounding_box.x; x < (ssize_t) bounding_box.width; x++) { GetPixelInfoPixel(image, q, &pixel); switch (gradient->spread) { case UndefinedSpread: case PadSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1 - 0.5)) || (y != (ssize_t) ceil(gradient_vector->y1 - 0.5))) { offset = GetStopColorOffset(gradient, x, y); if (gradient->type != RadialGradient) offset *= PerceptibleReciprocal(length); } for (i = 0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if ((offset < 0.0) || (i == 0)) composite = gradient->stops[0].color; else if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops)) composite = gradient->stops[gradient->number_stops - 1].color; else { j = i; i--; alpha = (offset - gradient->stops[i].offset) / (gradient->stops[j].offset - gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color, 1.0 - alpha, &gradient->stops[j].color, alpha, &composite); } break; } case ReflectSpread: { if ((x != (ssize_t) ceil(gradient_vector->x1 - 0.5)) || (y != (ssize_t) ceil(gradient_vector->y1 - 0.5))) { offset = GetStopColorOffset(gradient, x, y); if (gradient->type != RadialGradient) offset *= PerceptibleReciprocal(length); } if (offset < 0.0) offset = (-offset); if ((ssize_t) fmod(offset, 2.0) == 0) offset = fmod(offset, 1.0); else offset = 1.0 - fmod(offset, 1.0); for (i = 0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite = gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite = gradient->stops[gradient->number_stops - 1].color; else { j = i; i--; alpha = (offset - gradient->stops[i].offset) / (gradient->stops[j].offset - gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color, 1.0 - alpha, &gradient->stops[j].color, alpha, &composite); } break; } case RepeatSpread: { double repeat; MagickBooleanType antialias; antialias = MagickFalse; repeat = 0.0; if ((x != (ssize_t) ceil(gradient_vector->x1 - 0.5)) || (y != (ssize_t) ceil(gradient_vector->y1 - 0.5))) { offset = GetStopColorOffset(gradient, x, y); if (gradient->type == 
LinearGradient) { repeat = fmod(offset, length); if (repeat < 0.0) repeat = length - fmod(-repeat, length); else repeat = fmod(offset, length); antialias = (repeat < length) && ((repeat + 1.0) > length) ? MagickTrue : MagickFalse; offset = PerceptibleReciprocal(length) * repeat; } else { repeat = fmod(offset, gradient->radius); if (repeat < 0.0) repeat = gradient->radius - fmod(-repeat, gradient->radius); else repeat = fmod(offset, gradient->radius); antialias = repeat + 1.0 > gradient->radius ? MagickTrue : MagickFalse; offset = repeat / gradient->radius; } } for (i = 0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite = gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite = gradient->stops[gradient->number_stops - 1].color; else { j = i; i--; alpha = (offset - gradient->stops[i].offset) / (gradient->stops[j].offset - gradient->stops[i].offset); if (antialias != MagickFalse) { if (gradient->type == LinearGradient) alpha = length - repeat; else alpha = gradient->radius - repeat; i = 0; j = (ssize_t) gradient->number_stops - 1L; } CompositePixelInfoBlend(&gradient->stops[i].color, 1.0 - alpha, &gradient->stops[j].color, alpha, &composite); } break; } } CompositePixelInfoOver(&composite, composite.alpha, &pixel, pixel.alpha, &pixel); SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawImage() draws a graphic primitive on your image. The * primitive % may be represented as a string or filename. Precede the * filename with an % "at" sign (@) and the contents of the file are drawn * on the image. You % can affect how text is drawn by setting one or more * members of the draw % info structure. % % The format of the DrawImage * method is: % % MagickBooleanType DrawImage(Image *image,const * DrawInfo *draw_info, % ExceptionInfo *exception) % % A description * of each parameter follows: % % o image: the image. % % o draw_info: * the draw info. % % o exception: return any errors or warnings in this * structure. % */ static MagickBooleanType CheckPrimitiveExtent(MVGInfo * mvg_info, const size_t pad) { double extent; size_t quantum; /* * Check if there is enough storage for drawing pimitives. */ extent = (double)mvg_info->offset + pad + PrimitiveExtentPad; quantum = sizeof(**mvg_info->primitive_info); if (((extent * quantum) < (double)SSIZE_MAX) && ((extent * quantum) < (double)GetMaxMemoryRequest())) { if (extent <= (double)*mvg_info->extent) return (MagickTrue); *mvg_info->primitive_info = (PrimitiveInfo *) ResizeQuantumMemory( *mvg_info->primitive_info, (size_t) extent, quantum); if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL) { register ssize_t i; *mvg_info->extent = (size_t) extent; for (i = mvg_info->offset + 1; i < (ssize_t) extent; i++) (*mvg_info->primitive_info)[i].primitive = UndefinedPrimitive; return (MagickTrue); } } /* * Reallocation failed, allocate a primitive to facilitate unwinding. 
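 * A small zeroed buffer is left in place so that callers can still write
 * the UndefinedPrimitive sentinel while the error unwinds; the recorded
 * extent is reset accordingly.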
*/ (void)ThrowMagickException(mvg_info->exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", ""); if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL) *mvg_info->primitive_info = (PrimitiveInfo *) RelinquishMagickMemory( *mvg_info->primitive_info); *mvg_info->primitive_info = (PrimitiveInfo *) AcquireCriticalMemory( PrimitiveExtentPad * quantum); (void)memset(*mvg_info->primitive_info, 0, PrimitiveExtentPad * quantum); *mvg_info->extent = 1; return (MagickFalse); } MagickExport int MVGMacroCompare(const void *target, const void *source) { const char *p, *q; p = (const char *)target; q = (const char *)source; return (strcmp(p, q)); } static SplayTreeInfo * GetMVGMacros(const char *primitive) { char *macro, *token; const char *q; size_t extent; SplayTreeInfo * macros; /* * Scan graphic primitives for definitions and classes. */ if (primitive == (const char *)NULL) return ((SplayTreeInfo *) NULL); macros = NewSplayTree(MVGMacroCompare, RelinquishMagickMemory, RelinquishMagickMemory); macro = AcquireString(primitive); token = AcquireString(primitive); extent = strlen(token) + MagickPathExtent; for (q = primitive; *q != '\0';) { if (GetNextToken(q, &q, extent, token) < 1) break; if (*token == '\0') break; if (LocaleCompare("push", token) == 0) { register const char *end, *start; (void)GetNextToken(q, &q, extent, token); if (*q == '"') { char name[MagickPathExtent]; const char *p; ssize_t n; /* * Named macro (e.g. push graphic-context "wheel"). */ (void)GetNextToken(q, &q, extent, token); start = q; end = q; (void)CopyMagickString(name, token, MagickPathExtent); n = 1; for (p = q; *p != '\0';) { if (GetNextToken(p, &p, extent, token) < 1) break; if (*token == '\0') break; if (LocaleCompare(token, "pop") == 0) { end = p - strlen(token) - 1; n--; } if (LocaleCompare(token, "push") == 0) n++; if ((n == 0) && (end > start)) { /* * Extract macro. */ (void)GetNextToken(p, &p, extent, token); (void)CopyMagickString(macro, start, (size_t) (end - start)); (void)AddValueToSplayTree(macros, ConstantString(name), ConstantString(macro)); break; } } } } } token = DestroyString(token); macro = DestroyString(macro); return (macros); } static inline MagickBooleanType IsPoint(const char *point) { char *p; double value; value = StringToDouble(point, &p); return ((fabs(value) < MagickEpsilon) && (p == point) ? 
MagickFalse : MagickTrue); } static inline MagickBooleanType TracePoint(PrimitiveInfo * primitive_info, const PointInfo point) { primitive_info->coordinates = 1; primitive_info->closed_subpath = MagickFalse; primitive_info->point = point; return (MagickTrue); } static MagickBooleanType RenderMVGContent(Image * image, const DrawInfo * draw_info, const size_t depth, ExceptionInfo * exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, coordinates, cursor, factor, primitive_extent; DrawInfo * clone_info, **graphic_context; MagickBooleanType proceed; MagickStatusType status; MVGInfo mvg_info; PointInfo point; PrimitiveInfo * primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_points, number_stops; SplayTreeInfo * macros; ssize_t defsDepth, j, k, n, symbolDepth; StopInfo * stops; TypeMetric metrics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); if (depth > MagickMaxRecursionDepth) ThrowBinaryException(DrawError, "VectorGraphicsNestedTooDeeply", image->filename); if ((draw_info->primitive == (char *)NULL) || (*draw_info->primitive == '\0')) return (MagickFalse); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "begin draw-image"); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) { status = SetImageAlphaChannel(image, OpaqueAlphaChannel, exception); if (status == MagickFalse) return (MagickFalse); } if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) && (*(draw_info->primitive + 1) != '-') && (depth == 0)) primitive = FileToString(draw_info->primitive + 1, ~0UL, exception); else primitive = AcquireString(draw_info->primitive); if (primitive == (char *)NULL) return (MagickFalse); primitive_extent = (double)strlen(primitive); (void)SetImageArtifact(image, "mvg:vector-graphics", primitive); n = 0; number_stops = 0; stops = (StopInfo *) NULL; /* * Allocate primitive info memory. 
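 * The buffer starts at PrimitiveExtentPad entries; mvg_info records the
 * buffer, its extent, and the current offset so that CheckPrimitiveExtent()
 * can grow it on demand as primitives are traced.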
*/ graphic_context = (DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive = DestroyString(primitive); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } number_points = PrimitiveExtentPad; primitive_info = (PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive = DestroyString(primitive); for (; n >= 0; n--) graphic_context[n] = DestroyDrawInfo(graphic_context[n]); graphic_context = (DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } (void)memset(primitive_info, 0, (size_t) number_points * sizeof(*primitive_info)); (void)memset(&mvg_info, 0, sizeof(mvg_info)); mvg_info.primitive_info = (&primitive_info); mvg_info.extent = (&number_points); mvg_info.exception = exception; graphic_context[n] = CloneDrawInfo((ImageInfo *) NULL, draw_info); graphic_context[n]->viewbox = image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width = image->columns; graphic_context[n]->viewbox.height = image->rows; } token = AcquireString(primitive); extent = strlen(token) + MagickPathExtent; defsDepth = 0; symbolDepth = 0; cursor = 0.0; macros = GetMVGMacros(primitive); status = MagickTrue; for (q = primitive; *q != '\0';) { /* * Interpret graphic primitive. */ if (GetNextToken(q, &q, MagickPathExtent, keyword) < 1) break; if (*keyword == '\0') break; if (*keyword == '#') { /* * Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p = q - strlen(keyword) - 1; primitive_type = UndefinedPrimitive; current = graphic_context[n]->affine; GetAffineMatrix(&affine); *token = '\0'; switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); affine.sx = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); affine.rx = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); affine.ry = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); affine.sy = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); affine.tx = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); affine.ty = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } if (LocaleCompare("alpha", keyword) == 0) { primitive_type = AlphaPrimitive; break; } if (LocaleCompare("arc", keyword) == 0) { primitive_type = ArcPrimitive; break; } status = MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier", keyword) == 0) { primitive_type = BezierPrimitive; break; } if (LocaleCompare("border-color", 
keyword) == 0) { (void)GetNextToken(q, &q, extent, token); status &= QueryColorCompliance(token, AllCompliance, &graphic_context[n]->border_color, exception); break; } status = MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("class", keyword) == 0) { const char *mvg_class; (void)GetNextToken(q, &q, extent, token); if (*token == '\0') { status = MagickFalse; break; } if (LocaleCompare(token, graphic_context[n]->id) == 0) break; mvg_class = (const char *)GetValueFromSplayTree(macros, token); if (mvg_class != (const char *)NULL) { char *elements; ssize_t offset; /* * Inject class elements in stream. */ offset = (ssize_t) (p - primitive); elements = AcquireString(primitive); elements[offset] = '\0'; (void)ConcatenateString(&elements, mvg_class); (void)ConcatenateString(&elements, "\n"); (void)ConcatenateString(&elements, q); primitive = DestroyString(primitive); primitive = elements; q = primitive + offset; } break; } if (LocaleCompare("clip-path", keyword) == 0) { const char *clip_path; /* * Take a node from within the MVG document, and * duplicate it here. */ (void)GetNextToken(q, &q, extent, token); if (*token == '\0') { status = MagickFalse; break; } (void)CloneString(&graphic_context[n]->clip_mask, token); clip_path = (const char *)GetValueFromSplayTree(macros, token); if (clip_path != (const char *)NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask = DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask = DrawClippingMask(image, graphic_context[n], token, clip_path, exception); if (graphic_context[n]->compliance != SVGCompliance) { clip_path = (const char *)GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *)NULL) (void)SetImageArtifact(image, graphic_context[n]->clip_mask, clip_path); status &= DrawClipPath(image, graphic_context[n], graphic_context[n]->clip_mask, exception); } } break; } if (LocaleCompare("clip-rule", keyword) == 0) { ssize_t fill_rule; (void)GetNextToken(q, &q, extent, token); fill_rule = ParseCommandOption(MagickFillRuleOptions, MagickFalse, token); if (fill_rule == -1) { status = MagickFalse; break; } graphic_context[n]->fill_rule = (FillRule) fill_rule; break; } if (LocaleCompare("clip-units", keyword) == 0) { ssize_t clip_units; (void)GetNextToken(q, &q, extent, token); clip_units = ParseCommandOption(MagickClipPathOptions, MagickFalse, token); if (clip_units == -1) { status = MagickFalse; break; } graphic_context[n]->clip_units = (ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx = draw_info->bounds.x2; affine.sy = draw_info->bounds.y2; affine.tx = draw_info->bounds.x1; affine.ty = draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle", keyword) == 0) { primitive_type = CirclePrimitive; break; } if (LocaleCompare("color", keyword) == 0) { primitive_type = ColorPrimitive; break; } if (LocaleCompare("compliance", keyword) == 0) { /* * MVG compliance associates a clipping mask with an * image; SVG compliance associates a clipping mask with * a graphics context. 
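 * Concretely: under SVG compliance the mask lives on the graphic context
 * and is applied while that context renders, whereas the non-SVG path
 * attaches the mask to the image itself (see the clip-path handler above,
 * which calls DrawClipPath() immediately in that case).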
*/ (void)GetNextToken(q, &q, extent, token); graphic_context[n]->compliance = (ComplianceType) ParseCommandOption( MagickComplianceOptions, MagickFalse, token); break; } status = MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate", keyword) == 0) { ssize_t decorate; (void)GetNextToken(q, &q, extent, token); decorate = ParseCommandOption(MagickDecorateOptions, MagickFalse, token); if (decorate == -1) { status = MagickFalse; break; } graphic_context[n]->decorate = (DecorationType) decorate; break; } if (LocaleCompare("density", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->density, token); break; } if (LocaleCompare("direction", keyword) == 0) { ssize_t direction; (void)GetNextToken(q, &q, extent, token); direction = ParseCommandOption(MagickDirectionOptions, MagickFalse, token); if (direction == -1) status = MagickFalse; else graphic_context[n]->direction = (DirectionType) direction; break; } status = MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse", keyword) == 0) { primitive_type = EllipsePrimitive; break; } if (LocaleCompare("encoding", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->encoding, token); break; } status = MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); if (graphic_context[n]->clip_path != MagickFalse) break; (void)FormatLocaleString(pattern, MagickPathExtent, "%s", token); if (GetImageArtifact(image, pattern) != (const char *)NULL) (void)DrawPatternPath(image, draw_info, token, &graphic_context[n]->fill_pattern, exception); else { status &= QueryColorCompliance(token, AllCompliance, &graphic_context[n]->fill, exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha = graphic_context[n]->fill_alpha; } break; } if (LocaleCompare("fill-opacity", keyword) == 0) { double opacity; (void)GetNextToken(q, &q, extent, token); if (graphic_context[n]->clip_path != MagickFalse) break; factor = strchr(token, '%') != (char *)NULL ? 
0.01 : 1.0; opacity = MagickMin(MagickMax(factor * StringToDouble(token, &next_token), 0.0), 1.0); if (token == next_token) ThrowPointExpectedException(token, exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->fill_alpha *= opacity; else graphic_context[n]->fill_alpha = QuantumRange * opacity; if (graphic_context[n]->fill.alpha != TransparentAlpha) graphic_context[n]->fill.alpha = graphic_context[n]->fill_alpha; else graphic_context[n]->fill.alpha = (MagickRealType) ClampToQuantum(QuantumRange * (1.0 - opacity)); break; } if (LocaleCompare("fill-rule", keyword) == 0) { ssize_t fill_rule; (void)GetNextToken(q, &q, extent, token); fill_rule = ParseCommandOption(MagickFillRuleOptions, MagickFalse, token); if (fill_rule == -1) { status = MagickFalse; break; } graphic_context[n]->fill_rule = (FillRule) fill_rule; break; } if (LocaleCompare("font", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->font, token); if (LocaleCompare("none", token) == 0) graphic_context[n]->font = (char *)RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->family, token); break; } if (LocaleCompare("font-size", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->pointsize = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } if (LocaleCompare("font-stretch", keyword) == 0) { ssize_t stretch; (void)GetNextToken(q, &q, extent, token); stretch = ParseCommandOption(MagickStretchOptions, MagickFalse, token); if (stretch == -1) { status = MagickFalse; break; } graphic_context[n]->stretch = (StretchType) stretch; break; } if (LocaleCompare("font-style", keyword) == 0) { ssize_t style; (void)GetNextToken(q, &q, extent, token); style = ParseCommandOption(MagickStyleOptions, MagickFalse, token); if (style == -1) { status = MagickFalse; break; } graphic_context[n]->style = (StyleType) style; break; } if (LocaleCompare("font-weight", keyword) == 0) { ssize_t weight; (void)GetNextToken(q, &q, extent, token); weight = ParseCommandOption(MagickWeightOptions, MagickFalse, token); if (weight == -1) weight = (ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight = (size_t) weight; break; } status = MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("gravity", keyword) == 0) { ssize_t gravity; (void)GetNextToken(q, &q, extent, token); gravity = ParseCommandOption(MagickGravityOptions, MagickFalse, token); if (gravity == -1) { status = MagickFalse; break; } graphic_context[n]->gravity = (GravityType) gravity; break; } status = MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image", keyword) == 0) { ssize_t compose; primitive_type = ImagePrimitive; (void)GetNextToken(q, &q, extent, token); compose = ParseCommandOption(MagickComposeOptions, MagickFalse, token); if (compose == -1) { status = MagickFalse; break; } graphic_context[n]->compose = (CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->interline_spacing = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } if (LocaleCompare("interword-spacing", keyword) == 0) { 
(void)GetNextToken(q, &q, extent, token); graphic_context[n]->interword_spacing = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } status = MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->kerning = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } status = MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); if (IsPoint(token) == MagickFalse) break; clone_info = CloneDrawInfo((ImageInfo *) NULL, graphic_context[n]); clone_info->text = AcquireString(" "); status &= GetTypeMetrics(image, clone_info, &metrics, exception); graphic_context[n]->kerning = metrics.width * StringToDouble(token, &next_token); clone_info = DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(token, exception); break; } if (LocaleCompare("line", keyword) == 0) { primitive_type = LinePrimitive; break; } status = MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask", keyword) == 0) { const char *mask_path; /* * Take a node from within the MVG document, and * duplicate it here. */ (void)GetNextToken(q, &q, extent, token); mask_path = (const char *)GetValueFromSplayTree(macros, token); if (mask_path != (const char *)NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask = DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask = DrawCompositeMask(image, graphic_context[n], token, mask_path, exception); if (graphic_context[n]->compliance != SVGCompliance) status = SetImageMask(image, CompositePixelMask, graphic_context[n]->composite_mask, exception); } break; } break; } case 'o': case 'O': { if (LocaleCompare("offset", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("opacity", keyword) == 0) { double opacity; (void)GetNextToken(q, &q, extent, token); if (graphic_context[n]->clip_path != MagickFalse) break; factor = strchr(token, '%') != (char *)NULL ? 0.01 : 1.0; opacity = MagickMin(MagickMax(factor * StringToDouble(token, &next_token), 0.0), 1.0); if (token == next_token) ThrowPointExpectedException(token, exception); if (graphic_context[n]->compliance == SVGCompliance) { graphic_context[n]->fill_alpha *= opacity; graphic_context[n]->stroke_alpha *= opacity; } else { graphic_context[n]->fill_alpha = QuantumRange * opacity; graphic_context[n]->stroke_alpha = QuantumRange * opacity; } break; } status = MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path", keyword) == 0) { primitive_type = PathPrimitive; break; } if (LocaleCompare("point", keyword) == 0) { primitive_type = PointPrimitive; break; } if (LocaleCompare("polyline", keyword) == 0) { primitive_type = PolylinePrimitive; break; } if (LocaleCompare("polygon", keyword) == 0) { primitive_type = PolygonPrimitive; break; } if (LocaleCompare("pop", keyword) == 0) { if (GetNextToken(q, &q, extent, token) < 1) break; if (LocaleCompare("class", token) == 0) break; if (LocaleCompare("clip-path", token) == 0) break; if (LocaleCompare("defs", token) == 0) { defsDepth--; graphic_context[n]->render = defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient", token) == 0) break; if (LocaleCompare("graphic-context", token) == 0) { if (n <= 0) { (void)ThrowMagickException(exception, GetMagickModule(), DrawError, "UnbalancedGraphicContextPushPop", "`%s'", token); status = MagickFalse; n = 0; break; } if ((graphic_context[n]->clip_mask != (char *)NULL) && (graphic_context[n]->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n - 1]->clip_mask) != 0) status = SetImageMask(image, WritePixelMask, (Image *) NULL, exception); graphic_context[n] = DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask", token) == 0) break; if (LocaleCompare("pattern", token) == 0) break; if (LocaleCompare("symbol", token) == 0) { symbolDepth--; graphic_context[n]->render = symbolDepth > 0 ? MagickFalse : MagickTrue; break; } status = MagickFalse; break; } if (LocaleCompare("push", keyword) == 0) { if (GetNextToken(q, &q, extent, token) < 1) break; if (LocaleCompare("class", token) == 0) { /* * Class context. */ for (p = q; *q != '\0';) { if (GetNextToken(q, &q, extent, token) < 1) break; if (LocaleCompare(token, "pop") != 0) continue; (void)GetNextToken(q, (const char **)NULL, extent, token); if (LocaleCompare(token, "class") != 0) continue; break; } (void)GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("clip-path", token) == 0) { (void)GetNextToken(q, &q, extent, token); for (p = q; *q != '\0';) { if (GetNextToken(q, &q, extent, token) < 1) break; if (LocaleCompare(token, "pop") != 0) continue; (void)GetNextToken(q, (const char **)NULL, extent, token); if (LocaleCompare(token, "clip-path") != 0) continue; break; } if ((q == (char *)NULL) || (p == (char *)NULL) || ((q - 4) < p)) { status = MagickFalse; break; } (void)GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("defs", token) == 0) { defsDepth++; graphic_context[n]->render = defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient", token) == 0) { char key[2 * MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; (void)GetNextToken(q, &q, extent, token); (void)CopyMagickString(name, token, MagickPathExtent); (void)GetNextToken(q, &q, extent, token); (void)CopyMagickString(type, token, MagickPathExtent); (void)GetNextToken(q, &q, extent, token); segment.x1 = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); segment.y1 = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); segment.x2 = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); segment.y2 = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); if (LocaleCompare(type, "radial") == 0) { (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); } for (p = q; *q != '\0';) { if (GetNextToken(q, &q, extent, token) < 1) break; if (LocaleCompare(token, "pop") != 0) continue; (void)GetNextToken(q, (const char **)NULL, extent, token); if (LocaleCompare(token, "gradient") != 0) continue; break; } if ((q == (char *)NULL) || (p == (char *)NULL) || ((q - 4) < p)) { status = MagickFalse; break; } (void)CopyMagickString(token, p, (size_t) (q - p - 4 + 1)); bounds.x1 = graphic_context[n]->affine.sx * segment.x1 + graphic_context[n]->affine.ry * segment.y1 + graphic_context[n]->affine.tx; bounds.y1 = graphic_context[n]->affine.rx * segment.x1 + graphic_context[n]->affine.sy * segment.y1 + graphic_context[n]->affine.ty; bounds.x2 = graphic_context[n]->affine.sx * segment.x2 + graphic_context[n]->affine.ry * segment.y2 + graphic_context[n]->affine.tx; bounds.y2 = graphic_context[n]->affine.rx * segment.x2 + graphic_context[n]->affine.sy * segment.y2 + graphic_context[n]->affine.ty; (void)FormatLocaleString(key, MagickPathExtent, "%s", name); (void)SetImageArtifact(image, key, token); (void)FormatLocaleString(key, MagickPathExtent, "%s-type", name); (void)SetImageArtifact(image, key, type); (void)FormatLocaleString(key, MagickPathExtent, "%s-geometry", name); (void)FormatLocaleString(geometry, MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2 - bounds.x1 + 1.0), 1.0), MagickMax(fabs(bounds.y2 - bounds.y1 + 1.0), 1.0), bounds.x1, bounds.y1); (void)SetImageArtifact(image, key, geometry); (void)GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("graphic-context", token) == 0) { n++; graphic_context = (DrawInfo **) ResizeQuantumMemory( graphic_context, (size_t) (n + 1), sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); break; } graphic_context[n] = CloneDrawInfo((ImageInfo *) NULL, graphic_context[n - 1]); if (*q == '"') { (void)GetNextToken(q, &q, extent, token); (void)CloneString(&graphic_context[n]->id, token); } break; } if (LocaleCompare("mask", token) == 0) { (void)GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("pattern", 
token) == 0) { char key[2 * MagickPathExtent], name[MagickPathExtent]; RectangleInfo bounds; (void)GetNextToken(q, &q, extent, token); (void)CopyMagickString(name, token, MagickPathExtent); (void)GetNextToken(q, &q, extent, token); bounds.x = (ssize_t) ceil(StringToDouble(token, &next_token) - 0.5); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); bounds.y = (ssize_t) ceil(StringToDouble(token, &next_token) - 0.5); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); bounds.width = (size_t) floor(StringToDouble(token, &next_token) + 0.5); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); bounds.height = (size_t) floor(StringToDouble(token, &next_token) + 0.5); if (token == next_token) ThrowPointExpectedException(token, exception); for (p = q; *q != '\0';) { if (GetNextToken(q, &q, extent, token) < 1) break; if (LocaleCompare(token, "pop") != 0) continue; (void)GetNextToken(q, (const char **)NULL, extent, token); if (LocaleCompare(token, "pattern") != 0) continue; break; } if ((q == (char *)NULL) || (p == (char *)NULL) || ((q - 4) < p)) { status = MagickFalse; break; } (void)CopyMagickString(token, p, (size_t) (q - p - 4 + 1)); (void)FormatLocaleString(key, MagickPathExtent, "%s", name); (void)SetImageArtifact(image, key, token); (void)FormatLocaleString(key, MagickPathExtent, "%s-geometry", name); (void)FormatLocaleString(geometry, MagickPathExtent, "%.20gx%.20g%+.20g%+.20g", (double)bounds.width, (double) bounds.height, (double)bounds.x, (double)bounds.y); (void)SetImageArtifact(image, key, geometry); (void)GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("symbol", token) == 0) { symbolDepth++; graphic_context[n]->render = symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status = MagickFalse; break; } status = MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle", keyword) == 0) { primitive_type = RectanglePrimitive; break; } if (LocaleCompare("rotate", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); angle = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); affine.sx = cos(DegreesToRadians(fmod((double)angle, 360.0))); affine.rx = sin(DegreesToRadians(fmod((double)angle, 360.0))); affine.ry = (-sin(DegreesToRadians(fmod((double)angle, 360.0)))); affine.sy = cos(DegreesToRadians(fmod((double)angle, 360.0))); break; } if (LocaleCompare("roundRectangle", keyword) == 0) { primitive_type = RoundRectanglePrimitive; break; } status = MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); affine.sx = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); affine.sy = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } if (LocaleCompare("skewX", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); angle = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); affine.ry = sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); angle = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); affine.rx = (-tan(DegreesToRadians(angle) / 2.0)); break; } if (LocaleCompare("stop-color", keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops = (StopInfo *) AcquireQuantumMemory(2, sizeof(*stops)); else if (number_stops > 2) stops = (StopInfo *) ResizeQuantumMemory(stops, number_stops, sizeof(*stops)); if (stops == (StopInfo *) NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); break; } (void)GetNextToken(q, &q, extent, token); status &= QueryColorCompliance(token, AllCompliance, &stop_color, exception); stops[number_stops - 1].color = stop_color; (void)GetNextToken(q, &q, extent, token); factor = strchr(token, '%') != (char *)NULL ? 0.01 : 1.0; stops[number_stops - 1].offset = factor * StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } if (LocaleCompare("stroke", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); if (graphic_context[n]->clip_path != MagickFalse) break; (void)FormatLocaleString(pattern, MagickPathExtent, "%s", token); if (GetImageArtifact(image, pattern) != (const char *)NULL) (void)DrawPatternPath(image, draw_info, token, &graphic_context[n]->stroke_pattern, exception); else { status &= QueryColorCompliance(token, AllCompliance, &graphic_context[n]->stroke, exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha = graphic_context[n]->stroke_alpha; } break; } if (LocaleCompare("stroke-antialias", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->stroke_antialias = StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray", keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *)NULL) graphic_context[n]->dash_pattern = (double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r = q; (void)GetNextToken(r, &r, extent, token); if (*token == ',') (void)GetNextToken(r, &r, extent, token); for (x = 0; IsPoint(token) != MagickFalse; x++) { (void)GetNextToken(r, &r, extent, token); if (*token == ',') (void)GetNextToken(r, &r, extent, token); } graphic_context[n]->dash_pattern = (double *) AcquireQuantumMemory((size_t) (2 * x + 2), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *)NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); status = MagickFalse; break; } (void)memset(graphic_context[n]->dash_pattern, 0, (size_t) (2 * x + 2) * sizeof(*graphic_context[n]->dash_pattern)); for (j = 0; j < x; j++) { (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); graphic_context[n]->dash_pattern[j] = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); if (graphic_context[n]->dash_pattern[j] < 0.0) status = MagickFalse; } if ((x & 0x01) != 0) for (; j < (2 * x); j++) graphic_context[n]->dash_pattern[j] = graphic_context[n]->dash_pattern[j - x]; graphic_context[n]->dash_pattern[j] = 0.0; break; } (void)GetNextToken(q, &q, extent, token); break; } if (LocaleCompare("stroke-dashoffset", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->dash_offset = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } if (LocaleCompare("stroke-linecap", keyword) == 0) { ssize_t linecap; (void)GetNextToken(q, &q, extent, token); linecap = ParseCommandOption(MagickLineCapOptions, MagickFalse, token); if (linecap == -1) { status = MagickFalse; break; } graphic_context[n]->linecap = (LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin", keyword) == 0) { ssize_t linejoin; (void)GetNextToken(q, &q, extent, token); linejoin = ParseCommandOption(MagickLineJoinOptions, MagickFalse, token); if (linejoin == -1) { status = MagickFalse; break; } graphic_context[n]->linejoin = (LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->miterlimit = StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity", keyword) == 0) { double opacity; (void)GetNextToken(q, &q, extent, token); if (graphic_context[n]->clip_path != MagickFalse) break; factor = strchr(token, '%') != (char *)NULL ? 
0.01 : 1.0; opacity = MagickMin(MagickMax(factor * StringToDouble(token, &next_token), 0.0), 1.0); if (token == next_token) ThrowPointExpectedException(token, exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->stroke_alpha *= opacity; else graphic_context[n]->stroke_alpha = QuantumRange * opacity; if (graphic_context[n]->stroke.alpha != TransparentAlpha) graphic_context[n]->stroke.alpha = graphic_context[n]->stroke_alpha; else graphic_context[n]->stroke.alpha = (MagickRealType) ClampToQuantum(QuantumRange * (1.0 - opacity)); break; } if (LocaleCompare("stroke-width", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } status = MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text", keyword) == 0) { primitive_type = TextPrimitive; cursor = 0.0; break; } if (LocaleCompare("text-align", keyword) == 0) { ssize_t align; (void)GetNextToken(q, &q, extent, token); align = ParseCommandOption(MagickAlignOptions, MagickFalse, token); if (align == -1) { status = MagickFalse; break; } graphic_context[n]->align = (AlignType) align; break; } if (LocaleCompare("text-anchor", keyword) == 0) { ssize_t align; (void)GetNextToken(q, &q, extent, token); align = ParseCommandOption(MagickAlignOptions, MagickFalse, token); if (align == -1) { status = MagickFalse; break; } graphic_context[n]->align = (AlignType) align; break; } if (LocaleCompare("text-antialias", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->text_antialias = StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); status &= QueryColorCompliance(token, AllCompliance, &graphic_context[n]->undercolor, exception); break; } if (LocaleCompare("translate", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); affine.tx = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); affine.ty = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); cursor = 0.0; break; } status = MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use", keyword) == 0) { const char *use; /* * Get a macro from the MVG document, and "use" it here. 
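A macro is any named MVG fragment that was collected into the
'macros' splay-tree when the document was first scanned (for
example, the body of a push symbol ... pop symbol block, keyed by
its id).  "use" replays that fragment through a cloned DrawInfo so
the active graphic context is left untouched, and rendering recurses
with depth+1 so the renderer can bound nested expansion.  A sketch
of the flow (the identifier "star" is hypothetical):

  use "star"   -->   clone the current DrawInfo
                     clone_info->primitive = body of "star"
                     RenderMVGContent(image,clone_info,depth+1,...)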
*/ (void)GetNextToken(q, &q, extent, token); use = (const char *)GetValueFromSplayTree(macros, token); if (use != (const char *)NULL) { clone_info = CloneDrawInfo((ImageInfo *) NULL, graphic_context[n]); (void)CloneString(&clone_info->primitive, use); status = RenderMVGContent(image, clone_info, depth + 1, exception); clone_info = DestroyDrawInfo(clone_info); } break; } break; } case 'v': case 'V': { if (LocaleCompare("viewbox", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->viewbox.x = (ssize_t) ceil(StringToDouble(token, &next_token) - 0.5); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); graphic_context[n]->viewbox.y = (ssize_t) ceil(StringToDouble(token, &next_token) - 0.5); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); graphic_context[n]->viewbox.width = (size_t) floor(StringToDouble( token, &next_token) + 0.5); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); graphic_context[n]->viewbox.height = (size_t) floor(StringToDouble( token, &next_token) + 0.5); if (token == next_token) ThrowPointExpectedException(token, exception); break; } status = MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing", keyword) == 0) { (void)GetNextToken(q, &q, extent, token); graphic_context[n]->interword_spacing = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); break; } status = MagickFalse; break; } default: { status = MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx - 1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) || (fabs(affine.sy - 1.0) >= MagickEpsilon) || (fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx = current.sx * affine.sx + current.ry * affine.rx; graphic_context[n]->affine.rx = current.rx * affine.sx + current.sy * affine.rx; graphic_context[n]->affine.ry = current.sx * affine.ry + current.ry * affine.sy; graphic_context[n]->affine.sy = current.rx * affine.ry + current.sy * affine.sy; graphic_context[n]->affine.tx = current.sx * affine.tx + current.ry * affine.ty + current.tx; graphic_context[n]->affine.ty = current.rx * affine.tx + current.sy * affine.ty + current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type = LinearGradient; if (draw_info->gradient.type == RadialGradient) type = RadialGradient; (void)GradientImage(image, type, PadSpread, stops, number_stops, exception); } if (number_stops > 0) stops = (StopInfo *) RelinquishMagickMemory(stops); } if ((image->debug != MagickFalse) && (q > p)) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %.*s", (int) (q - p - 1), p); continue; } /* * Parse the primitive attributes. 
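Every primitive keyword is followed by a coordinate list; the loop
below pulls x,y pairs, tolerating an optional comma between tokens
and rejecting any token StringToDouble cannot consume.  The idiom,
distilled into a hypothetical helper (not part of this file; the
token buffer is assumed to be MagickPathExtent-sized):

  static MagickBooleanType ReadPoint(const char **q,const size_t extent,
    char *token,PointInfo *point)
  {
    char *next;

    (void) GetNextToken(*q,q,extent,token);
    point->x=StringToDouble(token,&next);
    if (token == next)
      return(MagickFalse);
    (void) GetNextToken(*q,q,extent,token);
    if (*token == ',')
      (void) GetNextToken(*q,q,extent,token);
    point->y=StringToDouble(token,&next);
    return(token != next ? MagickTrue : MagickFalse);
  }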
*/ for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *)NULL) primitive_info[i].text = DestroyString(primitive_info[i].text); i = 0; mvg_info.offset = i; j = 0; primitive_info[0].point.x = 0.0; primitive_info[0].point.y = 0.0; primitive_info[0].coordinates = 0; primitive_info[0].method = FloodfillMethod; primitive_info[0].closed_subpath = MagickFalse; for (x = 0; *q != '\0'; x++) { /* * Define points. */ if (IsPoint(q) == MagickFalse) break; (void)GetNextToken(q, &q, extent, token); point.x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, &q, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); point.y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(q, (const char **)NULL, extent, token); if (*token == ',') (void)GetNextToken(q, &q, extent, token); primitive_info[i].primitive = primitive_type; primitive_info[i].point = point; primitive_info[i].coordinates = 0; primitive_info[i].method = FloodfillMethod; primitive_info[i].closed_subpath = MagickFalse; i++; mvg_info.offset = i; if (i < (ssize_t) number_points) continue; status &= CheckPrimitiveExtent(&mvg_info, number_points); } if (status == MagickFalse) break; if ((primitive_info[j].primitive == TextPrimitive) || (primitive_info[j].primitive == ImagePrimitive)) if (primitive_info[j].text != (char *)NULL) primitive_info[j].text = DestroyString(primitive_info[j].text); primitive_info[j].primitive = primitive_type; primitive_info[j].coordinates = (size_t) x; primitive_info[j].method = FloodfillMethod; primitive_info[j].closed_subpath = MagickFalse; /* * Circumscribe primitive within a circle. */ bounds.x1 = primitive_info[j].point.x; bounds.y1 = primitive_info[j].point.y; bounds.x2 = primitive_info[j].point.x; bounds.y2 = primitive_info[j].point.y; for (k = 1; k < (ssize_t) primitive_info[j].coordinates; k++) { point = primitive_info[j + k].point; if (point.x < bounds.x1) bounds.x1 = point.x; if (point.y < bounds.y1) bounds.y1 = point.y; if (point.x > bounds.x2) bounds.x2 = point.x; if (point.y > bounds.y2) bounds.y2 = point.y; } /* * Speculate how many points our primitive might consume. 
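The estimates below only need to be safe upper bounds; the exact
count is recorded once the primitive is actually traced.  A worked
example for an ellipse, with BezierQuantum at its compiled value of
200: a 160x120 bounding box gives radius hypot(160,120) = 200, so
the reservation is

  2*ceil(MagickPI*200) + 6*200 + 360  =  1258 + 1200 + 360  =  2818

points, comfortably above what TraceEllipse actually emits.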
*/ coordinates = (double)primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates *= 5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha = bounds.x2 - bounds.x1; beta = bounds.y2 - bounds.y1; radius = hypot((double)alpha, (double)beta); coordinates *= 5.0; coordinates += 2.0 * ((size_t) ceil((double)MagickPI * radius)) + 6.0 * BezierQuantum + 360.0; break; } case BezierPrimitive: { coordinates = (double)(BezierQuantum * primitive_info[j].coordinates); if (primitive_info[j].coordinates > (107 * BezierQuantum)) { (void)ThrowMagickException(exception, GetMagickModule(), DrawError, "TooManyBezierCoordinates", "`%s'", token); status = MagickFalse; break; } break; } case PathPrimitive: { char *s, *t; (void)GetNextToken(q, &q, extent, token); coordinates = 1.0; t = token; for (s = token; *s != '\0'; s = t) { double value; value = StringToDouble(s, &t); (void)value; if (s == t) { t++; continue; } coordinates++; } for (s = token; *s != '\0'; s++) if (strspn(s, "AaCcQqSsTt") != 0) coordinates += (20.0 * BezierQuantum) + 360.0; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha = bounds.x2 - bounds.x1; beta = bounds.y2 - bounds.y1; radius = hypot(alpha, beta); coordinates = 2.0 * (ceil(MagickPI * radius)) + 6.0 * BezierQuantum + 360.0; break; } default: break; } if (status == MagickFalse) break; if (((size_t) (i + coordinates)) >= number_points) { /* * Resize based on speculative points required by primitive. */ number_points += coordinates + 1; if (number_points < (size_t) coordinates) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); break; } mvg_info.offset = i; status &= CheckPrimitiveExtent(&mvg_info, number_points); } status &= CheckPrimitiveExtent(&mvg_info, PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset = j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status = MagickFalse; break; } status &= TracePoint(primitive_info + j, primitive_info[j].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status = MagickFalse; break; } status &= TraceLine(primitive_info + j, primitive_info[j].point, primitive_info[j + 1].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status = MagickFalse; break; } status &= TraceRectangle(primitive_info + j, primitive_info[j].point, primitive_info[j + 1].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status = MagickFalse; break; } if ((primitive_info[j + 2].point.x < 0.0) || (primitive_info[j + 2].point.y < 0.0)) { status = MagickFalse; break; } if ((primitive_info[j + 1].point.x - primitive_info[j].point.x) < 0.0) { status = MagickFalse; break; } if ((primitive_info[j + 1].point.y - primitive_info[j].point.y) < 0.0) { status = MagickFalse; break; } status &= TraceRoundRectangle(&mvg_info, primitive_info[j].point, primitive_info[j + 1].point, primitive_info[j + 2].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { primitive_type = UndefinedPrimitive; break; } status &= TraceArc(&mvg_info, primitive_info[j].point, primitive_info[j + 1].point, 
primitive_info[j + 2].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status = MagickFalse; break; } if ((primitive_info[j + 1].point.x < 0.0) || (primitive_info[j + 1].point.y < 0.0)) { status = MagickFalse; break; } status &= TraceEllipse(&mvg_info, primitive_info[j].point, primitive_info[j + 1].point, primitive_info[j + 2].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status = MagickFalse; break; } status &= TraceCircle(&mvg_info, primitive_info[j].point, primitive_info[j + 1].point); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status = MagickFalse; break; } break; } case PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status = MagickFalse; break; } primitive_info[i] = primitive_info[j]; primitive_info[i].coordinates = 0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath = MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status = MagickFalse; break; } status &= TraceBezier(&mvg_info, primitive_info[j].coordinates); i = (ssize_t) (j + primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates = (double)TracePath(&mvg_info, token, exception); if (coordinates < 0.0) { status = MagickFalse; break; } i = (ssize_t) (j + coordinates); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status = MagickFalse; break; } (void)GetNextToken(q, &q, extent, token); method = ParseCommandOption(MagickMethodOptions, MagickFalse, token); if (method == -1) { status = MagickFalse; break; } primitive_info[j].method = (PaintMethod) method; break; } case TextPrimitive: { char geometry[MagickPathExtent]; if (primitive_info[j].coordinates != 1) { status = MagickFalse; break; } if (*token != ',') (void)GetNextToken(q, &q, extent, token); (void)CloneString(&primitive_info[j].text, token); /* * Compute text cursor offset. */ clone_info = CloneDrawInfo((ImageInfo *) NULL, graphic_context[n]); if ((fabs(mvg_info.point.x - primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y - primitive_info->point.y) < MagickEpsilon)) { mvg_info.point = primitive_info->point; primitive_info->point.x += cursor; } else { mvg_info.point = primitive_info->point; cursor = 0.0; } (void)FormatLocaleString(geometry, MagickPathExtent, "%+f%+f", primitive_info->point.x, primitive_info->point.y); clone_info->render = MagickFalse; clone_info->text = AcquireString(token); status &= GetTypeMetrics(image, clone_info, &metrics, exception); clone_info = DestroyDrawInfo(clone_info); cursor += metrics.width; if (graphic_context[n]->compliance != SVGCompliance) cursor = 0.0; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status = MagickFalse; break; } (void)GetNextToken(q, &q, extent, token); (void)CloneString(&primitive_info[j].text, token); break; } } mvg_info.offset = i; if ((image->debug != MagickFalse) && (q > p)) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %.*s", (int)(q - p - 1), p); if (status == MagickFalse) break; primitive_info[i].primitive = UndefinedPrimitive; if (i == 0) continue; /* * Transform points. 
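Points were traced in user space; the loop below maps them to device
space with the accumulated 2x3 affine,

  x' = sx*x + ry*y + tx
  y' = rx*x + sy*y + ty

or equivalently, as a minimal sketch (hypothetical helper):

  static inline PointInfo ApplyAffine(const AffineMatrix *a,
    const PointInfo p)
  {
    PointInfo q;

    q.x=a->sx*p.x+a->ry*p.y+a->tx;
    q.y=a->rx*p.x+a->sy*p.y+a->ty;
    return(q);
  }

widening the graphic context's bounds to cover each mapped point.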
*/ for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point = primitive_info[i].point; primitive_info[i].point.x = graphic_context[n]->affine.sx * point.x + graphic_context[n]->affine.ry * point.y + graphic_context[n]->affine.tx; primitive_info[i].point.y = graphic_context[n]->affine.rx * point.x + graphic_context[n]->affine.sy * point.y + graphic_context[n]->affine.ty; point = primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1 = point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1 = point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2 = point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2 = point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError, "MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *)NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n - 1]->clip_mask) != 0)) { const char *clip_path; clip_path = (const char *)GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *)NULL) (void)SetImageArtifact(image, graphic_context[n]->clip_mask, clip_path); status &= DrawClipPath(image, graphic_context[n], graphic_context[n]->clip_mask, exception); } status &= DrawPrimitive(image, graphic_context[n], primitive_info, exception); } proceed = SetImageProgress(image, RenderImageTag, q - primitive, (MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "end draw-image"); /* * Relinquish resources. */ macros = DestroySplayTree(macros); token = DestroyString(token); if (primitive_info != (PrimitiveInfo *) NULL) { for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *)NULL) primitive_info[i].text = DestroyString(primitive_info[i].text); primitive_info = (PrimitiveInfo *) RelinquishMagickMemory(primitive_info); } primitive = DestroyString(primitive); if (stops != (StopInfo *) NULL) stops = (StopInfo *) RelinquishMagickMemory(stops); for (; n >= 0; n--) graphic_context[n] = DestroyDrawInfo(graphic_context[n]); graphic_context = (DrawInfo **) RelinquishMagickMemory(graphic_context); if (status == MagickFalse) ThrowBinaryException(DrawError, "NonconformingDrawingPrimitiveDefinition", keyword); return (status != 0 ? MagickTrue : MagickFalse); } MagickExport MagickBooleanType DrawImage(Image * image, const DrawInfo * draw_info, ExceptionInfo * exception) { return (RenderMVGContent(image, draw_info, 0, exception)); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w P a t t e r n P a t h * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawPatternPath() draws a pattern. % % The format of the * DrawPatternPath method is: % % MagickBooleanType * DrawPatternPath(Image *image,const DrawInfo *draw_info, % const * char *name,Image **pattern,ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. % % o draw_info: the * draw info. 
% % o name: the pattern name. % % o image: the image. % % * o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType DrawPatternPath(Image * image, const DrawInfo * draw_info, const char *name, Image ** pattern, ExceptionInfo * exception) { char property[MagickPathExtent]; const char *geometry, *path, *type; DrawInfo * clone_info; ImageInfo * image_info; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (const DrawInfo *)NULL); assert(name != (const char *)NULL); (void)FormatLocaleString(property, MagickPathExtent, "%s", name); path = GetImageArtifact(image, property); if (path == (const char *)NULL) return (MagickFalse); (void)FormatLocaleString(property, MagickPathExtent, "%s-geometry", name); geometry = GetImageArtifact(image, property); if (geometry == (const char *)NULL) return (MagickFalse); if ((*pattern) != (Image *) NULL) *pattern = DestroyImage(*pattern); image_info = AcquireImageInfo(); image_info->size = AcquireString(geometry); *pattern = AcquireImage(image_info, exception); image_info = DestroyImageInfo(image_info); (void)QueryColorCompliance("#00000000", AllCompliance, &(*pattern)->background_color, exception); (void)SetImageBackgroundColor(*pattern, exception); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "begin pattern-path %s %s", name, geometry); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->fill_pattern = NewImageList(); clone_info->stroke_pattern = NewImageList(); (void)FormatLocaleString(property, MagickPathExtent, "%s-type", name); type = GetImageArtifact(image, property); if (type != (const char *)NULL) clone_info->gradient.type = (GradientType) ParseCommandOption( MagickGradientOptions, MagickFalse, type); (void)CloneString(&clone_info->primitive, path); status = RenderMVGContent(*pattern, clone_info, 0, exception); clone_info = DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), "end pattern-path"); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D r a w P o l y g o n P r i m i t i v e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The * format of the DrawPolygonPrimitive method is: % % MagickBooleanType * DrawPolygonPrimitive(Image *image, % const DrawInfo * *draw_info,const PrimitiveInfo *primitive_info, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o image: * the image. % % o draw_info: the draw info. % % o primitive_info: * Specifies a pointer to a PrimitiveInfo structure. % % o exception: * return any errors or warnings in this structure. 
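%
%  Rendering is a scanline sweep: every image row that intersects the
%  polygon's bounding box is processed independently against a per-thread
%  copy of the edge list, and each pixel receives a fill alpha from the
%  winding rule plus a stroke alpha from its distance to the nearest edge.
%  A sketch of the fill decision (illustrative only):
%
%    even/odd fill rule:  filled when (|winding_number| & 1) != 0
%    nonzero fill rule:   filled when  |winding_number|      != 0
%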
% */ static PolygonInfo ** DestroyPolygonThreadSet(PolygonInfo ** polygon_info) { register ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i = 0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i] = DestroyPolygonInfo(polygon_info[i]); polygon_info = (PolygonInfo **) RelinquishMagickMemory(polygon_info); return (polygon_info); } static PolygonInfo ** AcquirePolygonThreadSet( const PrimitiveInfo * primitive_info) { PathInfo * magick_restrict path_info; PolygonInfo ** polygon_info; register ssize_t i; size_t number_threads; number_threads = (size_t) GetMagickResourceLimit(ThreadResource); polygon_info = (PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) return ((PolygonInfo **) NULL); (void)memset(polygon_info, 0, number_threads * sizeof(*polygon_info)); path_info = ConvertPrimitiveToPath(primitive_info); if (path_info == (PathInfo *) NULL) return (DestroyPolygonThreadSet(polygon_info)); for (i = 0; i < (ssize_t) number_threads; i++) { polygon_info[i] = ConvertPathToPolygon(path_info); if (polygon_info[i] == (PolygonInfo *) NULL) return (DestroyPolygonThreadSet(polygon_info)); } path_info = (PathInfo *) RelinquishMagickMemory(path_info); return (polygon_info); } static double GetFillAlpha(PolygonInfo * polygon_info, const double mid, const MagickBooleanType fill, const FillRule fill_rule, const ssize_t x, const ssize_t y, double *stroke_alpha) { double alpha, beta, distance, subpath_alpha; PointInfo delta; register const PointInfo * q; register EdgeInfo * p; register ssize_t i; ssize_t j, winding_number; /* * Compute fill & stroke opacity for this (x,y) point. */ *stroke_alpha = 0.0; subpath_alpha = 0.0; p = polygon_info->edges; for (j = 0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double)y <= (p->bounds.y1 - mid - 0.5)) break; if ((double)y > (p->bounds.y2 + mid + 0.5)) { (void)DestroyEdge(polygon_info, (size_t) j); continue; } if (((double)x <= (p->bounds.x1 - mid - 0.5)) || ((double)x > (p->bounds.x2 + mid + 0.5))) continue; i = (ssize_t) MagickMax((double)p->highwater, 1.0); for (; i < (ssize_t) p->number_points; i++) { if ((double)y <= (p->points[i - 1].y - mid - 0.5)) break; if ((double)y > (p->points[i].y + mid + 0.5)) continue; if (p->scanline != (double)y) { p->scanline = (double)y; p->highwater = (size_t) i; } /* * Compute distance between a point and an edge. */ q = p->points + i - 1; delta.x = (q + 1)->x - q->x; delta.y = (q + 1)->y - q->y; beta = delta.x * (x - q->x) + delta.y * (y - q->y); if (beta <= 0.0) { delta.x = (double)x - q->x; delta.y = (double)y - q->y; distance = delta.x * delta.x + delta.y * delta.y; } else { alpha = delta.x * delta.x + delta.y * delta.y; if (beta >= alpha) { delta.x = (double)x - (q + 1)->x; delta.y = (double)y - (q + 1)->y; distance = delta.x * delta.x + delta.y * delta.y; } else { alpha = PerceptibleReciprocal(alpha); beta = delta.x * (y - q->y) - delta.y * (x - q->x) + MagickEpsilon; distance = alpha * beta * beta; } } /* * Compute stroke & subpath opacity. 
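The squared distance just computed comes from projecting the pixel p
onto the segment q -> q+1 with direction d=(dx,dy): beta = d.(p-q)
selects the nearest endpoint when the projection falls outside the
segment, and otherwise the perpendicular case applies,

  distance^2 = (dx*(y-qy) - dy*(x-qx))^2 / (dx^2 + dy^2)

i.e. the squared cross product over the squared segment length.
Stroke alpha then falls off across roughly a one-pixel band around
distance = mid, which the thresholds below implement.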
*/ beta = 0.0; if (p->ghostline == MagickFalse) { alpha = mid + 0.5; if ((*stroke_alpha < 1.0) && (distance <= ((alpha + 0.25) * (alpha + 0.25)))) { alpha = mid - 0.5; if (distance <= ((alpha + 0.25) * (alpha + 0.25))) *stroke_alpha = 1.0; else { beta = 1.0; if (fabs(distance - 1.0) >= MagickEpsilon) beta = sqrt((double)distance); alpha = beta - mid - 0.5; if (*stroke_alpha < ((alpha - 0.25) * (alpha - 0.25))) *stroke_alpha = (alpha - 0.25) * (alpha - 0.25); } } } if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0)) continue; if (distance <= 0.0) { subpath_alpha = 1.0; continue; } if (distance > 1.0) continue; if (fabs(beta) < MagickEpsilon) { beta = 1.0; if (fabs(distance - 1.0) >= MagickEpsilon) beta = sqrt(distance); } alpha = beta - 1.0; if (subpath_alpha < (alpha * alpha)) subpath_alpha = alpha * alpha; } } /* * Compute fill opacity. */ if (fill == MagickFalse) return (0.0); if (subpath_alpha >= 1.0) return (1.0); /* * Determine winding number. */ winding_number = 0; p = polygon_info->edges; for (j = 0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double)y <= p->bounds.y1) break; if (((double)y > p->bounds.y2) || ((double)x <= p->bounds.x1)) continue; if ((double)x > p->bounds.x2) { winding_number += p->direction ? 1 : -1; continue; } i = (ssize_t) MagickMax((double)p->highwater, 1.0); for (; i < (ssize_t) (p->number_points - 1); i++) if ((double)y <= p->points[i].y) break; q = p->points + i - 1; if ((((q + 1)->x - q->x) * (y - q->y)) <= (((q + 1)->y - q->y) * (x - q->x))) winding_number += p->direction ? 1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return (1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return (1.0); return (subpath_alpha); } static MagickBooleanType DrawPolygonPrimitive(Image * image, const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, ExceptionInfo * exception) { CacheView * image_view; MagickBooleanType fill, status; double mid; PolygonInfo ** magick_restrict polygon_info; register EdgeInfo * p; register ssize_t i; SegmentInfo bounds; ssize_t start_y, stop_y, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (primitive_info->coordinates <= 1) return (MagickTrue); /* * Compute bounding box. */ polygon_info = AcquirePolygonThreadSet(primitive_info); if (polygon_info == (PolygonInfo **) NULL) return (MagickFalse); DisableMSCWarning(4127) if (0) { status = DrawBoundingRectangles(image, draw_info, polygon_info[0], exception); if (status == MagickFalse) { polygon_info = DestroyPolygonThreadSet(polygon_info); return (status); } } RestoreMSCWarning if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin draw-polygon"); fill = (primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? 
MagickTrue : MagickFalse; mid = ExpandAffine(&draw_info->affine) * SaneStrokeWidth(image, draw_info) / 2.0; bounds = polygon_info[0]->edges[0].bounds; for (i = 1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p = polygon_info[0]->edges + i; if (p->bounds.x1 < bounds.x1) bounds.x1 = p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1 = p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2 = p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2 = p->bounds.y2; } bounds.x1 -= (mid + 1.0); bounds.y1 -= (mid + 1.0); bounds.x2 += (mid + 1.0); bounds.y2 += (mid + 1.0); if ((bounds.x1 >= (double)image->columns) || (bounds.y1 >= (double)image->rows) || (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0)) { polygon_info = DestroyPolygonThreadSet(polygon_info); return (MagickTrue); /* virtual polygon */ } bounds.x1 = bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)image->columns - 1.0 ? (double)image->columns - 1.0 : bounds.x1; bounds.y1 = bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)image->rows - 1.0 ? (double)image->rows - 1.0 : bounds.y1; bounds.x2 = bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)image->columns - 1.0 ? (double)image->columns - 1.0 : bounds.x2; bounds.y2 = bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)image->rows - 1.0 ? (double)image->rows - 1.0 : bounds.y2; status = MagickTrue; image_view = AcquireAuthenticCacheView(image, exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* * Draw point. */ start_y = (ssize_t) ceil(bounds.y1 - 0.5); stop_y = (ssize_t) floor(bounds.y2 + 0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y = start_y; y <= stop_y; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum * magick_restrict q; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x = (ssize_t) ceil(bounds.x1 - 0.5); stop_x = (ssize_t) floor(bounds.x2 + 0.5); x = start_x; q = GetCacheViewAuthenticPixels(image_view, x, y, (size_t) (stop_x - x + 1), 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } GetPixelInfo(image, &pixel); for (; x <= stop_x; x++) { if ((x == (ssize_t) ceil(primitive_info->point.x - 0.5)) && (y == (ssize_t) ceil(primitive_info->point.y - 0.5))) { GetFillColor(draw_info, x - start_x, y - start_y, &pixel, exception); SetPixelViaPixelInfo(image, &pixel, q); } q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); polygon_info = DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-polygon"); return (status); } /* * Draw polygon or line. 
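Rows are independent, so the sweep parallelizes one scanline per
iteration with a private PolygonInfo per OpenMP thread (edges are
retired destructively as y advances, so threads cannot share a
single copy).  The loop shape, reduced to a sketch:

  #pragma omp parallel for schedule(static) shared(status)
  for (y=start_y; y <= stop_y; y++)
  {
    const int id = GetOpenMPThreadId();

    ...fetch the row, then for each x composite the fill and stroke
       colors weighted by GetFillAlpha(polygon_info[id],...)...
  }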
*/ start_y = (ssize_t) ceil(bounds.y1 - 0.5); stop_y = (ssize_t) floor(bounds.y2 + 0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y = start_y; y <= stop_y; y++) { const int id = GetOpenMPThreadId(); register Quantum * magick_restrict q; register ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x = (ssize_t) ceil(bounds.x1 - 0.5); stop_x = (ssize_t) floor(bounds.x2 + 0.5); q = GetCacheViewAuthenticPixels(image_view, start_x, y, (size_t) (stop_x - start_x + 1), 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = start_x; x <= stop_x; x++) { double fill_alpha, stroke_alpha; PixelInfo fill_color, stroke_color; /* * Fill and/or stroke. */ fill_alpha = GetFillAlpha(polygon_info[id], mid, fill, draw_info->fill_rule, x, y, &stroke_alpha); if (draw_info->stroke_antialias == MagickFalse) { fill_alpha = fill_alpha > 0.25 ? 1.0 : 0.0; stroke_alpha = stroke_alpha > 0.25 ? 1.0 : 0.0; } GetFillColor(draw_info, x - start_x, y - start_y, &fill_color, exception); CompositePixelOver(image, &fill_color, fill_alpha * fill_color.alpha, q, (double)GetPixelAlpha(image, q), q); GetStrokeColor(draw_info, x - start_x, y - start_y, &stroke_color, exception); CompositePixelOver(image, &stroke_color, stroke_alpha * stroke_color.alpha, q, (double)GetPixelAlpha(image, q), q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); polygon_info = DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-polygon"); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D r a w P r i m i t i v e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawPrimitive() draws a primitive (line, rectangle, ellipse) on * the image. % % The format of the DrawPrimitive method is: % % * MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info, % * PrimitiveInfo *primitive_info,ExceptionInfo *exception) % % A description * of each parameter follows: % % o image: the image. % % o draw_info: * the draw info. % % o primitive_info: Specifies a pointer to a * PrimitiveInfo structure. % % o exception: return any errors or warnings * in this structure. % */ static inline double ConstrainCoordinate(double x) { if (x < (double)-(SSIZE_MAX - 512)) return ((double)-(SSIZE_MAX - 512)); if (x > (double)(SSIZE_MAX - 512)) return ((double)(SSIZE_MAX - 512)); return (x); } static void LogPrimitiveInfo(const PrimitiveInfo * primitive_info) { const char *methods[] = { "point", "replace", "floodfill", "filltoborder", "reset", "?" 
}; PointInfo p, point, q; register ssize_t i, x; ssize_t coordinates, y; x = (ssize_t) ceil(primitive_info->point.x - 0.5); y = (ssize_t) ceil(primitive_info->point.y - 0.5); switch (primitive_info->primitive) { case AlphaPrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "AlphaPrimitive %.20g,%.20g %s", (double)x, (double)y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "ColorPrimitive %.20g,%.20g %s", (double)x, (double)y, methods[primitive_info->method]); return; } case ImagePrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "ImagePrimitive %.20g,%.20g", (double)x, (double)y); return; } case PointPrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "PointPrimitive %.20g,%.20g %s", (double)x, (double)y, methods[primitive_info->method]); return; } case TextPrimitive: { (void)LogMagickEvent(DrawEvent, GetMagickModule(), "TextPrimitive %.20g,%.20g", (double)x, (double)y); return; } default: break; } coordinates = 0; p = primitive_info[0].point; q.x = (-1.0); q.y = (-1.0); for (i = 0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point = primitive_info[i].point; if (coordinates <= 0) { coordinates = (ssize_t) primitive_info[i].coordinates; (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin open (%.20g)", (double)coordinates); p = point; } point = primitive_info[i].point; if ((fabs(q.x - point.x) >= MagickEpsilon) || (fabs(q.y - point.y) >= MagickEpsilon)) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %.20g: %.18g,%.18g", (double)coordinates, point.x, point.y); else (void)LogMagickEvent(DrawEvent, GetMagickModule(), " %.20g: %g %g (duplicate)", (double)coordinates, point.x, point.y); q = point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x - point.x) >= MagickEpsilon) || (fabs(p.y - point.y) >= MagickEpsilon)) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end last (%.20g)", (double)coordinates); else (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end open (%.20g)", (double)coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image * image, const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, ExceptionInfo * exception) { CacheView * image_view; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin draw-primitive"); (void)LogMagickEvent(DrawEvent, GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g", draw_info->affine.sx, draw_info->affine.rx, draw_info->affine.ry, draw_info->affine.sy, draw_info->affine.tx, draw_info->affine.ty); } status = MagickTrue; if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) || (IsPixelInfoGray(&draw_info->stroke) == MagickFalse))) status &= SetImageColorspace(image, sRGBColorspace, exception); if (draw_info->compliance == SVGCompliance) { status &= SetImageMask(image, WritePixelMask, draw_info->clipping_mask, exception); status &= SetImageMask(image, CompositePixelMask, draw_info->composite_mask, exception); } x = (ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x - 0.5)); y = (ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y - 0.5)); image_view = AcquireAuthenticCacheView(image, exception); switch (primitive_info->primitive) { case AlphaPrimitive: { if (image->alpha_trait == UndefinedPixelTrait) status &= SetImageAlphaChannel(image, OpaqueAlphaChannel, exception); switch (primitive_info->method) { case 
PointMethod: default: { PixelInfo pixel; register Quantum * q; q = GetCacheViewAuthenticPixels(image_view, x, y, 1, 1, exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info, x, y, &pixel, exception); SetPixelAlpha(image, ClampToQuantum(pixel.alpha), q); status &= SyncCacheViewAuthenticPixels(image_view, exception); break; } case ReplaceMethod: { PixelInfo pixel, target; status &= GetOneCacheViewVirtualPixelInfo(image_view, x, y, &target, exception); GetPixelInfo(image, &pixel); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image, q, &pixel); if (IsFuzzyEquivalencePixelInfo(&pixel, &target) == MagickFalse) { q += GetPixelChannels(image); continue; } GetFillColor(draw_info, x, y, &pixel, exception); SetPixelAlpha(image, ClampToQuantum(pixel.alpha), q); q += GetPixelChannels(image); } status &= SyncCacheViewAuthenticPixels(image_view, exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { ChannelType channel_mask; PixelInfo target; status &= GetOneVirtualPixelInfo(image, TileVirtualPixelMethod, x, y, &target, exception); if (primitive_info->method == FillToBorderMethod) { target.red = (double)draw_info->border_color.red; target.green = (double)draw_info->border_color.green; target.blue = (double)draw_info->border_color.blue; } channel_mask = SetImageChannelMask(image, AlphaChannel); status &= FloodfillPaintImage(image, draw_info, &target, x, y, primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue, exception); (void)SetImageChannelMask(image, channel_mask); break; } case ResetMethod: { PixelInfo pixel; for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info, x, y, &pixel, exception); SetPixelAlpha(image, ClampToQuantum(pixel.alpha), q); q += GetPixelChannels(image); } status &= SyncCacheViewAuthenticPixels(image_view, exception); if (status == MagickFalse) break; } break; } } break; } case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum * q; q = GetCacheViewAuthenticPixels(image_view, x, y, 1, 1, exception); if (q == (Quantum *) NULL) break; GetPixelInfo(image, &pixel); GetFillColor(draw_info, x, y, &pixel, exception); SetPixelViaPixelInfo(image, &pixel, q); status &= SyncCacheViewAuthenticPixels(image_view, exception); break; } case ReplaceMethod: { PixelInfo pixel, target; status &= GetOneCacheViewVirtualPixelInfo(image_view, x, y, &target, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image, q, &pixel); if (IsFuzzyEquivalencePixelInfo(&pixel, &target) == MagickFalse) { q += GetPixelChannels(image); continue; } GetFillColor(draw_info, x, y, &pixel, exception); SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } status &= SyncCacheViewAuthenticPixels(image_view, exception); if (status == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { 
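/*
  FloodfillMethod grows the region from the seed pixel across pixels
  fuzzily equal to the seed color; FillToBorderMethod grows until it
  reaches border_color, which is why the target is overwritten below
  and the invert flag handed to FloodfillPaintImage flips.  An
  equivalent command-line sketch (coordinates hypothetical):

    magick in.png -fill red -draw "color 10,10 floodfill" out.png
*/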
PixelInfo target; status &= GetOneVirtualPixelInfo(image, TileVirtualPixelMethod, x, y, &target, exception); if (primitive_info->method == FillToBorderMethod) { target.red = (double)draw_info->border_color.red; target.green = (double)draw_info->border_color.green; target.blue = (double)draw_info->border_color.blue; } status &= FloodfillPaintImage(image, draw_info, &target, x, y, primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue, exception); break; } case ResetMethod: { PixelInfo pixel; GetPixelInfo(image, &pixel); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info, x, y, &pixel, exception); SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } status &= SyncCacheViewAuthenticPixels(image_view, exception); if (status == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MagickPathExtent]; Image * composite_image, *composite_images; ImageInfo * clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *)NULL) break; clone_info = AcquireImageInfo(); composite_images = (Image *) NULL; if (LocaleNCompare(primitive_info->text, "data:", 5) == 0) composite_images = ReadInlineImage(clone_info, primitive_info->text, exception); else if (*primitive_info->text != '\0') { (void)CopyMagickString(clone_info->filename, primitive_info->text, MagickPathExtent); composite_images = ReadImage(clone_info, exception); } clone_info = DestroyImageInfo(clone_info); if (composite_images == (Image *) NULL) { status = MagickFalse; break; } composite_image = RemoveFirstImageFromList(&composite_images); composite_images = DestroyImageList(composite_images); (void)SetImageProgressMonitor(composite_image, (MagickProgressMonitor) NULL, (void *)NULL); x1 = (ssize_t) ceil(primitive_info[1].point.x - 0.5); y1 = (ssize_t) ceil(primitive_info[1].point.y - 0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { /* * Resize image. 
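The '!' flag in the "%gx%g!" geometry forces the exact width and
height taken from primitive_info[1].point, ignoring the source's
aspect ratio: a 640x480 composite image drawn with a size point of
(100,50) is resampled to 100x50 before compositing.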
*/ (void)FormatLocaleString(composite_geometry, MagickPathExtent, "%gx%g!", primitive_info[1].point.x, primitive_info[1].point.y); composite_image->filter = image->filter; status &= TransformImage(&composite_image, (char *)NULL, composite_geometry, exception); } if (composite_image->alpha_trait == UndefinedPixelTrait) status &= SetImageAlphaChannel(composite_image, OpaqueAlphaChannel, exception); if (draw_info->alpha != OpaqueAlpha) status &= SetImageAlpha(composite_image, draw_info->alpha, exception); SetGeometry(image, &geometry); image->gravity = draw_info->gravity; geometry.x = x; geometry.y = y; (void)FormatLocaleString(composite_geometry, MagickPathExtent, "%.20gx%.20g%+.20g%+.20g", (double)composite_image->columns, (double) composite_image->rows, (double)geometry.x, (double)geometry.y); (void)ParseGravityGeometry(image, composite_geometry, &geometry, exception); affine = draw_info->affine; affine.tx = (double)geometry.x; affine.ty = (double)geometry.y; composite_image->interpolate = image->interpolate; if ((draw_info->compose == OverCompositeOp) || (draw_info->compose == SrcOverCompositeOp)) status &= DrawAffineImage(image, composite_image, &affine, exception); else status &= CompositeImage(image, composite_image, draw_info->compose, MagickTrue, geometry.x, geometry.y, exception); composite_image = DestroyImage(composite_image); break; } case PointPrimitive: { PixelInfo fill_color; register Quantum * q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q = GetCacheViewAuthenticPixels(image_view, x, y, 1, 1, exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info, x, y, &fill_color, exception); CompositePixelOver(image, &fill_color, (double)fill_color.alpha, q, (double) GetPixelAlpha(image, q), q); status &= SyncCacheViewAuthenticPixels(image_view, exception); break; } case TextPrimitive: { char geometry[MagickPathExtent]; DrawInfo * clone_info; if (primitive_info->text == (char *)NULL) break; clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); (void)CloneString(&clone_info->text, primitive_info->text); (void)FormatLocaleString(geometry, MagickPathExtent, "%+f%+f", primitive_info->point.x, primitive_info->point.y); (void)CloneString(&clone_info->geometry, geometry); status &= AnnotateImage(image, clone_info, exception); clone_info = DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo * clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale = ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *)NULL) && (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) && (fabs(scale * draw_info->stroke_width) >= MagickEpsilon) && (draw_info->stroke.alpha != (Quantum) TransparentAlpha)) { /* * Draw dash polygon. */ clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->stroke_width = 0.0; clone_info->stroke.alpha = (MagickRealType) TransparentAlpha; status &= DrawPolygonPrimitive(image, clone_info, primitive_info, exception); clone_info = DestroyDrawInfo(clone_info); if (status != MagickFalse) status &= DrawDashPolygon(draw_info, primitive_info, image, exception); break; } mid = ExpandAffine(&draw_info->affine) * SaneStrokeWidth(image, draw_info) / 2.0; if ((mid > 1.0) && ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) || (draw_info->stroke_pattern != (Image *) NULL))) { double x, y; MagickBooleanType closed_path; /* * Draw strokes while respecting line cap/join attributes. 
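A subpath counts as closed when its first and last vertices coincide
within MagickEpsilon:

  closed = (fabs(x[n-1]-x[0]) < MagickEpsilon) &&
           (fabs(y[n-1]-y[0]) < MagickEpsilon)

When the join is round and either the cap is round or the path is
closed (or further subpaths follow), the stroked outline can be
rendered by the plain polygon path, so the code falls back to
DrawPolygonPrimitive directly.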
*/ closed_path = primitive_info[0].closed_subpath; i = (ssize_t) primitive_info[0].coordinates; x = fabs(primitive_info[i - 1].point.x - primitive_info[0].point.x); y = fabs(primitive_info[i - 1].point.y - primitive_info[0].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) closed_path = MagickTrue; if ((((draw_info->linecap == RoundCap) || (closed_path != MagickFalse)) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { status &= DrawPolygonPrimitive(image, draw_info, primitive_info, exception); break; } clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->stroke_width = 0.0; clone_info->stroke.alpha = (MagickRealType) TransparentAlpha; status &= DrawPolygonPrimitive(image, clone_info, primitive_info, exception); clone_info = DestroyDrawInfo(clone_info); if (status != MagickFalse) status &= DrawStrokePolygon(image, draw_info, primitive_info, exception); break; } status &= DrawPolygonPrimitive(image, draw_info, primitive_info, exception); break; } } image_view = DestroyCacheView(image_view); if (draw_info->compliance == SVGCompliance) { status &= SetImageMask(image, WritePixelMask, (Image *) NULL, exception); status &= SetImageMask(image, CompositePixelMask, (Image *) NULL, exception); } if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-primitive"); return (status != 0 ? MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + D r a w S t r o k e P o l y g o n * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, * ellipse) on % the image while respecting the line cap and join * attributes. % % The format of the DrawStrokePolygon method is: % % * MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo * *draw_info,const PrimitiveInfo *primitive_info) % % A description of each * parameter follows: % % o image: the image. % % o draw_info: the draw * info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo * structure. % % */ static MagickBooleanType DrawRoundLinecap(Image * image, const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, ExceptionInfo * exception) { PrimitiveInfo linecap[5]; register ssize_t i; for (i = 0; i < 4; i++) linecap[i] = (*primitive_info); linecap[0].coordinates = 4; linecap[1].point.x += 2.0 * MagickEpsilon; linecap[2].point.x += 2.0 * MagickEpsilon; linecap[2].point.y += 2.0 * MagickEpsilon; linecap[3].point.y += 2.0 * MagickEpsilon; linecap[4].primitive = UndefinedPrimitive; return (DrawPolygonPrimitive(image, draw_info, linecap, exception)); } static MagickBooleanType DrawStrokePolygon(Image * image, const DrawInfo * draw_info, const PrimitiveInfo * primitive_info, ExceptionInfo * exception) { DrawInfo * clone_info; MagickBooleanType closed_path; MagickStatusType status; PrimitiveInfo * stroke_polygon; register const PrimitiveInfo * p, *q; /* * Draw stroked polygon. 
*/ if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " begin draw-stroke-polygon"); clone_info = CloneDrawInfo((ImageInfo *) NULL, draw_info); clone_info->fill = draw_info->stroke; if (clone_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern = DestroyImage(clone_info->fill_pattern); if (clone_info->stroke_pattern != (Image *) NULL) clone_info->fill_pattern = CloneImage(clone_info->stroke_pattern, 0, 0, MagickTrue, exception); clone_info->stroke.alpha = (MagickRealType) TransparentAlpha; clone_info->stroke_width = 0.0; clone_info->fill_rule = NonZeroRule; status = MagickTrue; for (p = primitive_info; p->primitive != UndefinedPrimitive; p += p->coordinates) { if (p->coordinates == 1) continue; stroke_polygon = TraceStrokePolygon(image, draw_info, p); if (stroke_polygon == (PrimitiveInfo *) NULL) { status = 0; break; } status &= DrawPolygonPrimitive(image, clone_info, stroke_polygon, exception); stroke_polygon = (PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon); if (status == 0) break; q = p + p->coordinates - 1; closed_path = p->closed_subpath; if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse)) { status &= DrawRoundLinecap(image, draw_info, p, exception); status &= DrawRoundLinecap(image, draw_info, q, exception); } } clone_info = DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void)LogMagickEvent(DrawEvent, GetMagickModule(), " end draw-stroke-polygon"); return (status != 0 ? MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t A f f i n e M a t r i x * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetAffineMatrix() returns an AffineMatrix initialized to the * identity % matrix. % % The format of the GetAffineMatrix method is: % % * void GetAffineMatrix(AffineMatrix *affine_matrix) % % A description of * each parameter follows: % % o affine_matrix: the affine matrix. % */ MagickExport void GetAffineMatrix(AffineMatrix * affine_matrix) { (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(affine_matrix != (AffineMatrix *) NULL); (void)memset(affine_matrix, 0, sizeof(*affine_matrix)); affine_matrix->sx = 1.0; affine_matrix->sy = 1.0; } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + G e t D r a w I n f o * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetDrawInfo() initializes draw_info to default values from * image_info. % % The format of the GetDrawInfo method is: % % void * GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info) % % A * description of each parameter follows: % % o image_info: the image * info.. % % o draw_info: the draw info. % */ MagickExport void GetDrawInfo(const ImageInfo * image_info, DrawInfo * draw_info) { char *next_token; const char *option; ExceptionInfo * exception; ImageInfo * clone_info; /* * Initialize draw attributes. 
*/ (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(draw_info != (DrawInfo *) NULL); (void)memset(draw_info, 0, sizeof(*draw_info)); clone_info = CloneImageInfo(image_info); GetAffineMatrix(&draw_info->affine); exception = AcquireExceptionInfo(); (void)QueryColorCompliance("#000F", AllCompliance, &draw_info->fill, exception); (void)QueryColorCompliance("#FFF0", AllCompliance, &draw_info->stroke, exception); draw_info->stroke_antialias = clone_info->antialias; draw_info->stroke_width = 1.0; draw_info->fill_rule = EvenOddRule; draw_info->alpha = OpaqueAlpha; draw_info->fill_alpha = OpaqueAlpha; draw_info->stroke_alpha = OpaqueAlpha; draw_info->linecap = ButtCap; draw_info->linejoin = MiterJoin; draw_info->miterlimit = 10; draw_info->decorate = NoDecoration; draw_info->pointsize = 12.0; draw_info->undercolor.alpha = (MagickRealType) TransparentAlpha; draw_info->compose = OverCompositeOp; draw_info->render = MagickTrue; draw_info->clip_path = MagickFalse; draw_info->debug = IsEventLogging(); if (clone_info->font != (char *)NULL) draw_info->font = AcquireString(clone_info->font); if (clone_info->density != (char *)NULL) draw_info->density = AcquireString(clone_info->density); draw_info->text_antialias = clone_info->antialias; if (fabs(clone_info->pointsize) >= MagickEpsilon) draw_info->pointsize = clone_info->pointsize; draw_info->border_color = clone_info->border_color; if (clone_info->server_name != (char *)NULL) draw_info->server_name = AcquireString(clone_info->server_name); option = GetImageOption(clone_info, "direction"); if (option != (const char *)NULL) draw_info->direction = (DirectionType) ParseCommandOption( MagickDirectionOptions, MagickFalse, option); else draw_info->direction = UndefinedDirection; option = GetImageOption(clone_info, "encoding"); if (option != (const char *)NULL) (void)CloneString(&draw_info->encoding, option); option = GetImageOption(clone_info, "family"); if (option != (const char *)NULL) (void)CloneString(&draw_info->family, option); option = GetImageOption(clone_info, "fill"); if (option != (const char *)NULL) (void)QueryColorCompliance(option, AllCompliance, &draw_info->fill, exception); option = GetImageOption(clone_info, "gravity"); if (option != (const char *)NULL) draw_info->gravity = (GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse, option); option = GetImageOption(clone_info, "interline-spacing"); if (option != (const char *)NULL) draw_info->interline_spacing = StringToDouble(option, &next_token); option = GetImageOption(clone_info, "interword-spacing"); if (option != (const char *)NULL) draw_info->interword_spacing = StringToDouble(option, &next_token); option = GetImageOption(clone_info, "kerning"); if (option != (const char *)NULL) draw_info->kerning = StringToDouble(option, &next_token); option = GetImageOption(clone_info, "stroke"); if (option != (const char *)NULL) (void)QueryColorCompliance(option, AllCompliance, &draw_info->stroke, exception); option = GetImageOption(clone_info, "strokewidth"); if (option != (const char *)NULL) draw_info->stroke_width = StringToDouble(option, &next_token); option = GetImageOption(clone_info, "style"); if (option != (const char *)NULL) draw_info->style = (StyleType) ParseCommandOption(MagickStyleOptions, MagickFalse, option); option = GetImageOption(clone_info, "undercolor"); if (option != (const char *)NULL) (void)QueryColorCompliance(option, AllCompliance, &draw_info->undercolor, exception); option = GetImageOption(clone_info, "weight"); if (option != (const char *)NULL) { 
ssize_t weight; weight = ParseCommandOption(MagickWeightOptions, MagickFalse, option); if (weight == -1) weight = (ssize_t) StringToUnsignedLong(option); draw_info->weight = (size_t) weight; } exception = DestroyExceptionInfo(exception); draw_info->signature = MagickCoreSignature; clone_info = DestroyImageInfo(clone_info); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + P e r m u t a t e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % Permutate() returns the permuation of the (n,k). % % The format * of the Permutate method is: % % void Permutate(ssize_t n,ssize_t k) % * % A description of each parameter follows: % % o n: % % o k: % % */ static inline double Permutate(const ssize_t n, const ssize_t k) { double r; register ssize_t i; r = 1.0; for (i = k + 1; i <= n; i++) r *= i; for (i = 1; i <= (n - k); i++) r /= i; return (r); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + T r a c e P r i m i t i v e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % TracePrimitive is a collection of methods for generating graphic * % primitives such as arcs, ellipses, paths, etc. % */ static MagickBooleanType TraceArc(MVGInfo * mvg_info, const PointInfo start, const PointInfo end, const PointInfo degrees) { PointInfo center, radius; center.x = 0.5 * (end.x + start.x); center.y = 0.5 * (end.y + start.y); radius.x = fabs(center.x - start.x); radius.y = fabs(center.y - start.y); return (TraceEllipse(mvg_info, center, radius, degrees)); } static MagickBooleanType TraceArcPath(MVGInfo * mvg_info, const PointInfo start, const PointInfo end, const PointInfo arc, const double angle, const MagickBooleanType large_arc, const MagickBooleanType sweep) { double alpha, beta, delta, factor, gamma, theta; MagickStatusType status; PointInfo center, points[3], radii; register double cosine, sine; PrimitiveInfo * primitive_info; register PrimitiveInfo * p; register ssize_t i; size_t arc_segments; ssize_t offset; offset = mvg_info->offset; primitive_info = (*mvg_info->primitive_info) + mvg_info->offset; primitive_info->coordinates = 0; if ((fabs(start.x - end.x) < MagickEpsilon) && (fabs(start.y - end.y) < MagickEpsilon)) return (TracePoint(primitive_info, end)); radii.x = fabs(arc.x); radii.y = fabs(arc.y); if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon)) return (TraceLine(primitive_info, start, end)); cosine = cos(DegreesToRadians(fmod((double)angle, 360.0))); sine = sin(DegreesToRadians(fmod((double)angle, 360.0))); center.x = (double)(cosine * (end.x - start.x) / 2 + sine * (end.y - start.y) / 2); center.y = (double)(cosine * (end.y - start.y) / 2 - sine * (end.x - start.x) / 2); delta = (center.x * center.x) / (radii.x * radii.x) + (center.y * center.y) / (radii.y * radii.y); if (delta < MagickEpsilon) return (TraceLine(primitive_info, start, end)); if (delta > 1.0) { radii.x *= sqrt((double)delta); radii.y *= sqrt((double)delta); } points[0].x = (double)(cosine * start.x / radii.x + sine * start.y / radii.x); points[0].y = (double)(cosine * start.y / radii.y - sine * start.x / radii.y); points[1].x = (double)(cosine * end.x / radii.x + sine * end.y / radii.x); points[1].y = (double)(cosine * end.y / radii.y - sine * end.x / radii.y); alpha = points[1].x - points[0].x; beta = points[1].y - points[0].y; if (fabs(alpha * alpha + beta * beta) < MagickEpsilon) 
return (TraceLine(primitive_info, start, end)); factor = PerceptibleReciprocal(alpha * alpha + beta * beta) - 0.25; if (factor <= 0.0) factor = 0.0; else { factor = sqrt((double)factor); if (sweep == large_arc) factor = (-factor); } center.x = (double)((points[0].x + points[1].x) / 2 - factor * beta); center.y = (double)((points[0].y + points[1].y) / 2 + factor * alpha); alpha = atan2(points[0].y - center.y, points[0].x - center.x); theta = atan2(points[1].y - center.y, points[1].x - center.x) - alpha; if ((theta < 0.0) && (sweep != MagickFalse)) theta += 2.0 * MagickPI; else if ((theta > 0.0) && (sweep == MagickFalse)) theta -= 2.0 * MagickPI; arc_segments = (size_t) ceil(fabs((double)(theta / (0.5 * MagickPI + MagickEpsilon)))); status = MagickTrue; p = primitive_info; for (i = 0; i < (ssize_t) arc_segments; i++) { beta = 0.5 * ((alpha + (i + 1) * theta / arc_segments) - (alpha + i * theta / arc_segments)); gamma = (8.0 / 3.0) * sin(fmod((double)(0.5 * beta), DegreesToRadians(360.0))) * sin(fmod((double)(0.5 * beta), DegreesToRadians(360.0))) / sin(fmod((double)beta, DegreesToRadians(360.0))); points[0].x = (double)(center.x + cos(fmod((double)(alpha + (double)i * theta / arc_segments), DegreesToRadians(360.0))) - gamma * sin(fmod((double)(alpha + (double)i * theta / arc_segments), DegreesToRadians(360.0)))); points[0].y = (double)(center.y + sin(fmod((double)(alpha + (double)i * theta / arc_segments), DegreesToRadians(360.0))) + gamma * cos(fmod((double)(alpha + (double)i * theta / arc_segments), DegreesToRadians(360.0)))); points[2].x = (double)(center.x + cos(fmod((double)(alpha + (double)(i + 1) * theta / arc_segments), DegreesToRadians(360.0)))); points[2].y = (double)(center.y + sin(fmod((double)(alpha + (double)(i + 1) * theta / arc_segments), DegreesToRadians(360.0)))); points[1].x = (double)(points[2].x + gamma * sin(fmod((double)(alpha + (double) (i + 1) * theta / arc_segments), DegreesToRadians(360.0)))); points[1].y = (double)(points[2].y - gamma * cos(fmod((double)(alpha + (double) (i + 1) * theta / arc_segments), DegreesToRadians(360.0)))); p->point.x = (p == primitive_info) ? start.x : (p - 1)->point.x; p->point.y = (p == primitive_info) ? 
start.y : (p - 1)->point.y; (p + 1)->point.x = (double)(cosine * radii.x * points[0].x - sine * radii.y * points[0].y); (p + 1)->point.y = (double)(sine * radii.x * points[0].x + cosine * radii.y * points[0].y); (p + 2)->point.x = (double)(cosine * radii.x * points[1].x - sine * radii.y * points[1].y); (p + 2)->point.y = (double)(sine * radii.x * points[1].x + cosine * radii.y * points[1].y); (p + 3)->point.x = (double)(cosine * radii.x * points[2].x - sine * radii.y * points[2].y); (p + 3)->point.y = (double)(sine * radii.x * points[2].x + cosine * radii.y * points[2].y); if (i == (ssize_t) (arc_segments - 1)) (p + 3)->point = end; status &= TraceBezier(mvg_info, 4); if (status == 0) break; p = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += p->coordinates; p += p->coordinates; } if (status == 0) return (MagickFalse); mvg_info->offset = offset; primitive_info = (*mvg_info->primitive_info) + mvg_info->offset; primitive_info->coordinates = (size_t) (p - primitive_info); primitive_info->closed_subpath = MagickFalse; for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } return (MagickTrue); } static MagickBooleanType TraceBezier(MVGInfo * mvg_info, const size_t number_coordinates) { double alpha, *coefficients, weight; PointInfo end, point, *points; PrimitiveInfo * primitive_info; register PrimitiveInfo * p; register ssize_t i, j; size_t control_points, quantum; /* * Allocate coefficients. */ primitive_info = (*mvg_info->primitive_info) + mvg_info->offset; quantum = number_coordinates; for (i = 0; i < (ssize_t) number_coordinates; i++) { for (j = i + 1; j < (ssize_t) number_coordinates; j++) { alpha = fabs(primitive_info[j].point.x - primitive_info[i].point.x); if (alpha > (double)SSIZE_MAX) { (void)ThrowMagickException(mvg_info->exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", ""); return (MagickFalse); } if (alpha > (double)quantum) quantum = (size_t) alpha; alpha = fabs(primitive_info[j].point.y - primitive_info[i].point.y); if (alpha > (double)SSIZE_MAX) { (void)ThrowMagickException(mvg_info->exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", ""); return (MagickFalse); } if (alpha > (double)quantum) quantum = (size_t) alpha; } } primitive_info = (*mvg_info->primitive_info) + mvg_info->offset; quantum = MagickMin(quantum / number_coordinates, BezierQuantum); coefficients = (double *)AcquireQuantumMemory(number_coordinates, sizeof(*coefficients)); points = (PointInfo *) AcquireQuantumMemory(quantum, number_coordinates * sizeof(*points)); if ((coefficients == (double *)NULL) || (points == (PointInfo *) NULL)) { if (points != (PointInfo *) NULL) points = (PointInfo *) RelinquishMagickMemory(points); if (coefficients != (double *)NULL) coefficients = (double *)RelinquishMagickMemory(coefficients); (void)ThrowMagickException(mvg_info->exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", ""); return (MagickFalse); } control_points = quantum * number_coordinates; if (CheckPrimitiveExtent(mvg_info, control_points + 1) == MagickFalse) { points = (PointInfo *) RelinquishMagickMemory(points); coefficients = (double *)RelinquishMagickMemory(coefficients); return (MagickFalse); } primitive_info = (*mvg_info->primitive_info) + mvg_info->offset; /* * Compute bezier points. 
*/ end = primitive_info[number_coordinates - 1].point; for (i = 0; i < (ssize_t) number_coordinates; i++) coefficients[i] = Permutate((ssize_t) number_coordinates - 1, i); weight = 0.0; for (i = 0; i < (ssize_t) control_points; i++) { p = primitive_info; point.x = 0.0; point.y = 0.0; alpha = pow((double)(1.0 - weight), (double)number_coordinates - 1.0); for (j = 0; j < (ssize_t) number_coordinates; j++) { point.x += alpha * coefficients[j] * p->point.x; point.y += alpha * coefficients[j] * p->point.y; alpha *= weight / (1.0 - weight); p++; } points[i] = point; weight += 1.0 / control_points; } /* * Bezier curves are just short segmented polys. */ p = primitive_info; for (i = 0; i < (ssize_t) control_points; i++) { if (TracePoint(p, points[i]) == MagickFalse) { points = (PointInfo *) RelinquishMagickMemory(points); coefficients = (double *)RelinquishMagickMemory(coefficients); return (MagickFalse); } p += p->coordinates; } if (TracePoint(p, end) == MagickFalse) { points = (PointInfo *) RelinquishMagickMemory(points); coefficients = (double *)RelinquishMagickMemory(coefficients); return (MagickFalse); } p += p->coordinates; primitive_info->coordinates = (size_t) (p - primitive_info); primitive_info->closed_subpath = MagickFalse; for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } points = (PointInfo *) RelinquishMagickMemory(points); coefficients = (double *)RelinquishMagickMemory(coefficients); return (MagickTrue); } static MagickBooleanType TraceCircle(MVGInfo * mvg_info, const PointInfo start, const PointInfo end) { double alpha, beta, radius; PointInfo offset, degrees; alpha = end.x - start.x; beta = end.y - start.y; radius = hypot((double)alpha, (double)beta); offset.x = (double)radius; offset.y = (double)radius; degrees.x = 0.0; degrees.y = 360.0; return (TraceEllipse(mvg_info, start, offset, degrees)); } static MagickBooleanType TraceEllipse(MVGInfo * mvg_info, const PointInfo center, const PointInfo radii, const PointInfo arc) { double coordinates, delta, step, x, y; PointInfo angle, point; PrimitiveInfo * primitive_info; register PrimitiveInfo * p; register ssize_t i; /* * Ellipses are just short segmented polys. 
*/ primitive_info = (*mvg_info->primitive_info) + mvg_info->offset; primitive_info->coordinates = 0; if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon)) return (MagickTrue); delta = 2.0 * PerceptibleReciprocal(MagickMax(radii.x, radii.y)); step = MagickPI / 8.0; if ((delta >= 0.0) && (delta < (MagickPI / 8.0))) step = MagickPI / 4.0 / (MagickPI * PerceptibleReciprocal(delta) / 2.0); angle.x = DegreesToRadians(arc.x); y = arc.y; while (y < arc.x) y += 360.0; angle.y = DegreesToRadians(y); coordinates = ceil((angle.y - angle.x) / step + 1.0); if (coordinates > (double)SSIZE_MAX) { (void)ThrowMagickException(mvg_info->exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", ""); return (MagickFalse); } if (CheckPrimitiveExtent(mvg_info, (size_t) coordinates) == MagickFalse) return (MagickFalse); primitive_info = (*mvg_info->primitive_info) + mvg_info->offset; for (p = primitive_info; angle.x < angle.y; angle.x += step) { point.x = cos(fmod(angle.x, DegreesToRadians(360.0))) * radii.x + center.x; point.y = sin(fmod(angle.x, DegreesToRadians(360.0))) * radii.y + center.y; if (TracePoint(p, point) == MagickFalse) return (MagickFalse); p += p->coordinates; } point.x = cos(fmod(angle.y, DegreesToRadians(360.0))) * radii.x + center.x; point.y = sin(fmod(angle.y, DegreesToRadians(360.0))) * radii.y + center.y; if (TracePoint(p, point) == MagickFalse) return (MagickFalse); p += p->coordinates; primitive_info->coordinates = (size_t) (p - primitive_info); primitive_info->closed_subpath = MagickFalse; x = fabs(primitive_info[0].point.x - primitive_info[primitive_info->coordinates - 1].point.x); y = fabs(primitive_info[0].point.y - primitive_info[primitive_info->coordinates - 1].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) primitive_info->closed_subpath = MagickTrue; for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } return (MagickTrue); } static MagickBooleanType TraceLine(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo end) { if (TracePoint(primitive_info, start) == MagickFalse) return (MagickFalse); if ((fabs(start.x - end.x) < MagickEpsilon) && (fabs(start.y - end.y) < MagickEpsilon)) { primitive_info->primitive = PointPrimitive; primitive_info->coordinates = 1; return (MagickTrue); } if (TracePoint(primitive_info + 1, end) == MagickFalse) return (MagickFalse); (primitive_info + 1)->primitive = primitive_info->primitive; primitive_info->coordinates = 2; primitive_info->closed_subpath = MagickFalse; return (MagickTrue); } static ssize_t TracePath(MVGInfo * mvg_info, const char *path, ExceptionInfo * exception) { char *next_token, token[MagickPathExtent]; const char *p; double x, y; int attribute, last_attribute; MagickBooleanType status; PointInfo end = { 0.0, 0.0 }, points[4] = { { 0.0, 0.0 }, { 0.0, 0.0 }, { 0.0, 0.0 }, { 0.0, 0.0 } }, point = { 0.0, 0.0 }, start = { 0.0, 0.0 }; PrimitiveInfo * primitive_info; PrimitiveType primitive_type; register PrimitiveInfo * q; register ssize_t i; size_t number_coordinates, z_count; ssize_t subpath_offset; subpath_offset = mvg_info->offset; primitive_info = (*mvg_info->primitive_info) + mvg_info->offset; status = MagickTrue; attribute = 0; number_coordinates = 0; z_count = 0; primitive_type = primitive_info->primitive; q = primitive_info; for (p = path; *p != '\0';) { if (status == MagickFalse) break; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == '\0') break; last_attribute = attribute; attribute = 
(int)(*p++); switch (attribute) { case 'a': case 'A': { double angle = 0.0; MagickBooleanType large_arc = MagickFalse, sweep = MagickFalse; PointInfo arc = { 0.0, 0.0 }; /* * Elliptical arc. */ do { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); arc.x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); arc.y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); angle = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); large_arc = StringToLong(token) != 0 ? MagickTrue : MagickFalse; (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); sweep = StringToLong(token) != 0 ? MagickTrue : MagickFalse; if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); end.x = (double)(attribute == (int)'A' ? x : point.x + x); end.y = (double)(attribute == (int)'A' ? y : point.y + y); if (TraceArcPath(mvg_info, point, end, arc, angle, large_arc, sweep) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += q->coordinates; q += q->coordinates; point = end; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* * Cubic Bézier curve. */ do { points[0] = point; for (i = 1; i < 4; i++) { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); end.x = (double)(attribute == (int)'C' ? x : point.x + x); end.y = (double)(attribute == (int)'C' ? 
y : point.y + y); points[i] = end; } for (i = 0; i < 4; i++) (q + i)->point = points[i]; if (TraceBezier(mvg_info, 4) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += q->coordinates; q += q->coordinates; point = end; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); point.x = (double)(attribute == (int)'H' ? x : point.x + x); if (CheckPrimitiveExtent(mvg_info, PrimitiveExtentPad) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; if (TracePoint(q, point) == MagickFalse) return (-1); mvg_info->offset += q->coordinates; q += q->coordinates; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { /* * Line to. */ do { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); point.x = (double)(attribute == (int)'L' ? x : point.x + x); point.y = (double)(attribute == (int)'L' ? y : point.y + y); if (CheckPrimitiveExtent(mvg_info, PrimitiveExtentPad) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; if (TracePoint(q, point) == MagickFalse) return (-1); mvg_info->offset += q->coordinates; q += q->coordinates; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { /* * Move to. */ if (mvg_info->offset != subpath_offset) { primitive_info = (*mvg_info->primitive_info) + subpath_offset; primitive_info->coordinates = (size_t) (q - primitive_info); number_coordinates += primitive_info->coordinates; primitive_info = q; subpath_offset = mvg_info->offset; } i = 0; do { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); point.x = (double)(attribute == (int)'M' ? x : point.x + x); point.y = (double)(attribute == (int)'M' ? y : point.y + y); if (i == 0) start = point; i++; if (CheckPrimitiveExtent(mvg_info, PrimitiveExtentPad) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; if (TracePoint(q, point) == MagickFalse) return (-1); mvg_info->offset += q->coordinates; q += q->coordinates; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* * Quadratic Bézier curve. 
*/ do { points[0] = point; for (i = 1; i < 3; i++) { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); if (*p == ',') p++; end.x = (double)(attribute == (int)'Q' ? x : point.x + x); end.y = (double)(attribute == (int)'Q' ? y : point.y + y); points[i] = end; } for (i = 0; i < 3; i++) (q + i)->point = points[i]; if (TraceBezier(mvg_info, 3) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += q->coordinates; q += q->coordinates; point = end; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* * Cubic Bézier curve. */ do { points[0] = points[3]; points[1].x = 2.0 * points[3].x - points[2].x; points[1].y = 2.0 * points[3].y - points[2].y; for (i = 2; i < 4; i++) { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); if (*p == ',') p++; end.x = (double)(attribute == (int)'S' ? x : point.x + x); end.y = (double)(attribute == (int)'S' ? y : point.y + y); points[i] = end; } if (strchr("CcSs", last_attribute) == (char *)NULL) { points[0] = point; points[1] = point; } for (i = 0; i < 4; i++) (q + i)->point = points[i]; if (TraceBezier(mvg_info, 4) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += q->coordinates; q += q->coordinates; point = end; last_attribute = attribute; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* * Quadratic Bézier curve. */ do { points[0] = points[2]; points[1].x = 2.0 * points[2].x - points[1].x; points[1].y = 2.0 * points[2].y - points[1].y; for (i = 2; i < 3; i++) { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); x = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); end.x = (double)(attribute == (int)'T' ? x : point.x + x); end.y = (double)(attribute == (int)'T' ? 
y : point.y + y); points[i] = end; } if (status == MagickFalse) break; if (strchr("QqTt", last_attribute) == (char *)NULL) { points[0] = point; points[1] = point; } for (i = 0; i < 3; i++) (q + i)->point = points[i]; if (TraceBezier(mvg_info, 3) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += q->coordinates; q += q->coordinates; point = end; last_attribute = attribute; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { /* * Line to. */ do { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); y = StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token, exception); point.y = (double)(attribute == (int)'V' ? y : point.y + y); if (CheckPrimitiveExtent(mvg_info, PrimitiveExtentPad) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; if (TracePoint(q, point) == MagickFalse) return (-1); mvg_info->offset += q->coordinates; q += q->coordinates; while (isspace((int)((unsigned char)*p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { /* * Close path. */ point = start; if (CheckPrimitiveExtent(mvg_info, PrimitiveExtentPad) == MagickFalse) return (-1); q = (*mvg_info->primitive_info) + mvg_info->offset; if (TracePoint(q, point) == MagickFalse) return (-1); mvg_info->offset += q->coordinates; q += q->coordinates; primitive_info = (*mvg_info->primitive_info) + subpath_offset; primitive_info->coordinates = (size_t) (q - primitive_info); primitive_info->closed_subpath = MagickTrue; number_coordinates += primitive_info->coordinates; primitive_info = q; subpath_offset = mvg_info->offset; z_count++; break; } default: { ThrowPointExpectedException(token, exception); break; } } } if (status == MagickFalse) return (-1); primitive_info = (*mvg_info->primitive_info) + subpath_offset; primitive_info->coordinates = (size_t) (q - primitive_info); number_coordinates += primitive_info->coordinates; for (i = 0; i < (ssize_t) number_coordinates; i++) { q--; q->primitive = primitive_type; if (z_count > 1) q->method = FillToBorderMethod; } q = primitive_info; return ((ssize_t) number_coordinates); } static MagickBooleanType TraceRectangle(PrimitiveInfo * primitive_info, const PointInfo start, const PointInfo end) { PointInfo point; register PrimitiveInfo * p; register ssize_t i; p = primitive_info; if (TracePoint(p, start) == MagickFalse) return (MagickFalse); p += p->coordinates; point.x = start.x; point.y = end.y; if (TracePoint(p, point) == MagickFalse) return (MagickFalse); p += p->coordinates; if (TracePoint(p, end) == MagickFalse) return (MagickFalse); p += p->coordinates; point.x = end.x; point.y = start.y; if (TracePoint(p, point) == MagickFalse) return (MagickFalse); p += p->coordinates; if (TracePoint(p, start) == MagickFalse) return (MagickFalse); p += p->coordinates; primitive_info->coordinates = (size_t) (p - primitive_info); primitive_info->closed_subpath = MagickTrue; for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } return (MagickTrue); } static MagickBooleanType TraceRoundRectangle(MVGInfo * mvg_info, const PointInfo start, const PointInfo end, PointInfo arc) { PointInfo degrees, point, segment; PrimitiveInfo * primitive_info; register PrimitiveInfo * p; register ssize_t i; ssize_t offset; offset = mvg_info->offset; 
segment.x = fabs(end.x - start.x); segment.y = fabs(end.y - start.y); if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon)) { (*mvg_info->primitive_info + mvg_info->offset)->coordinates = 0; return (MagickTrue); } if (arc.x > (0.5 * segment.x)) arc.x = 0.5 * segment.x; if (arc.y > (0.5 * segment.y)) arc.y = 0.5 * segment.y; point.x = start.x + segment.x - arc.x; point.y = start.y + arc.y; degrees.x = 270.0; degrees.y = 360.0; if (TraceEllipse(mvg_info, point, arc, degrees) == MagickFalse) return (MagickFalse); p = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += p->coordinates; point.x = start.x + segment.x - arc.x; point.y = start.y + segment.y - arc.y; degrees.x = 0.0; degrees.y = 90.0; if (TraceEllipse(mvg_info, point, arc, degrees) == MagickFalse) return (MagickFalse); p = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += p->coordinates; point.x = start.x + arc.x; point.y = start.y + segment.y - arc.y; degrees.x = 90.0; degrees.y = 180.0; if (TraceEllipse(mvg_info, point, arc, degrees) == MagickFalse) return (MagickFalse); p = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += p->coordinates; point.x = start.x + arc.x; point.y = start.y + arc.y; degrees.x = 180.0; degrees.y = 270.0; if (TraceEllipse(mvg_info, point, arc, degrees) == MagickFalse) return (MagickFalse); p = (*mvg_info->primitive_info) + mvg_info->offset; mvg_info->offset += p->coordinates; if (CheckPrimitiveExtent(mvg_info, PrimitiveExtentPad) == MagickFalse) return (MagickFalse); p = (*mvg_info->primitive_info) + mvg_info->offset; if (TracePoint(p, (*mvg_info->primitive_info + offset)->point) == MagickFalse) return (MagickFalse); p += p->coordinates; mvg_info->offset = offset; primitive_info = (*mvg_info->primitive_info) + offset; primitive_info->coordinates = (size_t) (p - primitive_info); primitive_info->closed_subpath = MagickTrue; for (i = 0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive = primitive_info->primitive; p--; } return (MagickTrue); } static MagickBooleanType TraceSquareLinecap(PrimitiveInfo * primitive_info, const size_t number_vertices, const double offset) { double distance; register double dx, dy; register ssize_t i; ssize_t j; dx = 0.0; dy = 0.0; for (i = 1; i < (ssize_t) number_vertices; i++) { dx = primitive_info[0].point.x - primitive_info[i].point.x; dy = primitive_info[0].point.y - primitive_info[i].point.y; if ((fabs((double)dx) >= MagickEpsilon) || (fabs((double)dy) >= MagickEpsilon)) break; } if (i == (ssize_t) number_vertices) i = (ssize_t) number_vertices - 1L; distance = hypot((double)dx, (double)dy); primitive_info[0].point.x = (double)(primitive_info[i].point.x + dx * (distance + offset) / distance); primitive_info[0].point.y = (double)(primitive_info[i].point.y + dy * (distance + offset) / distance); for (j = (ssize_t) number_vertices - 2; j >= 0; j--) { dx = primitive_info[number_vertices - 1].point.x - primitive_info[j].point.x; dy = primitive_info[number_vertices - 1].point.y - primitive_info[j].point.y; if ((fabs((double)dx) >= MagickEpsilon) || (fabs((double)dy) >= MagickEpsilon)) break; } distance = hypot((double)dx, (double)dy); primitive_info[number_vertices - 1].point.x = (double)(primitive_info[j].point.x + dx * (distance + offset) / distance); primitive_info[number_vertices - 1].point.y = (double)(primitive_info[j].point.y + dy * (distance + offset) / distance); return (MagickTrue); } static PrimitiveInfo * TraceStrokePolygon(const Image * image, const DrawInfo * draw_info, const PrimitiveInfo * 
primitive_info) { #define MaxStrokePad (6*BezierQuantum+360) #define CheckPathExtent(pad_p,pad_q) \ { \ if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \ { \ if (~extent_p < (pad_p)) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ else \ { \ extent_p+=(pad_p); \ stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \ MaxStrokePad,sizeof(*stroke_p)); \ } \ } \ if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \ { \ if (~extent_q < (pad_q)) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ else \ { \ extent_q+=(pad_q); \ stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \ MaxStrokePad,sizeof(*stroke_q)); \ } \ } \ if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \ { \ if (stroke_p != (PointInfo *) NULL) \ stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \ if (stroke_q != (PointInfo *) NULL) \ stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \ polygon_primitive=(PrimitiveInfo *) \ RelinquishMagickMemory(polygon_primitive); \ return((PrimitiveInfo *) NULL); \ } \ } typedef struct _StrokeSegment { double p, q; } StrokeSegment; double delta_theta, dot_product, mid, miterlimit; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *stroke_p, *stroke_q; PrimitiveInfo * polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, extent_p, extent_q, number_vertices; ssize_t j, n, p, q; StrokeSegment dx = { 0.0, 0.0 }, dy = { 0.0, 0.0 }, inverse_slope = { 0.0, 0.0 }, slope = { 0.0, 0.0 }, theta = { 0.0, 0.0 }; /* * Allocate paths. */ number_vertices = primitive_info->coordinates; polygon_primitive = (PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices + 2UL, sizeof(*polygon_primitive)); if (polygon_primitive == (PrimitiveInfo *) NULL) return ((PrimitiveInfo *) NULL); (void)memcpy(polygon_primitive, primitive_info, (size_t) number_vertices * sizeof(*polygon_primitive)); offset.x = primitive_info[number_vertices - 1].point.x - primitive_info[0].point.x; offset.y = primitive_info[number_vertices - 1].point.y - primitive_info[0].point.y; closed_path = (fabs(offset.x) < MagickEpsilon) && (fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices] = primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive = UndefinedPrimitive; /* * Compute the slope for the first line segment, p. */ dx.p = 0.0; dy.p = 0.0; for (n = 1; n < (ssize_t) number_vertices; n++) { dx.p = polygon_primitive[n].point.x - polygon_primitive[0].point.x; dy.p = polygon_primitive[n].point.y - polygon_primitive[0].point.y; if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon)) break; } if (n == (ssize_t) number_vertices) { if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse)) { /* * Zero length subpath. 
*/ stroke_polygon = (PrimitiveInfo *) AcquireCriticalMemory( sizeof(*stroke_polygon)); stroke_polygon[0] = polygon_primitive[0]; stroke_polygon[0].coordinates = 0; polygon_primitive = (PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return (stroke_polygon); } n = (ssize_t) number_vertices - 1L; } extent_p = 2 * number_vertices; extent_q = 2 * number_vertices; stroke_p = (PointInfo *) AcquireQuantumMemory((size_t) extent_p + MaxStrokePad, sizeof(*stroke_p)); stroke_q = (PointInfo *) AcquireQuantumMemory((size_t) extent_q + MaxStrokePad, sizeof(*stroke_q)); if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) { if (stroke_p != (PointInfo *) NULL) stroke_p = (PointInfo *) RelinquishMagickMemory(stroke_p); if (stroke_q != (PointInfo *) NULL) stroke_q = (PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive = (PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return ((PrimitiveInfo *) NULL); } slope.p = 0.0; inverse_slope.p = 0.0; if (fabs(dx.p) < MagickEpsilon) { if (dx.p >= 0.0) slope.p = dy.p < 0.0 ? -1.0 / MagickEpsilon : 1.0 / MagickEpsilon; else slope.p = dy.p < 0.0 ? 1.0 / MagickEpsilon : -1.0 / MagickEpsilon; } else if (fabs(dy.p) < MagickEpsilon) { if (dy.p >= 0.0) inverse_slope.p = dx.p < 0.0 ? -1.0 / MagickEpsilon : 1.0 / MagickEpsilon; else inverse_slope.p = dx.p < 0.0 ? 1.0 / MagickEpsilon : -1.0 / MagickEpsilon; } else { slope.p = dy.p / dx.p; inverse_slope.p = (-1.0 / slope.p); } mid = ExpandAffine(&draw_info->affine) * SaneStrokeWidth(image, draw_info) / 2.0; miterlimit = (double)(draw_info->miterlimit * draw_info->miterlimit * mid * mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) (void)TraceSquareLinecap(polygon_primitive, number_vertices, mid); offset.x = sqrt((double)(mid * mid / (inverse_slope.p * inverse_slope.p + 1.0))); offset.y = (double)(offset.x * inverse_slope.p); if ((dy.p * offset.x - dx.p * offset.y) > 0.0) { box_p[0].x = polygon_primitive[0].point.x - offset.x; box_p[0].y = polygon_primitive[0].point.y - offset.x * inverse_slope.p; box_p[1].x = polygon_primitive[n].point.x - offset.x; box_p[1].y = polygon_primitive[n].point.y - offset.x * inverse_slope.p; box_q[0].x = polygon_primitive[0].point.x + offset.x; box_q[0].y = polygon_primitive[0].point.y + offset.x * inverse_slope.p; box_q[1].x = polygon_primitive[n].point.x + offset.x; box_q[1].y = polygon_primitive[n].point.y + offset.x * inverse_slope.p; } else { box_p[0].x = polygon_primitive[0].point.x + offset.x; box_p[0].y = polygon_primitive[0].point.y + offset.y; box_p[1].x = polygon_primitive[n].point.x + offset.x; box_p[1].y = polygon_primitive[n].point.y + offset.y; box_q[0].x = polygon_primitive[0].point.x - offset.x; box_q[0].y = polygon_primitive[0].point.y - offset.y; box_q[1].x = polygon_primitive[n].point.x - offset.x; box_q[1].y = polygon_primitive[n].point.y - offset.y; } /* * Create strokes for the line join attribute: bevel, miter, round. */ p = 0; q = 0; stroke_q[p++] = box_q[0]; stroke_p[q++] = box_p[0]; for (i = (ssize_t) n + 1; i < (ssize_t) number_vertices; i++) { /* * Compute the slope for this line segment, q. */ dx.q = polygon_primitive[i].point.x - polygon_primitive[n].point.x; dy.q = polygon_primitive[i].point.y - polygon_primitive[n].point.y; dot_product = dx.q * dx.q + dy.q * dy.q; if (dot_product < 0.25) continue; slope.q = 0.0; inverse_slope.q = 0.0; if (fabs(dx.q) < MagickEpsilon) { if (dx.q >= 0.0) slope.q = dy.q < 0.0 ? -1.0 / MagickEpsilon : 1.0 / MagickEpsilon; else slope.q = dy.q < 0.0 ? 
1.0 / MagickEpsilon : -1.0 / MagickEpsilon; } else if (fabs(dy.q) < MagickEpsilon) { if (dy.q >= 0.0) inverse_slope.q = dx.q < 0.0 ? -1.0 / MagickEpsilon : 1.0 / MagickEpsilon; else inverse_slope.q = dx.q < 0.0 ? 1.0 / MagickEpsilon : -1.0 / MagickEpsilon; } else { slope.q = dy.q / dx.q; inverse_slope.q = (-1.0 / slope.q); } offset.x = sqrt((double)(mid * mid / (inverse_slope.q * inverse_slope.q + 1.0))); offset.y = (double)(offset.x * inverse_slope.q); dot_product = dy.q * offset.x - dx.q * offset.y; if (dot_product > 0.0) { box_p[2].x = polygon_primitive[n].point.x - offset.x; box_p[2].y = polygon_primitive[n].point.y - offset.y; box_p[3].x = polygon_primitive[i].point.x - offset.x; box_p[3].y = polygon_primitive[i].point.y - offset.y; box_q[2].x = polygon_primitive[n].point.x + offset.x; box_q[2].y = polygon_primitive[n].point.y + offset.y; box_q[3].x = polygon_primitive[i].point.x + offset.x; box_q[3].y = polygon_primitive[i].point.y + offset.y; } else { box_p[2].x = polygon_primitive[n].point.x + offset.x; box_p[2].y = polygon_primitive[n].point.y + offset.y; box_p[3].x = polygon_primitive[i].point.x + offset.x; box_p[3].y = polygon_primitive[i].point.y + offset.y; box_q[2].x = polygon_primitive[n].point.x - offset.x; box_q[2].y = polygon_primitive[n].point.y - offset.y; box_q[3].x = polygon_primitive[i].point.x - offset.x; box_q[3].y = polygon_primitive[i].point.y - offset.y; } if (fabs((double)(slope.p - slope.q)) < MagickEpsilon) { box_p[4] = box_p[1]; box_q[4] = box_q[1]; } else { box_p[4].x = (double)((slope.p * box_p[0].x - box_p[0].y - slope.q * box_p[3].x + box_p[3].y) / (slope.p - slope.q)); box_p[4].y = (double)(slope.p * (box_p[4].x - box_p[0].x) + box_p[0].y); box_q[4].x = (double)((slope.p * box_q[0].x - box_q[0].y - slope.q * box_q[3].x + box_q[3].y) / (slope.p - slope.q)); box_q[4].y = (double)(slope.p * (box_q[4].x - box_q[0].x) + box_q[0].y); } CheckPathExtent(MaxStrokePad, MaxStrokePad); dot_product = dx.q * dy.p - dx.p * dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { stroke_q[q++] = box_q[1]; stroke_q[q++] = box_q[2]; dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++] = box_p[4]; else { stroke_p[p++] = box_p[1]; stroke_p[p++] = box_p[2]; } break; } case MiterJoin: { dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++] = box_q[4]; stroke_p[p++] = box_p[4]; } else { stroke_q[q++] = box_q[1]; stroke_q[q++] = box_q[2]; stroke_p[p++] = box_p[1]; stroke_p[p++] = box_p[2]; } break; } case RoundJoin: { dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) stroke_p[p++] = box_p[4]; else { stroke_p[p++] = box_p[1]; stroke_p[p++] = box_p[2]; } center = polygon_primitive[n].point; theta.p = atan2(box_q[1].y - center.y, box_q[1].x - center.x); theta.q = atan2(box_q[2].y - center.y, box_q[2].x - center.x); if (theta.q < theta.p) theta.q += 2.0 * MagickPI; arc_segments = (size_t) ceil((double)((theta.q - theta.p) / (2.0 * sqrt((double)(1.0 / mid))))); CheckPathExtent(MaxStrokePad, arc_segments + MaxStrokePad); stroke_q[q].x = box_q[1].x; stroke_q[q].y = box_q[1].y; q++; for (j = 1; j < (ssize_t) arc_segments; j++) { delta_theta = (double)(j * (theta.q - theta.p) / arc_segments); stroke_q[q].x = (double)(center.x + 
mid * cos(fmod((double) (theta.p + delta_theta), DegreesToRadians(360.0)))); stroke_q[q].y = (double)(center.y + mid * sin(fmod((double) (theta.p + delta_theta), DegreesToRadians(360.0)))); q++; } stroke_q[q++] = box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { stroke_p[p++] = box_p[1]; stroke_p[p++] = box_p[2]; dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++] = box_q[4]; else { stroke_q[q++] = box_q[1]; stroke_q[q++] = box_q[2]; } break; } case MiterJoin: { dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) { stroke_q[q++] = box_q[4]; stroke_p[p++] = box_p[4]; } else { stroke_q[q++] = box_q[1]; stroke_q[q++] = box_q[2]; stroke_p[p++] = box_p[1]; stroke_p[p++] = box_p[2]; } break; } case RoundJoin: { dot_product = (box_q[4].x - box_p[4].x) * (box_q[4].x - box_p[4].x) + (box_q[4].y - box_p[4].y) * (box_q[4].y - box_p[4].y); if (dot_product <= miterlimit) stroke_q[q++] = box_q[4]; else { stroke_q[q++] = box_q[1]; stroke_q[q++] = box_q[2]; } center = polygon_primitive[n].point; theta.p = atan2(box_p[1].y - center.y, box_p[1].x - center.x); theta.q = atan2(box_p[2].y - center.y, box_p[2].x - center.x); if (theta.p < theta.q) theta.p += 2.0 * MagickPI; arc_segments = (size_t) ceil((double)((theta.p - theta.q) / (2.0 * sqrt((double)(1.0 / mid))))); CheckPathExtent(arc_segments + MaxStrokePad, MaxStrokePad); stroke_p[p++] = box_p[1]; for (j = 1; j < (ssize_t) arc_segments; j++) { delta_theta = (double)(j * (theta.q - theta.p) / arc_segments); stroke_p[p].x = (double)(center.x + mid * cos(fmod((double) (theta.p + delta_theta), DegreesToRadians(360.0)))); stroke_p[p].y = (double)(center.y + mid * sin(fmod((double) (theta.p + delta_theta), DegreesToRadians(360.0)))); p++; } stroke_p[p++] = box_p[2]; break; } default: break; } slope.p = slope.q; inverse_slope.p = inverse_slope.q; box_p[0] = box_p[2]; box_p[1] = box_p[3]; box_q[0] = box_q[2]; box_q[1] = box_q[3]; dx.p = dx.q; dy.p = dy.q; n = i; } stroke_p[p++] = box_p[1]; stroke_q[q++] = box_q[1]; /* * Trace stroked polygon. */ stroke_polygon = (PrimitiveInfo *) AcquireQuantumMemory((size_t) (p + q + 2UL * closed_path + 2UL), sizeof(*stroke_polygon)); if (stroke_polygon != (PrimitiveInfo *) NULL) { for (i = 0; i < (ssize_t) p; i++) { stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = stroke_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = stroke_polygon[0].point; i++; } for (; i < (ssize_t) (p + q + closed_path); i++) { stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = stroke_q[p + q + closed_path - (i + 1)]; } if (closed_path != MagickFalse) { stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = stroke_polygon[p + closed_path].point; i++; } stroke_polygon[i] = polygon_primitive[0]; stroke_polygon[i].point = stroke_polygon[0].point; i++; stroke_polygon[i].primitive = UndefinedPrimitive; stroke_polygon[0].coordinates = (size_t) (p + q + 2 * closed_path + 1); } stroke_p = (PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q = (PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive = (PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return (stroke_polygon); }
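/*
 * Aside: a minimal standalone sketch of the Bernstein-basis evaluation that
 * TraceBezier performs above. TraceBezier builds binomial coefficients with
 * Permutate(n-1, i), accumulates alpha * C(n-1,j) * P_j per sample while
 * updating alpha by weight/(1.0-weight), and finally appends the exact
 * endpoint to avoid rounding drift. The helper names below (binomial,
 * bezier_point) are illustrative only and are not part of the MagickCore API.
 */
#include <math.h>
#include <stdio.h>

typedef struct { double x, y; } Point;

/* C(n,k), computed multiply-then-divide exactly like Permutate(). */
static double binomial(const int n, const int k)
{
  double r = 1.0;
  int i;
  for (i = k + 1; i <= n; i++) r *= i;
  for (i = 1; i <= (n - k); i++) r /= i;
  return r;
}

/* Evaluate a degree-(n-1) Bezier at t in [0,1):
   B(t) = sum_j C(n-1,j) * (1-t)^(n-1-j) * t^j * P_j. */
static Point bezier_point(const Point *ctrl, const int n, const double t)
{
  Point b = { 0.0, 0.0 };
  double alpha = pow(1.0 - t, (double) (n - 1));
  int j;
  for (j = 0; j < n; j++) {
    const double c = binomial(n - 1, j);
    b.x += alpha * c * ctrl[j].x;
    b.y += alpha * c * ctrl[j].y;
    alpha *= t / (1.0 - t);  /* advance the Bernstein factor to the next j */
  }
  return b;
}

int main(void)
{
  const Point ctrl[4] = { { 0, 0 }, { 0, 100 }, { 100, 100 }, { 100, 0 } };
  int i;
  for (i = 0; i < 8; i++) {  /* flatten the curve into a short polyline */
    const Point s = bezier_point(ctrl, 4, i / 8.0);
    printf("%g %g\n", s.x, s.y);
  }
  return 0;
}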
AUC-hybrid.c
// Program: AUC-hybrid
// Author: Jason Regina
// Date: 12 November 2015
// Description: This program approximates pi using the Riemann Sum method

#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include <mpi.h>
#include <omp.h>

// This function returns a y-value on a unit circle
// centered at the origin, given an x-value
double func(double x)
{
    return sqrt(1.0 - (x * x));
}

int main( int argc, char** argv )
{
    // Set number of rectangles
    int recs = 100000000;
    int num_threads = 1;

    // Initialize MPI
    int rank = 0, procs = 0;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &procs);

    // Parse command line
    const char* name = argv[0];
    int c;
    while ((c = getopt(argc, argv, "n:t:")) != -1)
    {
        switch(c)
        {
            case 'n':
                recs = atoi(optarg);
                break;
            case 't':
                num_threads = atoi(optarg);
                break;
            case '?':
            default:
                fprintf(stderr, "Usage: %s -n [NUMBER_OF_RECTANGLES] -t [NUM_OMP_THREADS]\n", name);
                return -1;
        }
    }
    argc -= optind;
    argv += optind;

    // Calculate rectangle width
    double width;
    width = 1.0 / recs;

    // Determine first and last elements of process
    int first = 0, last = recs;
    first = rank * (recs / procs);
    if (rank != (procs - 1))
        last = first + (recs / procs);

    // Set number of OMP_THREADS
    omp_set_num_threads(num_threads);

    // Calculate total area
    double sum = 0.0;
    int i = 0;
    #pragma omp parallel for reduction(+:sum) shared(first,last,width) private(i)
    for (i = first; i < last; i++)
    {
        sum += func(width * i) * width * 4.0;
    }

    // Calculate total sum
    double total_sum = 0.0;
    MPI_Reduce(&sum, &total_sum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

    // Print result
    if (rank == 0)
    {
        printf(" --- %s --- \n", name);
        printf("Number of processes: %d\n", procs);
        printf("Threads per process: %d\n", num_threads);
        printf("Rectangles : %d\n", recs);
        printf("pi is approximately: %f\n", total_sum);
    }

    // Terminate
    MPI_Finalize();
    return 0;
}
// Program: AUC-hybrid
// Author: Jason Regina
// Date: 12 November 2015
// Description: This program approximates pi using the Riemann Sum method

#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include <mpi.h>
#include <omp.h>

// This function returns a y-value on a unit circle
// centered at the origin, given an x-value
double func(double x)
{
	return sqrt(1.0 - (x * x));
}

int main(int argc, char **argv)
{
	// Set number of rectangles
	int recs = 100000000;
	int num_threads = 1;

	// Initialize MPI
	int rank = 0, procs = 0;
	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &procs);

	// Parse command line
	const char *name = argv[0];
	int c;
	while ((c = getopt(argc, argv, "n:t:")) != -1) {
		switch (c) {
		case 'n':
			recs = atoi(optarg);
			break;
		case 't':
			num_threads = atoi(optarg);
			break;
		case '?':
		default:
			fprintf(stderr, "Usage: %s -n [NUMBER_OF_RECTANGLES] -t [NUM_OMP_THREADS]\n", name);
			return -1;
		}
	}
	argc -= optind;
	argv += optind;

	// Calculate rectangle width
	double width;
	width = 1.0 / recs;

	// Determine first and last elements of process
	int first = 0, last = recs;
	first = rank * (recs / procs);
	if (rank != (procs - 1))
		last = first + (recs / procs);

	// Set number of OMP_THREADS
	omp_set_num_threads(num_threads);

	// Calculate total area
	double sum = 0.0;
	int i = 0;
	for (i = first; i < last; i++) {
		sum += func(width * i) * width * 4.0;
	}

	// Calculate total sum
	double total_sum = 0.0;
	MPI_Reduce(&sum, &total_sum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

	// Print result
	if (rank == 0) {
		printf(" --- %s --- \n", name);
		printf("Number of processes: %d\n", procs);
		printf("Threads per process: %d\n", num_threads);
		printf("Rectangles : %d\n", recs);
		printf("pi is approximately: %f\n", total_sum);
	}

	// Terminate
	MPI_Finalize();
	return 0;
}
// Program: AUC-hybrid
// Author: Jason Regina
// Date: 12 November 2015
// Description: This program approximates pi using the Riemann Sum method

#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include <mpi.h>
#include <omp.h>

// This function returns a y-value on a unit circle
// centered at the origin, given an x-value
double func(double x) { return sqrt(1.0 - (x * x)); }

int main(int argc, char **argv)
{
  // Set number of rectangles
  int recs = 100000000;
  int num_threads = 1;

  // Initialize MPI
  int rank = 0, procs = 0;
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &procs);

  // Parse command line
  const char *name = argv[0];
  int c;
  while ((c = getopt(argc, argv, "n:t:")) != -1) {
    switch (c) {
    case 'n':
      recs = atoi(optarg);
      break;
    case 't':
      num_threads = atoi(optarg);
      break;
    case '?':
    default:
      fprintf(stderr, "Usage: %s -n [NUMBER_OF_RECTANGLES] -t [NUM_OMP_THREADS]\n", name);
      return -1;
    }
  }
  argc -= optind;
  argv += optind;

  // Calculate rectangle width
  double width;
  width = 1.0 / recs;

  // Determine first and last elements of process
  int first = 0, last = recs;
  first = rank * (recs / procs);
  if (rank != (procs - 1))
    last = first + (recs / procs);

  // Set number of OMP_THREADS
  omp_set_num_threads(num_threads);

  // Calculate total area
  double sum = 0.0;
  int i = 0;
#pragma omp parallel for reduction(+:sum) shared(first,last,width) private(i)
  for (i = first; i < last; i++) {
    sum += func(width * i) * width * 4.0;
  }

  // Calculate total sum
  double total_sum = 0.0;
  MPI_Reduce(&sum, &total_sum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

  // Print result
  if (rank == 0) {
    printf(" --- %s --- \n", name);
    printf("Number of processes: %d\n", procs);
    printf("Threads per process: %d\n", num_threads);
    printf("Rectangles : %d\n", recs);
    printf("pi is approximately: %f\n", total_sum);
  }

  // Terminate
  MPI_Finalize();
  return 0;
}
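All three variants above compute the same left-endpoint Riemann sum of 4*sqrt(1 - x^2) over [0, 1), split first across MPI ranks and then across OpenMP threads. A minimal, self-contained OpenMP-only sketch that isolates just the reduction pattern (no MPI; the names and default sizes here are illustrative, not part of the dataset row):

// Minimal sketch: pi via a Riemann sum with an OpenMP reduction.
// Build (assumption): cc -fopenmp riemann_pi.c -o riemann_pi -lm
#include <math.h>
#include <stdio.h>

int main(void)
{
    const long recs = 100000000;     // number of rectangles
    const double width = 1.0 / recs; // rectangle width
    double sum = 0.0;

    // Each rectangle's height lies on the unit circle y = sqrt(1 - x^2);
    // reduction(+:sum) combines the per-thread partial sums.
#pragma omp parallel for reduction(+:sum)
    for (long i = 0; i < recs; i++) {
        double x = width * i;
        sum += sqrt(1.0 - x * x) * width * 4.0;
    }

    printf("pi is approximately: %f\n", sum);
    return 0;
}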
3d7pt.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx = 34, Ny = 34, Nz = 34, Nt = 4; /* defaults, so the sizes are never used uninitialized */
  if (argc > 3) {
    Nx = atoi(argv[1]) + 2;
    Ny = atoi(argv[2]) + 2;
    Nz = atoi(argv[3]) + 2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double ***) * 2);
  A[0] = (double ***) malloc(sizeof(double **) * Nz);
  A[1] = (double ***) malloc(sizeof(double **) * Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double **) malloc(sizeof(double *) * Ny);
    A[1][i] = (double **) malloc(sizeof(double *) * Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double *) malloc(sizeof(double) * Nx);
      A[1][i][j] = (double *) malloc(sizeof(double) * Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int *) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int *) realloc((void *) tile_size, sizeof(int) * 5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 24;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
        ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-11,12)),ceild(4*t2-Nz-20,24));t3<=min(min(min(floord(4*t2+Ny,24),floord(Nt+Ny-4,24)),floord(2*t1+Ny+1,24)),floord(4*t1-4*t2+Nz+Ny-1,24));t3++) {
            for (t4=max(max(max(0,ceild(t1-1023,1024)),ceild(4*t2-Nz-2044,2048)),ceild(24*t3-Ny-2044,2048));t4<=min(min(min(min(floord(4*t2+Nx,2048),floord(Nt+Nx-4,2048)),floord(2*t1+Nx+1,2048)),floord(24*t3+Nx+20,2048)),floord(4*t1-4*t2+Nz+Nx-1,2048));t4++) {
              for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),24*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),24*t3+22),2048*t4+2046),4*t1-4*t2+Nz+1);t5++) {
                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
                    lbv=max(2048*t4,t5+1);
                    ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (causes performance degradation)
  /*
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx = 34, Ny = 34, Nz = 34, Nt = 4; /* defaults, so the sizes are never used uninitialized */
  if (argc > 3) {
    Nx = atoi(argv[1]) + 2;
    Ny = atoi(argv[2]) + 2;
    Nz = atoi(argv[3]) + 2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double ***) * 2);
  A[0] = (double ***) malloc(sizeof(double **) * Nz);
  A[1] = (double ***) malloc(sizeof(double **) * Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double **) malloc(sizeof(double *) * Ny);
    A[1][i] = (double **) malloc(sizeof(double *) * Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double *) malloc(sizeof(double) * Nx);
      A[1][i][j] = (double *) malloc(sizeof(double) * Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int *) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int *) realloc((void *) tile_size, sizeof(int) * 5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 24;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  LIKWID_MARKER_THREADINIT;
  LIKWID_MARKER_START("calc");
#endif

  int num_threads = 1;

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
        ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-11,12)),ceild(4*t2-Nz-20,24));t3<=min(min(min(floord(4*t2+Ny,24),floord(Nt+Ny-4,24)),floord(2*t1+Ny+1,24)),floord(4*t1-4*t2+Nz+Ny-1,24));t3++) {
            for (t4=max(max(max(0,ceild(t1-1023,1024)),ceild(4*t2-Nz-2044,2048)),ceild(24*t3-Ny-2044,2048));t4<=min(min(min(min(floord(4*t2+Nx,2048),floord(Nt+Nx-4,2048)),floord(2*t1+Nx+1,2048)),floord(24*t3+Nx+20,2048)),floord(4*t1-4*t2+Nz+Nx-1,2048));t4++) {
              for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),24*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),24*t3+22),2048*t4+2046),4*t1-4*t2+Nz+1);t5++) {
                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
                    lbv=max(2048*t4,t5+1);
                    ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_STOP("calc");
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (causes performance degradation)
  /*
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx = 34, Ny = 34, Nz = 34, Nt = 4; /* defaults, so the sizes are never used uninitialized */
  if (argc > 3) {
    Nx = atoi(argv[1]) + 2;
    Ny = atoi(argv[2]) + 2;
    Nz = atoi(argv[3]) + 2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double ***) * 2);
  A[0] = (double ***) malloc(sizeof(double **) * Nz);
  A[1] = (double ***) malloc(sizeof(double **) * Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double **) malloc(sizeof(double *) * Ny);
    A[1][i] = (double **) malloc(sizeof(double *) * Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double *) malloc(sizeof(double) * Nx);
      A[1][i][j] = (double *) malloc(sizeof(double) * Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int *) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int *) realloc((void *) tile_size, sizeof(int) * 5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 24;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
        ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-11,12)),ceild(4*t2-Nz-20,24));t3<=min(min(min(floord(4*t2+Ny,24),floord(Nt+Ny-4,24)),floord(2*t1+Ny+1,24)),floord(4*t1-4*t2+Nz+Ny-1,24));t3++) {
            for (t4=max(max(max(0,ceild(t1-1023,1024)),ceild(4*t2-Nz-2044,2048)),ceild(24*t3-Ny-2044,2048));t4<=min(min(min(min(floord(4*t2+Nx,2048),floord(Nt+Nx-4,2048)),floord(2*t1+Nx+1,2048)),floord(24*t3+Nx+20,2048)),floord(4*t1-4*t2+Nz+Nx-1,2048));t4++) {
              for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),24*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),24*t3+22),2048*t4+2046),4*t1-4*t2+Nz+1);t5++) {
                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
                    lbv=max(2048*t4,t5+1);
                    ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (causes performance degradation)
  /*
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
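For reference, the time-tiled CLooG loop nests above compute the same update as this untransformed 7-point stencil step, assuming the double-buffered A[2][Nz][Ny][Nx] layout and the alpha/beta weights used in the files; this is an illustrative sketch, not part of the dataset:

// Reference (untiled) order-1 3D 7-point stencil step: A[(t+1)%2] is
// computed from A[t%2] over the interior points, parallelized over z-planes.
static void stencil_step(double ****A, int t, int Nz, int Ny, int Nx,
                         double alpha, double beta)
{
  int i, j, k;
#pragma omp parallel for private(j, k)
  for (i = 1; i < Nz - 1; i++)
    for (j = 1; j < Ny - 1; j++)
      for (k = 1; k < Nx - 1; k++)
        A[(t + 1) % 2][i][j][k] =
            alpha * A[t % 2][i][j][k] +
            beta * (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k] +
                    A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k] +
                    A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]);
}

Calling stencil_step for t = 0 .. Nt-2 reproduces the 6 additions and 2 multiplications per point counted in the "serial execution" comment; the tiled version reorders the same iterations for cache locality and parallelizes the t2 (wavefront) dimension instead.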
GB_binop__pair_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pair_fp64) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pair_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__pair_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_fp64) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: double // A type: double // B,b type: double // BinaryOp: cij = 1 #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = 1 ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_FP64 || GxB_NO_PAIR_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__pair_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pair_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pair_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pair_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pair_fp64) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pair_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__pair_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_fp64) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: double // A type: double // B,b type: double // BinaryOp: cij = 1 #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = 1 ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_FP64 || GxB_NO_PAIR_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__pair_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pair_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pair_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pair_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pair_fp64) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pair_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__pair_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_fp64) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: double // A type: double // B,b type: double // BinaryOp: cij = 1 #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = 1 ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_FP64 || GxB_NO_PAIR_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__pair_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pair_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pair_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pair_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)   \
{                           \
    Cx [pC] = 1 ;           \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)   \
{                           \
    Cx [pC] = 1 ;           \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
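The two scalar-bind kernels above compile down to a single flat parallel-for over the stored entries, with GBB testing the bitmap. The following standalone sketch (hypothetical names, plain arrays instead of GraphBLAS matrices, and an assumed multiply operator in place of the generated `Cx [p] = 1` placeholder) shows the same bind1st pattern in isolation:

#include <stdint.h>
#include <stdio.h>
#include <omp.h>

static inline double fmult(double x, double y) { return x * y; }

// Cx[p] = fmult(x, Bx[p]) over the entries present in the bitmap Bb
void apply_bind1st(double *Cx, double x, const double *Bx,
                   const int8_t *Bb, int64_t bnz, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < bnz; p++)
    {
        if (Bb != NULL && !Bb[p]) continue;   // skip entries absent from the bitmap
        Cx[p] = fmult(x, Bx[p]);
    }
}

int main(void)
{
    double Bx[4] = {1, 2, 3, 4}, Cx[4] = {0};
    int8_t Bb[4] = {1, 0, 1, 1};
    apply_bind1st(Cx, 10.0, Bx, Bb, 4, 2);
    for (int p = 0; p < 4; p++) printf("%g ", Cx[p]);   // prints 10 0 30 40
    printf("\n");
    return 0;
}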
parallel-quicksort.c
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include "omp.h"

void quicksort(int *array, int start, int end, int threads);
int ordena(int *array, int start, int end, int pivot, int* lvec);
void merge(int* array, int start, int end, int* lvec, int u, int* copia);

int main()
{
    const int n = 9;
    const int p = 3;
    omp_set_num_threads(p);
    int a[] = { 7, 12, 1, -2, 0, 15, 4, 11, 9};
    int i;
    printf("\n\nVector desordenado: ");
    for(i = 0; i < n; ++i)
        printf(" %d ", a[i]);
    printf("\n");
    #pragma omp parallel
    #pragma omp single
    quicksort( a, 0, n, p);
    printf("\n\nVector ordenado: ");
    for(i = 0; i < n; ++i)
        printf(" %d ", a[i]);
    printf("\n");
    return 0;
}

void quicksort(int *array, int start, int end, int threads)
{
    //If only one thread was assigned to this part (be it L or U), or it holds a single number, leave the parallel recursion.
    //Still missing: when more than one number remains, the single thread this recursion was called with should run the sequential algorithm.
    if(threads <= 1 || end - start < 2)
        return;
    int pivot, i, u = 0; //u will hold the number of elements smaller than the pivot
    //lvec has 1s at the positions where the array holds elements less than or equal to the pivot, and 0s where they are greater;
    //these 1s and 0s are assigned once the array chunks have been partitioned around the pivot in parallel.
    //Note: lvec and copia are indexed with absolute positions, so this only works while start == 0 (the recursive calls below are disabled).
    int* lvec = calloc(end - start, sizeof(int));
    int* copia = malloc(sizeof(int) * (end - start)); //auxiliary vector so the data in the original array is not clobbered
    srand(time(NULL));
    pivot = array[rand() % (end - start) + start];
    printf("pivot: %d\n", pivot);
    //Launch as many tasks as threads were assigned to the array (or chunk), splitting the positions evenly among them.
    for(i = 0; i < threads; i++)
    {
        if (i+1 == threads)
        {
            #pragma omp task
            ordena(array, start + (i*((end - start) / threads)), end, pivot, lvec);
        }
        else
        {
            #pragma omp task
            ordena(array, start + (i*((end - start) / threads)), start + ((i + 1)*((end - start) / threads)), pivot, lvec);
        }
    }
    #pragma omp taskwait //wait until the chunks have been partitioned
    for (i = start; i < end; i++) //fill @u with the total number of elements smaller than the pivot
        if(lvec[i] == 1)
            u++;
    //Join the parts smaller than the pivot on the left of the vector and the larger ones on the right, in parallel with tasks.
    for(i = 0; i < threads; i++)
    {
        if (i+1 == threads)
        {
            #pragma omp task
            merge(array, start + (i*((end - start) / threads)), end, lvec, u, copia);
        }
        else
        {
            #pragma omp task
            merge(array, start + (i*((end - start) / threads)), start + ((i + 1)*((end - start) / threads)), lvec, u, copia);
        }
    }
    #pragma omp taskwait
    //At this point @copia holds the vector ordered for this iteration.
    //Split the available threads according to the sizes of the parts below and above the pivot.
    int repartohilos = (int) (((float) u / (end - start) ) * threads + 0.5 );
    int repartohilos2 = (int) (((float) ((end - start) - u) / (end - start) ) * threads + 0.5 );
    //Copy @copia into @array; this should really be done only at the end, simply passing @copia along during the recursion.
    for(i = start; i < end; i++)
        array[i] = copia[i];
    /*
    #pragma omp task
    quicksort(array, start, start + u, repartohilos);
    #pragma omp task
    quicksort(array, start + u, end, repartohilos2);
    */
    free(lvec); free(copia); //release the scratch buffers (they were leaked before)
}

void merge(int* array, int start, int end, int* lvec, int u, int* copia)
{
    int l = 0, i;
    //Count how many elements before @start fall on each side, to get this chunk's write offsets.
    for (i = 0; i < start; i++)
        if(lvec[i]==1)
            l++;
        else
            u++;
    for (i = start; i < end; i++)
        if(lvec[i] == 1)
        {
            copia[l] = array[i];
            l++;
        }
        else
        {
            copia[u] = array[i];
            u++;
        }
}

int ordena(int *array, int start, int end, int pivot, int* lvec)
{
    int i, tmp, numElementos = 0;
    for(i = start; i < end; i++)
        if (array[i] <= pivot)
        {
            lvec[start] = 1;
            numElementos++;
            tmp = array[start];
            array[start] = array[i];
            array[i] = tmp;
            start++;
        }
    return numElementos;
}
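For reference, here is the textbook shape of an OpenMP task-parallel quicksort that the file above approximates (hypothetical code, not part of the dataset): one in-place Lomuto partition per call and one task per half, with the parallel region opened once around the root call, exactly as main() does above.

#include <stdio.h>
#include <omp.h>

// Sorts a[lo..hi-1]; each recursion level spawns two tasks.
static void qs_task(int *a, int lo, int hi)
{
    if (hi - lo < 2) return;
    int pivot = a[hi - 1], s = lo;          // Lomuto partition around the last element
    for (int i = lo; i < hi - 1; i++)
        if (a[i] <= pivot) { int t = a[i]; a[i] = a[s]; a[s] = t; s++; }
    int t = a[hi - 1]; a[hi - 1] = a[s]; a[s] = t;
    #pragma omp task
    qs_task(a, lo, s);
    #pragma omp task
    qs_task(a, s + 1, hi);
    #pragma omp taskwait
}

int main(void)
{
    int a[] = {7, 12, 1, -2, 0, 15, 4, 11, 9};
    int n = sizeof(a) / sizeof(a[0]);
    #pragma omp parallel
    #pragma omp single
    qs_task(a, 0, n);
    for (int i = 0; i < n; i++) printf("%d ", a[i]);
    printf("\n");
    return 0;
}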
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include "omp.h"

void quicksort(int *array, int start, int end, int threads);
int ordena(int *array, int start, int end, int pivot, int* lvec);
void merge(int* array, int start, int end, int* lvec, int u, int* copia);

int main()
{
    const int n = 9;
    const int p = 3;
    omp_set_num_threads(p);
    int a[] = { 7, 12, 1, -2, 0, 15, 4, 11, 9};
    int i;
    printf("\n\nVector desordenado: ");
    for(i = 0; i < n; ++i)
        printf(" %d ", a[i]);
    printf("\n");
    quicksort( a, 0, n, p);
    printf("\n\nVector ordenado: ");
    for(i = 0; i < n; ++i)
        printf(" %d ", a[i]);
    printf("\n");
    return 0;
}

void quicksort(int *array, int start, int end, int threads)
{
    //If only one thread was assigned to this part (be it L or U), or it holds a single number, leave the parallel recursion.
    //Still missing: when more than one number remains, the single thread this recursion was called with should run the sequential algorithm.
    if(threads <= 1 || end - start < 2)
        return;
    int pivot, i, u = 0; //u will hold the number of elements smaller than the pivot
    //lvec has 1s at the positions where the array holds elements less than or equal to the pivot, and 0s where they are greater;
    //these 1s and 0s are assigned once the array chunks have been partitioned around the pivot.
    //Note: lvec and copia are indexed with absolute positions, so this only works while start == 0 (the recursive calls below are disabled).
    int* lvec = calloc(end - start, sizeof(int));
    int* copia = malloc(sizeof(int) * (end - start)); //auxiliary vector so the data in the original array is not clobbered
    srand(time(NULL));
    pivot = array[rand() % (end - start) + start];
    printf("pivot: %d\n", pivot);
    //Process as many chunks as threads were assigned to the array, splitting the positions evenly among them.
    for(i = 0; i < threads; i++)
    {
        if (i+1 == threads)
        {
            ordena(array, start + (i*((end - start) / threads)), end, pivot, lvec);
        }
        else
        {
            ordena(array, start + (i*((end - start) / threads)), start + ((i + 1)*((end - start) / threads)), pivot, lvec);
        }
    }
    for (i = start; i < end; i++) //fill @u with the total number of elements smaller than the pivot
        if(lvec[i] == 1)
            u++;
    //Join the parts smaller than the pivot on the left of the vector and the larger ones on the right.
    for(i = 0; i < threads; i++)
    {
        if (i+1 == threads)
        {
            merge(array, start + (i*((end - start) / threads)), end, lvec, u, copia);
        }
        else
        {
            merge(array, start + (i*((end - start) / threads)), start + ((i + 1)*((end - start) / threads)), lvec, u, copia);
        }
    }
    //At this point @copia holds the vector ordered for this iteration.
    //Split the available threads according to the sizes of the parts below and above the pivot.
    int repartohilos = (int) (((float) u / (end - start) ) * threads + 0.5 );
    int repartohilos2 = (int) (((float) ((end - start) - u) / (end - start) ) * threads + 0.5 );
    //Copy @copia into @array; this should really be done only at the end, simply passing @copia along during the recursion.
    for(i = start; i < end; i++)
        array[i] = copia[i];
    /*
    quicksort(array, start, start + u, repartohilos);
    quicksort(array, start + u, end, repartohilos2);
    */
    free(lvec); free(copia); //release the scratch buffers (they were leaked before)
}

void merge(int* array, int start, int end, int* lvec, int u, int* copia)
{
    int l = 0, i;
    //Count how many elements before @start fall on each side, to get this chunk's write offsets.
    for (i = 0; i < start; i++)
        if(lvec[i]==1)
            l++;
        else
            u++;
    for (i = start; i < end; i++)
        if(lvec[i] == 1)
        {
            copia[l] = array[i];
            l++;
        }
        else
        {
            copia[u] = array[i];
            u++;
        }
}

int ordena(int *array, int start, int end, int pivot, int* lvec)
{
    int i, tmp, numElementos = 0;
    for(i = start; i < end; i++)
        if (array[i] <= pivot)
        {
            lvec[start] = 1;
            numElementos++;
            tmp = array[start];
            array[start] = array[i];
            array[i] = tmp;
            start++;
        }
    return numElementos;
}
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include "omp.h"

void quicksort(int *array, int start, int end, int threads);
int ordena(int *array, int start, int end, int pivot, int* lvec);
void merge(int* array, int start, int end, int* lvec, int u, int* copia);

int main()
{
    const int n = 9;
    const int p = 3;
    omp_set_num_threads(p);
    int a[] = { 7, 12, 1, -2, 0, 15, 4, 11, 9};
    int i;
    printf("\n\nVector desordenado: ");
    for(i = 0; i < n; ++i)
        printf(" %d ", a[i]);
    printf("\n");
    #pragma omp parallel
    #pragma omp single
    quicksort( a, 0, n, p);
    printf("\n\nVector ordenado: ");
    for(i = 0; i < n; ++i)
        printf(" %d ", a[i]);
    printf("\n");
    return 0;
}

void quicksort(int *array, int start, int end, int threads)
{
    //If only one thread was assigned to this part (be it L or U), or it holds a single number, leave the parallel recursion.
    //Still missing: when more than one number remains, the single thread this recursion was called with should run the sequential algorithm.
    if(threads <= 1 || end - start < 2)
        return;
    int pivot, i, u = 0; //u will hold the number of elements smaller than the pivot
    //lvec has 1s at the positions where the array holds elements less than or equal to the pivot, and 0s where they are greater;
    //these 1s and 0s are assigned once the array chunks have been partitioned around the pivot in parallel.
    //Note: lvec and copia are indexed with absolute positions, so this only works while start == 0 (the recursive calls below are disabled).
    int* lvec = calloc(end - start, sizeof(int));
    int* copia = malloc(sizeof(int) * (end - start)); //auxiliary vector so the data in the original array is not clobbered
    srand(time(NULL));
    pivot = array[rand() % (end - start) + start];
    printf("pivot: %d\n", pivot);
    //Launch as many tasks as threads were assigned to the array (or chunk), splitting the positions evenly among them.
    for(i = 0; i < threads; i++)
    {
        if (i+1 == threads)
        {
            #pragma omp task
            ordena(array, start + (i*((end - start) / threads)), end, pivot, lvec);
        }
        else
        {
            #pragma omp task
            ordena(array, start + (i*((end - start) / threads)), start + ((i + 1)*((end - start) / threads)), pivot, lvec);
        }
    }
    #pragma omp taskwait //wait until the chunks have been partitioned
    for (i = start; i < end; i++) //fill @u with the total number of elements smaller than the pivot
        if(lvec[i] == 1)
            u++;
    //Join the parts smaller than the pivot on the left of the vector and the larger ones on the right, in parallel with tasks.
    for(i = 0; i < threads; i++)
    {
        if (i+1 == threads)
        {
            #pragma omp task
            merge(array, start + (i*((end - start) / threads)), end, lvec, u, copia);
        }
        else
        {
            #pragma omp task
            merge(array, start + (i*((end - start) / threads)), start + ((i + 1)*((end - start) / threads)), lvec, u, copia);
        }
    }
    #pragma omp taskwait
    //At this point @copia holds the vector ordered for this iteration.
    //Split the available threads according to the sizes of the parts below and above the pivot.
    int repartohilos = (int) (((float) u / (end - start) ) * threads + 0.5 );
    int repartohilos2 = (int) (((float) ((end - start) - u) / (end - start) ) * threads + 0.5 );
    //Copy @copia into @array; this should really be done only at the end, simply passing @copia along during the recursion.
    for(i = start; i < end; i++)
        array[i] = copia[i];
    /*
    #pragma omp task
    quicksort(array, start, start + u, repartohilos);
    #pragma omp task
    quicksort(array, start + u, end, repartohilos2);
    */
    free(lvec); free(copia); //release the scratch buffers (they were leaked before)
}

void merge(int* array, int start, int end, int* lvec, int u, int* copia)
{
    int l = 0, i;
    //Count how many elements before @start fall on each side, to get this chunk's write offsets.
    for (i = 0; i < start; i++)
        if(lvec[i]==1)
            l++;
        else
            u++;
    for (i = start; i < end; i++)
        if(lvec[i] == 1)
        {
            copia[l] = array[i];
            l++;
        }
        else
        {
            copia[u] = array[i];
            u++;
        }
}

int ordena(int *array, int start, int end, int pivot, int* lvec)
{
    int i, tmp, numElementos = 0;
    for(i = start; i < end; i++)
        if (array[i] <= pivot)
        {
            lvec[start] = 1;
            numElementos++;
            tmp = array[start];
            array[start] = array[i];
            array[i] = tmp;
            start++;
        }
    return numElementos;
}
userver.c
/*
MIT License

Copyright (c) 2017 Emanuele Giona

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#include "userver.h"

int XOR(int a, int b){
    return a^b;
}

int fileXOR(char srcfile[], char dstfile[], long dim, int seed){
    //open the files
    int src=open(srcfile,O_RDWR);
    if(src<0){
        sprintf(lastError,"Errore apertura file %s.\n",srcfile);
        return 400;
    }
    int dst=open(dstfile,O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
    if(dst<0){
        sprintf(lastError,"Errore apertura file %s.\n",dstfile);
        close(src);
        return 400;
    }
    //lock the files
    if(lockf(src,F_TLOCK,0)<0){
        sprintf(lastError,"Errore lock su file %s.\n",srcfile);
        close(src); close(dst);
        return 500;
    }
    if(lockf(dst,F_TLOCK,0)<0){
        sprintf(lastError,"Errore lock su file %s.\n",dstfile);
        lockf(src,F_ULOCK,0);
        close(src); close(dst);
        return 500;
    }
    //seek to the end of the output file
    int result=lseek(dst, dim-1, SEEK_SET);
    if(result==-1){
        sprintf(lastError,"Errore stretch file %s.\n",dstfile);
        lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0);
        close(src); close(dst);
        return 500;
    }
    //write a placeholder byte so the size change sticks
    result=write(dst,"",1);
    if(result!=1){
        sprintf(lastError,"Errore scrittura su file %s.\n",dstfile);
        lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0);
        close(src); close(dst);
        return 500;
    }
    //set the seed for rand()
    srand(seed);
    //threads are not needed below 256KB
    if(dim<=256*1024){
        long freePages=sysconf(_SC_AVPHYS_PAGES);
        long pageDim=sysconf(_SC_PAGESIZE);
        long freeMem=freePages*pageDim;
        if(freeMem<=3*dim){
            sprintf(lastError,"RAM insufficiente per aprire il file %s.\n",srcfile);
            lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0);
            close(src); close(dst);
            return 500;
        }
        //map the whole file
        char *srcmap=(char *)mmap(NULL,dim,PROT_READ,MAP_PRIVATE,src,0);
        if((void *)srcmap==MAP_FAILED){
            sprintf(lastError,"Errore file mapping su file %s.\n",srcfile);
            lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0);
            close(src); close(dst);
            return 500;
        }
        char *dstmap=(char *)mmap(NULL,dim,PROT_READ | PROT_WRITE,MAP_SHARED,dst,0);
        if((void *)dstmap==MAP_FAILED){
            sprintf(lastError,"Errore file mapping su file %s.\n",dstfile);
            munmap((void *)srcmap,dim);
            lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0);
            close(src); close(dst);
            return 500;
        }
        //key array for the XOR, 4 consecutive bytes share the same key:
        //one key per 4-byte group (the original multiplied by 4, over-allocating the VLA fourfold)
        long keyDim=(long)ceil((double)dim/4);
        int key[keyDim];
        for(long i=0;i<keyDim;i++){
            key[i]=rand()%65536; //cap on the generated number, for portability across compilers
        }
        //apply the XOR and write into the mapping, byte by byte
        long i,j;
        for(i=0,j=0;i<dim && j<keyDim;i+=4,j++){
            dstmap[i]=(char)(XOR((int)srcmap[i],key[j]));
            dstmap[i+1]=(char)(XOR((int)srcmap[i+1],key[j]));
            dstmap[i+2]=(char)(XOR((int)srcmap[i+2],key[j]));
            dstmap[i+3]=(char)(XOR((int)srcmap[i+3],key[j]));
        }
        munmap((void *)srcmap,dim);
        munmap((void *)dstmap,dim);
    }
    //threads are needed, use OpenMP; suggested: 1 omp thread per 256KB block - cap of 7MB handled as one block
    else{
        long fiveMB=5*pow(2,20);
        int chunks=(int)ceil((double)dim/fiveMB);
        for(int c=0;c<chunks;c++){
            long freePages=sysconf(_SC_AVPHYS_PAGES);
            long pageDim=sysconf(_SC_PAGESIZE);
            long freeMem=freePages*pageDim;
            if(freeMem<=2*fiveMB){
                sprintf(lastError,"RAM insufficiente per aprire il file %s.\n",srcfile);
                lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0);
                close(src); close(dst);
                return 500;
            }
            long start=(c)*fiveMB;
            long end=(c+1)*fiveMB;
            long realEnd=end;
            if(dim<realEnd) realEnd=dim;
            long chunkDim=realEnd-start;
            if(dim-start<chunkDim) chunkDim=dim-start;
            //map chunk c
            char *srcmap=(char *)mmap(NULL,chunkDim,PROT_READ,MAP_PRIVATE,src,start);
            if((void *)srcmap==MAP_FAILED){
                sprintf(lastError,"Errore file mapping su file %s, chunk #%i.\n",srcfile,c);
                lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0);
                close(src); close(dst);
                return 500;
            }
            char *dstmap=(char *)mmap(NULL,chunkDim,PROT_READ | PROT_WRITE,MAP_SHARED,dst,start);
            if((void *)dstmap==MAP_FAILED){
                sprintf(lastError,"Errore file mapping su file %s, chunk #%i.\n",dstfile,c);
                munmap((void *)srcmap,chunkDim);
                lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0);
                close(src); close(dst);
                return 500;
            }
            //1 OpenMP thread per 256KB
            int mpThreads=(int)ceil((double)chunkDim/(256*1024));
            //key matrix for the XOR, 4 consecutive bytes share the same key;
            //each OpenMP thread gets its own reduced-size array
            long keyDimT = (long)ceil((double)chunkDim / (mpThreads * 4));
            int key[mpThreads][keyDimT];
            for(long j=0;j<mpThreads;j++){
                for(long i=0;i<keyDimT;i++){
                    key[j][i]=rand()%65536; //cap on the generated number, for portability across compilers
                }
            }
            #pragma omp parallel num_threads(mpThreads)
            {
                int threadID=omp_get_thread_num();
                int min=(threadID)*256*1024;
                int max=(threadID+1)*256*1024;
                //apply the XOR and write into the mapping, byte by byte, each thread strictly within its own section
                for(long i=min;i<max && i<chunkDim;i+=4){
                    int val=key[threadID][(i-min)/4];
                    dstmap[i]=(char)(XOR((int)srcmap[i],val));
                    dstmap[i+1]=(char)(XOR((int)srcmap[i+1],val));
                    dstmap[i+2]=(char)(XOR((int)srcmap[i+2],val));
                    dstmap[i+3]=(char)(XOR((int)srcmap[i+3],val));
                }
            }
            munmap((void *)srcmap,chunkDim);
            munmap((void *)dstmap,chunkDim);
        }
    }
    //done
    lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0);
    close(src); close(dst);
    return 200;
}

int sendMessage(int sock, char message[]){
    char buf[BUFSIZE];
    memset(buf,0,BUFSIZE);
    strncpy(buf,message,BUFSIZE);
    int msglen=write(sock,buf,BUFSIZE);
    if(msglen<0){
        char toLog[BUFSIZE]="";
        sprintf(toLog,"Errore write sul socket.\n");
        writeLog(LOGFILE,toLog);
        return 1;
    }
    return 0;
}

int encrypt(char src[], int seed, int sock){
    char dst[PATHLEN]="";
    strncpy(dst,src,strlen(src));
    strncat(dst,"_enc",5);
    long dim=-1;
    struct stat st;
    if(stat(src, &st) == 0)
        dim=st.st_size;
    if(dim==-1){
        if(errno==ENOENT){
            sprintf(lastError,"File %s non esistente.\n",src);
            return 400;
        }
        else{
            sprintf(lastError,"Errore nel calcolo dimensione del file %s.\n",src);
            return 500;
        }
    }
    int ret=fileXOR(src,dst,dim,seed);
    if(ret==200 && unlink(src)){
        sprintf(lastError,"Errore nella cancellazione del file %s.\n",src);
        return 500;
    }
    return ret;
}

int decrypt(char src[], int seed, int sock){
    //find the last occurrence of "_enc" and require it to be the suffix
    char *enc=NULL;
    char *temp = strstr(src, "_enc");
    while (temp) {
        enc = temp++;
        temp = strstr(temp, "_enc");
    }
    if(enc==NULL || strlen(enc)!=4){
        sprintf(lastError,"Il file %s non e' un file cifrato.\n",src);
        return 400;
    }
    char dst[PATHLEN]="";
    strncpy(dst,src,strlen(src)-4);
    long dim=-1;
    struct stat st;
    if(stat(src, &st) == 0)
        dim=st.st_size;
    if(dim==-1){
        if(errno==ENOENT){
            sprintf(lastError,"File %s non esistente.\n",src);
            return 400;
        }
        else{
            sprintf(lastError,"Errore nel calcolo dimensione del file %s.\n",src);
            return 500;
        }
    }
    int ret=fileXOR(src,dst,dim,seed);
    if(ret==200 && unlink(src)){
        sprintf(lastError,"Errore nella cancellazione del file %s.\n",src);
        return 500;
    }
    return ret;
}

int listFolder(char folder[], int sock){
    DIR* dir=opendir(folder);
    if(dir==NULL){
        sprintf(lastError,"Errore apertura directory %s.\n",folder);
        return 400;
    }
    while(true){
        struct dirent *val=NULL;
        char path[PATHLEN]="";
        char entry[PATHLEN+50]="";
        memset(path,0,sizeof(path));
        memset(entry,0,sizeof(entry));
        val=readdir(dir);
        if(val==NULL){
            break;
        }
        if(strcmp(val->d_name,".")==0 || strcmp(val->d_name,"..")==0 || (val->d_type & DT_DIR))
            continue;
        strncpy(path,folder,PATHLEN);
        if(strstr(path+(strlen(path)-1),"/")==NULL)
            strncat(path,"/",1);
        strncat(path,val->d_name,PATHLEN-strlen(path));
        long dim=-1;
        struct stat st;
        if(stat(path, &st) == 0)
            dim=st.st_size;
        if(dim==-1){
            sprintf(lastError,"Errore nel calcolo dimensione del file %s.\n",path);
            return 500;
        }
        sprintf(entry,"%li %s",dim,path);
        sendMessage(sock,entry);
        sendMessage(sock,"\r\n");
    }
    closedir(dir); //was missing: the directory handle leaked on every listing
    return 200;
}

int listRecursive(char folder[], int sock){
    DIR* dir=opendir(folder);
    if(dir==NULL){
        sprintf(lastError,"Errore apertura directory %s.\n",folder);
        return 400;
    }
    while(true){
        struct dirent *val=NULL;
        char path[PATHLEN]="";
        char entry[PATHLEN+50]="";
        memset(path,0,sizeof(path));
        memset(entry,0,sizeof(entry));
        val=readdir(dir);
        if(val==NULL){
            break;
        }
        if(strcmp(val->d_name,".")==0 || strcmp(val->d_name,"..")==0)
            continue;
        strncpy(path,folder,PATHLEN);
        if(strstr(path+(strlen(path)-1),"/")==NULL)
            strncat(path,"/",1);
        strncat(path,val->d_name,PATHLEN-strlen(path));
        if(!(val->d_type & DT_DIR)){
            long dim=-1;
            struct stat st;
            if(stat(path, &st) == 0)
                dim=st.st_size;
            if(dim==-1){
                sprintf(lastError,"Errore nel calcolo dimensione del file %s.\n",path);
                return 500;
            }
            sprintf(entry,"%li %s",dim,path);
        }
        else{
            if(dir){
                int ret=listRecursive(path,sock);
                if(ret!=200)
                    return ret;
            }
            else{
                sprintf(lastError,"Errore nell'apertura della directory %s.\n",path);
                return 500;
            }
        }
        if(strcmp(entry,"")!=0)
            sendMessage(sock,entry);
        sendMessage(sock,"\r\n");
    }
    closedir(dir);
    return 200;
}

int parseRequest(char folder[], char message[], int sock){
    DIR* dir=opendir(folder);
    if(dir==NULL){
        return 1;
    }
    closedir(dir); //was missing: the handle was opened only to validate the folder
    int ret=0;
    if(strstr(message,"LSTF")!=NULL){
        sendMessage(sock,STATE_PENDING);
        ret=listFolder(folder,sock);
        sendMessage(sock,"\r\n.\r\n");
    }
    else if(strstr(message,"LSTR")!=NULL){
        sendMessage(sock,STATE_PENDING);
        ret=listRecursive(folder,sock);
        sendMessage(sock,"\r\n.\r\n");
    }
    else if(strstr(message,"ENCR")!=NULL){
        char s[5]=""; //room for "ENCR" plus the terminator (s[4] overflowed)
        unsigned int seed=-1;
        char path[PATHLEN]="errore";
        sscanf(message,"%s %u %[^\n]%*s",s,&seed,path);
        if(seed!=-1 && strcmp(path,"errore")!=0){
            ret=encrypt(path,seed,sock);
        }
    }
    else if(strstr(message,"DECR")!=NULL){
        char s[5]=""; //room for "DECR" plus the terminator (s[4] overflowed)
        unsigned int seed=-1;
        char path[PATHLEN]="errore";
        sscanf(message,"%s %u %[^\n]%*s",s,&seed,path);
        if(seed!=-1 && strcmp(path,"errore")!=0){
            ret=decrypt(path,seed,sock);
        }
    }
    //handle response codes
    if(ret==200){
        sendMessage(sock,STATE_OK);
    }
    else if(ret==400){
        sendMessage(sock,lastError);
        sendMessage(sock,STATE_ERROR);
    }
    else if(ret==500){
        sendMessage(sock,lastError);
        sendMessage(sock,STATE_UNAVAIL);
    }
    return ret;
}

int addRequest(pthread_mutex_t *mutex,pthread_cond_t *cond,char *folder,char *address,char *message,int sock){
    struct request *req=(struct request *)malloc(sizeof(struct request));
    if(!req){
        char toLog[BUFSIZE]="";
        sprintf(toLog,"Errore malloc richiesta.\n");
        writeLog(LOGFILE,toLog);
        return 1;
    }
    pthread_mutex_lock(mutex);
    req->ID=nextReqID;
    req->folder=folder;
    //copy the strings: executeServer reuses its stack buffers for the next client
    //(storing the raw pointers, as before, raced with the worker threads)
    req->address=strdup(address);
    req->message=strdup(message);
    req->sock=sock;
    req->next=NULL;
    char toLog[BUFSIZE]="";
    sprintf(toLog,"[Richiesta #%i] [%s] [%s]\n",nextReqID,address,message);
    writeLog(LOGFILE,toLog);
    //printf("[Richiesta #%i] [%s] [%s]\n",nextReqID,address,message);
    if(numReqs==0)
        first=req;
    else
        last->next=req;
    last=req;
    numReqs++;
    pthread_cond_broadcast(cond);
    pthread_mutex_unlock(mutex);
    nextReqID++;
    return 0;
}

struct request* removeRequest(pthread_mutex_t *mutex){
    struct request *req;
    pthread_mutex_lock(mutex);
    if(numReqs>0){
        req=first;
        first=req->next;
        if(first==NULL)
            last=NULL;
        numReqs--;
    }
    else{
        req=NULL;
    }
    pthread_mutex_unlock(mutex);
    return req;
}

void *task(void *arg){
    int *threadID=(int *)arg;
    struct request *req;
    while(run){
        pthread_mutex_lock(&reqMutex);
        int r=numReqs;
        pthread_mutex_unlock(&reqMutex);
        if(r>0){
            pthread_mutex_lock(&reqMutex); //reqMutex is recursive, so the nested lock in removeRequest is safe
            req=removeRequest(&reqMutex);
            pthread_mutex_unlock(&reqMutex);
            if(req){
                char *folder=req->folder;
                char *message=req->message;
                int sock=req->sock;
                int reqID=req->ID;
                //printf("[Richiesta #%i] [Thread #%i - assegnata]\n",reqID,*threadID);
                int ret=parseRequest(folder, message, sock);
                char toLog[BUFSIZE]="";
                sprintf(toLog,"[Richiesta #%i] [Thread #%i: %i]\n",reqID,*threadID,ret);
                writeLog(LOGFILE,toLog);
                //printf("[Richiesta #%i] [Thread #%i: %i]\n",reqID,*threadID,ret);
                free(req->address); free(req->message); //release the copies made in addRequest
                free(req);
                close(sock);
            }
        }
        else{
            pthread_mutex_lock(&reqMutex);
            pthread_cond_wait(&reqCond,&reqMutex);
            pthread_mutex_unlock(&reqMutex);
        }
    }
    return NULL;
}

int executeServer(char folder[], unsigned short port, int threadNum){
    DIR* dir=opendir(folder);
    if(dir){
        closedir(dir);
        //create the listening socket
        int serverSock;
        struct sockaddr_in serveraddr;
        int optval;
        int msglen;
        serverSock=socket(AF_INET, SOCK_STREAM, 0);
        if(serverSock<0){
            char toLog[BUFSIZE]="";
            sprintf(toLog,"Errore apertura socket.\n");
            writeLog(LOGFILE,toLog);
            //printf("Errore apertura socket.\n");
            return 1;
        }
        //reuse the address quickly
        optval=1;
        setsockopt(serverSock, SOL_SOCKET, SO_REUSEADDR, (const void*)&optval, sizeof(int));
        memset((char *)&serveraddr,0,sizeof(serveraddr));
        serveraddr.sin_family = AF_INET;
        serveraddr.sin_addr.s_addr=htonl(INADDR_ANY);
        serveraddr.sin_port=htons(port);
        if(bind(serverSock, (struct sockaddr *)&serveraddr, sizeof(serveraddr))<0){
            char toLog[BUFSIZE]="";
            sprintf(toLog,"Errore binding sul socket.\n");
            writeLog(LOGFILE,toLog);
            //printf("Errore binding sul socket.\n");
            return 1;
        }
        if(listen(serverSock,5)<0){
            char toLog[BUFSIZE]="";
            sprintf(toLog,"Errore listening sul socket.\n");
            writeLog(LOGFILE,toLog);
            //printf("Errore listening sul socket.\n");
            return 1;
        }
        //create the thread pool
        int threadID[threadNum];
        pthread_t threads[threadNum];
        for(int i=0;i<threadNum;i++){
            threadID[i]=i;
            pthread_create(&threads[i],NULL,task,(void *)&threadID[i]);
        }
        //receive a request and queue it, to be processed by a worker thread
        int clientSock;
        struct sockaddr_in clientAddr;
        char message[BUFSIZE];
        unsigned int clientlen=sizeof(clientAddr);
        while(true){
            //accept a client connection
            clientSock=accept(serverSock, (struct sockaddr *)&clientAddr, &clientlen);
            if(clientSock<0){
                break;
            }
            //get the client address; servlen is 0 since no service buffer is passed (was sizeof(NULL))
            char clientAddrReadable[NI_MAXHOST];
            if (getnameinfo((const struct sockaddr *)&clientAddr, clientlen, clientAddrReadable, sizeof(clientAddrReadable), NULL, 0, NI_NUMERICHOST) != 0){
                char toLog[BUFSIZE]="";
                sprintf(toLog,"Errore risoluzione client.\n");
                writeLog(LOGFILE,toLog);
                break;
            }
            //read the request sent by the client
            memset(message,0,BUFSIZE);
            msglen=read(clientSock,message,BUFSIZE);
            if(msglen<0){
                char toLog[BUFSIZE]="";
                sprintf(toLog,"Errore read sul socket.\n");
                writeLog(LOGFILE,toLog);
                //printf("Errore read sul socket.\n");
                break;
            }
            //enqueue the request
            if(addRequest(&reqMutex,&reqCond,folder,clientAddrReadable,message,clientSock)!=0){
                break;
            }
        }
        //close the socket
        close(serverSock);
        //wait for the pthreads to finish
        for(int i=0;i<threadNum;i++){
            pthread_join(threads[i],NULL);
        }
    }
    else if(ENOENT == errno || ENOTDIR == errno){
        char toLog[BUFSIZE]="";
        sprintf(toLog,"La cartella %s non e' una directory valida o non esiste.\n",folder);
        writeLog(LOGFILE,toLog);
        //printf("La cartella %s non e' una directory valida o non esiste.\n",folder);
        return 1;
    }
    return 0;
}

void showHelp(char *command){
    printf("server~ ");
    if(strcmp(command,"-h")!=0)
        printf("Comando non valido.\n\t");
    printf("Usage: {comando_1} [valore_1] ... {comando_n} [valore_n]\n\t\
Ogni valore e' marcato come opzionale, ma puo' essere obbligatorio a seconda del comando che lo precede.\n\n\t\
Comandi (valori obbligatori):\n\t\
-c\t obbligatorio, specifica la cartella di partenza\n\t\
\t ignora la voce folder=<dir/to/start/with>\n\t\
-p\t specifica la porta TCP sulla quale restare in ascolto; default: 8888\n\t\
\t ignora la voce port=<portNum>\n\t\
-n\t specifica il numero di thread da utilizzare; default: 1\n\t\
\t ignora la voce threadNumber=<threadNum>\n\n\t\
Comandi (nessun valore necessario):\n\t\
-h\t mostra questo messaggio\n\n\t\
Dettagli:\n\t\
Tutti i parametri possono essere definiti tramite il file misc/server.conf, ma ignorati se specificati tramite riga di comando.\n\t\
In particolare, l'opzione -c non e' obbligatoria se la cartella e' specificata in tale file.\n");
    return;
}

int main(int argc, char *argv[]){
    int r=mkdir("misc",0777);
    if(r!=0 && errno!=EEXIST){
        printf("Errore creazione directory di log.\n");
        return 1;
    }
    FILE *srvlog=fopen(LOGFILE,"w");
    if(srvlog==NULL){
        printf("Errore creazione file di log.\n");
        return 1;
    }
    fclose(srvlog);
    memset(folder,0,PATHLEN);
    port=0;
    threadNum=-1;
    loadConfig(&port,folder,&threadNum);
    if(argc>1){
        for(int i=1;i<argc;i++){
            if(strcmp(argv[i],"-c")==0){
                if(i+1<argc && strstr(argv[i+1],"-")==NULL){
                    memset(folder,0,PATHLEN);
                    strncpy(folder,argv[i+1],strlen(argv[i+1]));
                    i++;
                }
                else{
                    showHelp(argv[i]);
                }
            }
            else if(strcmp(argv[i],"-p")==0){
                if(i+1<argc && strstr(argv[i+1],"-")==NULL){
                    port=(unsigned short)atoi(argv[i+1]);
                    i++;
                }
                else{
                    showHelp(argv[i]);
                }
            }
            else if(strcmp(argv[i],"-n")==0){
                if(i+1<argc && strstr(argv[i+1],"-")==NULL){
                    threadNum=atoi(argv[i+1]);
                    i++;
                }
                else{
                    showHelp(argv[i]);
                }
            }
            else
                showHelp(argv[i]);
        }
    }
    if(strcmp(folder,"\0")==0){
        showHelp(argv[0]);
        return 1;
    }
    makeDaemon();
    struct sigaction sa;
    memset((char *)&sa,0,sizeof(sa));
    sa.sa_handler=sigHandler;
    if(sigaction(SIGHUP, &sa, NULL)<0){
        char toLog[BUFSIZE]="";
        sprintf(toLog,"Errore sigaction\n");
        writeLog(LOGFILE,toLog);
        return 1;
    }
    //initialize global variables
    nextReqID=0;
    numReqs=0;
    reqMutex=(pthread_mutex_t)PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
    reqCond=(pthread_cond_t)PTHREAD_COND_INITIALIZER;
    run=true;
    while(true){
        executeServer(folder,port,threadNum);
        //executeServer returns when SIGHUP arrives; re-read the config file
        if(loadConfig(&port,folder,&threadNum)!=0){
            char toLog[BUFSIZE]="";
            sprintf(toLog,"Errore lettura del file di configurazione.\n");
            writeLog(LOGFILE,toLog);
            return 1;
        }
        run=true;
    }
    return 0;
}

static void makeDaemon(){
    pid_t pid = fork();
    if(pid<0){
        printf("Errore fork\n");
        exit(EXIT_FAILURE);
    }
    //all good, the parent can exit
    if(pid>0)
        exit(EXIT_SUCCESS);
    //session leader
    if(setsid()<0)
        exit(EXIT_FAILURE);
    //ignore signals
    struct sigaction sa;
    memset((char *)&sa,0,sizeof(sa));
    sa.sa_handler=sigIgnorer;
    if(sigaction(SIGHUP, &sa, NULL)<0){
        printf("Errore sigaction.\n");
        exit(EXIT_FAILURE);
    }
    if(sigaction(SIGCHLD, &sa, NULL)<0){
        printf("Errore sigaction.\n");
        exit(EXIT_FAILURE);
    }
    //second fork
    pid=fork();
    if(pid<0){
        printf("Errore fork\n");
        exit(EXIT_FAILURE);
    }
    //all good, the parent can exit
    if(pid>0)
        exit(EXIT_SUCCESS);
    //write the pid to the log file
    pid=getpid();
    char toLog[BUFSIZE]="";
    sprintf(toLog,"Server avviato. PID: %ld\n",(long)pid);
    writeLog(LOGFILE,toLog);
    //close all open descriptors
    for(int i=sysconf(_SC_OPEN_MAX);i>=0;i--){
        close(i);
    }
}

void sigIgnorer(int signal){
    //
}

void sigHandler(int signal){
    run=false;
    pthread_mutex_lock(&reqMutex);
    pthread_cond_broadcast(&reqCond);
    pthread_mutex_unlock(&reqMutex);
}
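fileXOR's large-file path above maps the file in 5MB chunks and gives each OpenMP thread a private 256KB section plus its own slice of the key stream, so no two threads ever touch the same bytes. Here is a standalone sketch of just that loop, under stated assumptions: xor_chunk is a hypothetical name, plain buffers stand in for the mmap'd files, and, unlike the original, the loop bound also guards the final partial 4-byte group.

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define SECTION (256*1024)   // bytes handled by one thread, as in fileXOR

// Each OpenMP thread XORs its own SECTION of the buffer, consuming
// one key per 4-byte group from its private key stream.
void xor_chunk(char *dst, const char *src, long chunkDim,
               int key[][SECTION/4], int mpThreads)
{
    #pragma omp parallel num_threads(mpThreads)
    {
        int tid = omp_get_thread_num();
        long min = (long)tid * SECTION, max = min + SECTION;
        for (long i = min; i + 3 < max && i + 3 < chunkDim; i += 4) {
            int k = key[tid][(i - min) / 4];
            for (int b = 0; b < 4; b++)
                dst[i+b] = (char)(src[i+b] ^ k);
        }
    }
}

int main(void)
{
    enum { N = SECTION };                 // one section, one thread
    static char src[N], dst[N];
    static int key[1][SECTION/4];
    srand(42);
    for (long i = 0; i < N; i++) src[i] = (char)i;
    for (long i = 0; i < SECTION/4; i++) key[0][i] = rand() % 65536;
    xor_chunk(dst, src, N, key, 1);
    xor_chunk(src, dst, N, key, 1);       // XOR twice restores the original
    printf("roundtrip ok: %d\n", src[5] == (char)5);
    return 0;
}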
#include "userver.h" int XOR(int a, int b){ return a^b; } int fileXOR(char srcfile[], char dstfile[], long dim, int seed){ //apertura file int src=open(srcfile,O_RDWR); if(src<0){ sprintf(lastError,"Errore apertura file %s.\n",srcfile); return 400; } int dst=open(dstfile,O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); if(dst<0){ sprintf(lastError,"Errore apertura file %s.\n",dstfile); close(src); return 400; } //lock file if(lockf(src,F_TLOCK,0)<0){ sprintf(lastError,"Errore lock su file %s.\n",srcfile); close(src); close(dst); return 500; } if(lockf(dst,F_TLOCK,0)<0){ sprintf(lastError,"Errore lock su file %s.\n",dstfile); lockf(src,F_ULOCK,0); close(src); close(dst); return 500; } //arrivo in fondo al file di output int result=lseek(dst, dim-1, SEEK_SET); if (result==-1) { sprintf(lastError,"Errore stretch file %s.\n",dstfile); lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 500; } //scrivo un placeholder per mantenere le modifiche di dimensione result=write(dst,"",1); if (result!=1) { sprintf(lastError,"Errore scrittura su file %s.\n",dstfile); lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 500; } //imposto il seed per rand() srand(seed); //thread non necessari sotto 256KB if(dim<=256*1024){ long freePages=sysconf(_SC_AVPHYS_PAGES); long pageDim=sysconf(_SC_PAGESIZE); long freeMem=freePages*pageDim; if(freeMem<=3*dim){ sprintf(lastError,"RAM insufficiente per aprire il file %s.\n",srcfile); lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 500; } //mapping dell'intero file char *srcmap=(char *)mmap(NULL,dim,PROT_READ,MAP_PRIVATE,src,0); if((void *)srcmap==MAP_FAILED){ sprintf(lastError,"Errore file mapping su file %s.\n",srcfile); lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 500; } char *dstmap=(char *)mmap(NULL,dim,PROT_READ | PROT_WRITE,MAP_SHARED,dst,0); if((void *)dstmap==MAP_FAILED){ sprintf(lastError,"Errore file mapping su file %s.\n",dstfile); munmap((void *)srcmap,dim); lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 500; } //array della chiave per lo XOR, 4 byte consecutivi con la stessa chiave long keyDim=(long)ceil((double)dim/4)*4; int key[keyDim]; for(long i=0;i<keyDim;i++){ key[i]=rand()%65536; //limite del numero generato, per portabilita' tra compilatori } //effettua lo XOR e scrivi nel mapping, byte a byte long i,j; for(i=0,j=0;i<dim && j<keyDim;i+=4,j++){ dstmap[i]=(char)(XOR((int)srcmap[i],key[j])); dstmap[i+1]=(char)(XOR((int)srcmap[i+1],key[j])); dstmap[i+2]=(char)(XOR((int)srcmap[i+2],key[j])); dstmap[i+3]=(char)(XOR((int)srcmap[i+3],key[j])); } munmap((void *)srcmap,dim); munmap((void *)dstmap,dim); } //sono necessari thread, utilizzo OpenMP; suggerito: 1 thread omp per ogni blocco di 256KB - limite 7MB tutti in blocco else{ long fiveMB=5*pow(2,20); int chunks=(int)ceil((double)dim/fiveMB); for(int c=0;c<chunks;c++){ long freePages=sysconf(_SC_AVPHYS_PAGES); long pageDim=sysconf(_SC_PAGESIZE); long freeMem=freePages*pageDim; if(freeMem<=2*fiveMB){ sprintf(lastError,"RAM insufficiente per aprire il file %s.\n",srcfile); lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 500; } long start=(c)*fiveMB; long end=(c+1)*fiveMB; long realEnd=end; if(dim<realEnd) realEnd=dim; long chunkDim=realEnd-start; if(dim-start<chunkDim) chunkDim=dim-start; //mapping del chunk c char *srcmap=(char *)mmap(NULL,chunkDim,PROT_READ,MAP_PRIVATE,src,start); if((void *)srcmap==MAP_FAILED){ 
sprintf(lastError,"Errore file mapping su file %s, chunk #%i.\n",srcfile,c); lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 500; } char *dstmap=(char *)mmap(NULL,chunkDim,PROT_READ | PROT_WRITE,MAP_SHARED,dst,start); if((void *)dstmap==MAP_FAILED){ sprintf(lastError,"Errore file mapping su file %s, chunk #%i.\n",dstfile,c); munmap((void *)srcmap,chunkDim); lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 500; } //1 thread OpenMP ogni 256KB int mpThreads=(int)ceil((double)chunkDim/(256*1024)); //matrice della chiave per lo XOR, 4 byte consecutivi con la stessa chiave //ogni thread OpenMP ha il suo array di dimensione ridotta long keyDimT = (long)ceil((double)chunkDim / (mpThreads * 4)); int key[mpThreads][keyDimT]; for(long j=0;j<mpThreads;j++){ for(long i=0;i<keyDimT;i++){ key[j][i]=rand()%65536; //limite del numero generato, per portabilita' tra compilatori } } int threadID=omp_get_thread_num(); int min=(threadID)*256*1024; int max=(threadID+1)*256*1024; //effettua lo XOR e scrivi nel mapping, byte a byte con ogni thread unicamente nella sua sezione for(long i=min;i<max && i<chunkDim;i+=4){ int val=key[threadID][(i-min)/4]; dstmap[i]=(char)(XOR((int)srcmap[i],val)); dstmap[i+1]=(char)(XOR((int)srcmap[i+1],val)); dstmap[i+2]=(char)(XOR((int)srcmap[i+2],val)); dstmap[i+3]=(char)(XOR((int)srcmap[i+3],val)); } munmap((void *)srcmap,chunkDim); munmap((void *)dstmap,chunkDim); } } //fine lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 200; } int sendMessage(int sock, char message[]){ char buf[BUFSIZE]; memset(buf,0,BUFSIZE); strncpy(buf,message,BUFSIZE); int msglen=write(sock,buf,BUFSIZE); if(msglen<0){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore write sul socket.\n"); writeLog(LOGFILE,toLog); return 1; } return 0; } int encrypt(char src[], int seed, int sock){ char dst[PATHLEN]=""; strncpy(dst,src,strlen(src)); strncat(dst,"_enc",5); long dim=-1; struct stat st; if(stat(src, &st) == 0) dim=st.st_size; if(dim==-1){ if(errno==ENOENT){ sprintf(lastError,"File %s non esistente.\n",src); return 400; } else{ sprintf(lastError,"Errore nel calcolo dimensione del file %s.\n",src); return 500; } } int ret=fileXOR(src,dst,dim,seed); if(ret==200 && unlink(src)){ sprintf(lastError,"Errore nella cancellazione del file %s.\n",src); return 500; } return ret; } int decrypt(char src[], int seed, int sock){ char *enc=NULL; char *temp = strstr(src, "_enc"); while (temp) { enc = temp++; temp = strstr(temp, "_enc"); } if(enc==NULL || strlen(enc)!=4){ sprintf(lastError,"Il file %s non e' un file cifrato.\n",src); return 400; } char dst[PATHLEN]=""; strncpy(dst,src,strlen(src)-4); long dim=-1; struct stat st; if(stat(src, &st) == 0) dim=st.st_size; if(dim==-1){ if(errno==ENOENT){ sprintf(lastError,"File %s non esistente.\n",src); return 400; } else{ sprintf(lastError,"Errore nel calcolo dimensione del file %s.\n",src); return 500; } } int ret=fileXOR(src,dst,dim,seed); if(ret==200 && unlink(src)){ sprintf(lastError,"Errore nella cancellazione del file %s.\n",src); return 500; } return ret; } int listFolder(char folder[], int sock){ DIR* dir=opendir(folder); if(dir==NULL){ sprintf(lastError,"Errore apertura directory %s.\n",folder); return 400; } while(true){ struct dirent *val=NULL; char path[PATHLEN]=""; char entry[PATHLEN+50]=""; memset(path,0,sizeof(path)); memset(entry,0,sizeof(entry)); val=readdir(dir); if(val==NULL){ break; } if(strcmp(val->d_name,".")==0 || strcmp(val->d_name,"..")==0 || (val->d_type & DT_DIR)) continue; 
strncpy(path,folder,PATHLEN); if(strstr(path+(strlen(path)-1),"/")==NULL) strncat(path,"/",1); strncat(path,val->d_name,PATHLEN-strlen(path)); long dim=-1; struct stat st; if(stat(path, &st) == 0) dim=st.st_size; if(dim==-1){ sprintf(lastError,"Errore nel calcolo dimensione del file %s.\n",path); return 500; } sprintf(entry,"%li %s",dim,path); sendMessage(sock,entry); sendMessage(sock,"\r\n"); } return 200; } int listRecursive(char folder[], int sock){ DIR* dir=opendir(folder); if(dir==NULL){ sprintf(lastError,"Errore apertura directory %s.\n",folder); return 400; } while(true){ struct dirent *val=NULL; char path[PATHLEN]=""; char entry[PATHLEN+50]=""; memset(path,0,sizeof(path)); memset(entry,0,sizeof(entry)); val=readdir(dir); if(val==NULL){ break; } if(strcmp(val->d_name,".")==0 || strcmp(val->d_name,"..")==0) continue; strncpy(path,folder,PATHLEN); if(strstr(path+(strlen(path)-1),"/")==NULL) strncat(path,"/",1); strncat(path,val->d_name,PATHLEN-strlen(path)); if(!(val->d_type & DT_DIR)){ long dim=-1; struct stat st; if(stat(path, &st) == 0) dim=st.st_size; if(dim==-1){ sprintf(lastError,"Errore nel calcolo dimensione del file %s.\n",path); return 500; } sprintf(entry,"%li %s",dim,path); } else{ if(dir){ int ret=listRecursive(path,sock); if(ret!=200) return ret; } else{ sprintf(lastError,"Errore nell'apertura della directory %s.\n",path); return 500; } } if(strcmp(entry,"")!=0) sendMessage(sock,entry); sendMessage(sock,"\r\n"); } closedir(dir); return 200; } int parseRequest(char folder[], char message[], int sock){ DIR* dir=opendir(folder); if(dir==NULL){ return 1; } int ret=0; if(strstr(message,"LSTF")!=NULL){ sendMessage(sock,STATE_PENDING); ret=listFolder(folder,sock); sendMessage(sock,"\r\n.\r\n"); } else if(strstr(message,"LSTR")!=NULL){ sendMessage(sock,STATE_PENDING); ret=listRecursive(folder,sock); sendMessage(sock,"\r\n.\r\n"); } else if(strstr(message,"ENCR")!=NULL){ char s[4]=""; unsigned int seed=-1; char path[PATHLEN]="errore"; sscanf(message,"%s %u %[^\n]%*s",s,&seed,path); if(seed!=-1 && strcmp(path,"errore")!=0){ ret=encrypt(path,seed,sock); } } else if(strstr(message,"DECR")!=NULL){ char s[4]=""; unsigned int seed=-1; char path[PATHLEN]="errore"; sscanf(message,"%s %u %[^\n]%*s",s,&seed,path); if(seed!=-1 && strcmp(path,"errore")!=0){ ret=decrypt(path,seed,sock); } } //gestione codici di risposta if(ret==200){ sendMessage(sock,STATE_OK); } else if(ret==400){ sendMessage(sock,lastError); sendMessage(sock,STATE_ERROR); } else if(ret==500){ sendMessage(sock,lastError); sendMessage(sock,STATE_UNAVAIL); } return ret; } int addRequest(pthread_mutex_t *mutex,pthread_cond_t *cond,char *folder,char *address,char *message,int sock){ struct request *req=(struct request *)malloc(sizeof(struct request)); if(!req){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore malloc richiesta.\n"); writeLog(LOGFILE,toLog); return 1; } pthread_mutex_lock(mutex); req->ID=nextReqID; req->folder=folder; req->address=address; req->message=message; req->sock=sock; req->next=NULL; char toLog[BUFSIZE]=""; sprintf(toLog,"[Richiesta #%i] [%s] [%s]\n",nextReqID,address,message); writeLog(LOGFILE,toLog); //printf("[Richiesta #%i] [%s] [%s]\n",nextReqID,address,message); if(numReqs==0) first=req; else last->next=req; last=req; numReqs++; pthread_cond_broadcast(cond); pthread_mutex_unlock(mutex); nextReqID++; return 0; } struct request* removeRequest(pthread_mutex_t *mutex){ struct request *req; pthread_mutex_lock(mutex); if(numReqs>0){ req=first; first=req->next; if(first==NULL) last=NULL; numReqs--; } else{ 
req=NULL; } pthread_mutex_unlock(mutex); return req; } void *task(void *arg){ int *threadID=(int *)arg; struct request *req; while(run){ pthread_mutex_lock(&reqMutex); int r=numReqs; pthread_mutex_unlock(&reqMutex); if(r>0){ pthread_mutex_lock(&reqMutex); req=removeRequest(&reqMutex); pthread_mutex_unlock(&reqMutex); if(req){ char *folder=req->folder; char *message=req->message; int sock=req->sock; int reqID=req->ID; //printf("[Richiesta #%i] [Thread #%i - assegnata]\n",reqID,*threadID); int ret=parseRequest(folder, message, sock); char toLog[BUFSIZE]=""; sprintf(toLog,"[Richiesta #%i] [Thread #%i: %i]\n",reqID,*threadID,ret); writeLog(LOGFILE,toLog); //printf("[Richiesta #%i] [Thread #%i: %i]\n",reqID,*threadID,ret); free(req); close(sock); } } else{ pthread_mutex_lock(&reqMutex); pthread_cond_wait(&reqCond,&reqMutex); pthread_mutex_unlock(&reqMutex); } } return NULL; } int executeServer(char folder[], unsigned short port, int threadNum){ DIR* dir=opendir(folder); if(dir){ closedir(dir); //crea socket in ascolto int serverSock; struct sockaddr_in serveraddr; int optval; int msglen; serverSock=socket(AF_INET, SOCK_STREAM, 0); if(serverSock<0){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore apertura socket.\n"); writeLog(LOGFILE,toLog); //printf("Errore apertura socket.\n"); return 1; } //riutilizzo indirizzo in modo veloce optval=1; setsockopt(serverSock, SOL_SOCKET, SO_REUSEADDR, (const void*)&optval, sizeof(int)); memset((char *)&serveraddr,0,sizeof(serveraddr)); serveraddr.sin_family = AF_INET; serveraddr.sin_addr.s_addr=htonl(INADDR_ANY); serveraddr.sin_port=htons(port); if(bind(serverSock, (struct sockaddr *)&serveraddr, sizeof(serveraddr))<0){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore binding sul socket.\n"); writeLog(LOGFILE,toLog); //printf("Errore binding sul socket.\n"); return 1; } if(listen(serverSock,5)<0){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore listening sul socket.\n"); writeLog(LOGFILE,toLog); //printf("Errore listening sul socket.\n"); return 1; } //crea thread pool int threadID[threadNum]; pthread_t threads[threadNum]; for(int i=0;i<threadNum;i++){ threadID[i]=i; pthread_create(&threads[i],NULL,task,(void *)&threadID[i]); } //ricevi richiesta ed inseriscila in coda, da processare da un thread int clientSock; struct sockaddr_in clientAddr; char message[BUFSIZE]; unsigned int clientlen=sizeof(clientAddr); while(true){ //ricevi connessione dal client clientSock=accept(serverSock, (struct sockaddr *)&clientAddr, &clientlen); if(clientSock<0){ break; } //ottiene l'indirizzo del client char clientAddrReadable[NI_MAXHOST]; if (getnameinfo((const struct sockaddr *)&clientAddr, clientlen, clientAddrReadable, sizeof(clientAddrReadable), NULL, sizeof(NULL), NI_NUMERICHOST) != 0){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore risoluzione client.\n"); writeLog(LOGFILE,toLog); break; } //ottieni la richiesta inviata dal client memset(message,0,BUFSIZE); msglen=read(clientSock,message,BUFSIZE); if(msglen<0){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore read sul socket.\n"); writeLog(LOGFILE,toLog); //printf("Errore read sul socket.\n"); break; } //aggiungi la richiesta in coda if(addRequest(&reqMutex,&reqCond,folder,clientAddrReadable,message,clientSock)!=0){ break; } } //chiudi socket close(serverSock); //attendi chiusura pthread for(int i=0;i<threadNum;i++){ pthread_join(threads[i],NULL); } } else if(ENOENT == errno || ENOTDIR == errno){ char toLog[BUFSIZE]=""; sprintf(toLog,"La cartella %s non e' una directory valida o non esiste.\n",folder); writeLog(LOGFILE,toLog); 
//printf("La cartella %s non e' una directory valida o non esiste.\n",folder); return 1; } return 0; } void showHelp(char *command){ printf("server~ "); if(strcmp(command,"-h")!=0) printf("Comando non valido.\n\t"); printf("Usage: {comando_1} [valore_1] ... {comando_n} [valore_n]\n\t\ Ogni valore e' marcato come opzionale, ma puo' essere obbligatorio a seconda del comando che lo precede.\n\n\t\ Comandi (valori obbligatori):\n\t\ -c\t obbligatorio, specifica la cartella di partenza\n\t\ \t ignora la voce folder=<dir/to/start/with>\n\t\ -p\t specifica la porta TCP sulla quale restare in ascolto; default: 8888\n\t\ \t ignora la voce port=<portNum>\n\t\ -n\t specifica il numero di thread da utilizzare; default: 1\n\t\ \t ignora la voce threadNumber=<threadNum>\n\n\t\ Comandi (nessun valore necessario):\n\t\ -h\t mostra questo messaggio\n\n\t\ Dettagli:\n\t\ Tutti i parametri possono essere definiti tramite il file misc/server.conf, ma ignorati se specificati tramite riga di comando.\n\t\ In particolare, l'opzione -c non e' obbligatoria se la cartella e' specificata in tale file.\n"); return; } int main(int argc, char *argv[]){ int r=mkdir("misc",0777); if(r!=0 && errno!=EEXIST){ printf("Errore creazione directory di log.\n"); return 1; } FILE *srvlog=fopen(LOGFILE,"w"); if(srvlog==NULL){ printf("Errore creazione file di log.\n"); return 1; } fclose(srvlog); memset(folder,0,PATHLEN); port=0; threadNum=-1; loadConfig(&port,folder,&threadNum); if(argc>1){ for(int i=1;i<argc;i++){ if(strcmp(argv[i],"-c")==0){ if(i+1<argc && strstr(argv[i+1],"-")==NULL){ memset(folder,0,PATHLEN); strncpy(folder,argv[i+1],strlen(argv[i+1])); i++; } else{ showHelp(argv[i]); } } else if(strcmp(argv[i],"-p")==0){ if(i+1<argc && strstr(argv[i+1],"-")==NULL){ port=(unsigned short)atoi(argv[i+1]); i++; } else{ showHelp(argv[i]); } } else if(strcmp(argv[i],"-n")==0){ if(i+1<argc && strstr(argv[i+1],"-")==NULL){ threadNum=atoi(argv[i+1]); i++; } else{ showHelp(argv[i]); } } else showHelp(argv[i]); } } if(strcmp(folder,"\0")==0){ showHelp(argv[0]); return 1; } makeDaemon(); struct sigaction sa; memset((char *)&sa,0,sizeof(sa)); sa.sa_handler=sigHandler; if(sigaction(SIGHUP, &sa, NULL)<0){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore sigaction\n"); writeLog(LOGFILE,toLog); return 1; } //inizializzazione variabili globali nextReqID=0; numReqs=0; reqMutex=(pthread_mutex_t)PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP; reqCond=(pthread_cond_t)PTHREAD_COND_INITIALIZER; run=true; while(true){ executeServer(folder,port,threadNum); //executeServer termina all'arrivo del SIGHUP, rileggo il file if(loadConfig(&port,folder,&threadNum)!=0){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore lettura del file di configurazione.\n"); writeLog(LOGFILE,toLog); return 1; } run=true; } return 0; } static void makeDaemon(){ pid_t pid = fork(); if(pid<0){ printf("Errore fork\n"); exit(EXIT_FAILURE); } //tutto okay, il genitore può terminare if(pid>0) exit(EXIT_SUCCESS); //leader sessione if(setsid()<0) exit(EXIT_FAILURE); //ignoro segnali struct sigaction sa; memset((char *)&sa,0,sizeof(sa)); sa.sa_handler=sigIgnorer; if(sigaction(SIGHUP, &sa, NULL)<0){ printf("Errore sigaction.\n"); exit(EXIT_FAILURE); } if(sigaction(SIGCHLD, &sa, NULL)<0){ printf("Errore sigaction.\n"); exit(EXIT_FAILURE); } //secondo fork pid=fork(); if(pid<0){ printf("Errore fork\n"); exit(EXIT_FAILURE); } //tutto okay, il genitore può terminare if(pid>0) exit(EXIT_SUCCESS); //scrivo pid nel file di log pid=getpid(); char toLog[BUFSIZE]=""; sprintf(toLog,"Server avviato. 
PID: %ld\n",(long)pid); writeLog(LOGFILE,toLog); //chiudi tutti i descrittori aperti for(int i=sysconf(_SC_OPEN_MAX);i>=0;i--){ close(i); } } void sigIgnorer(int signal){ // } void sigHandler(int signal){ run=false; pthread_mutex_lock(&reqMutex); pthread_cond_broadcast(&reqCond); pthread_mutex_unlock(&reqMutex); }
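A note on the queue discipline before the next copy: addRequest/removeRequest/task above implement a classic mutex-plus-condition-variable work queue, but task() reads numReqs outside the lock before deciding to wait, leaving a gap in which a wakeup can be missed. Below is a minimal standalone sketch of the same queue (hypothetical names, sockets replaced by plain ints) with the predicate re-checked under the lock, which closes that gap.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <unistd.h>

struct node { int sock; struct node *next; };

static struct node *first = NULL, *last = NULL;
static int numReqs = 0;
static bool run = true;
static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

// producer: append under the lock, then wake the workers (as addRequest does)
static void add_request(int sock)
{
    struct node *n = malloc(sizeof *n);
    n->sock = sock; n->next = NULL;
    pthread_mutex_lock(&mtx);
    if (numReqs == 0) first = n; else last->next = n;
    last = n; numReqs++;
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&mtx);
}

// consumer: wait on the condition with the predicate checked under the lock
static void *worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&mtx);
    while (run) {
        while (run && numReqs == 0) pthread_cond_wait(&cond, &mtx);
        if (!run) break;
        struct node *n = first;
        first = n->next; if (!first) last = NULL; numReqs--;
        pthread_mutex_unlock(&mtx);
        printf("served request on sock %d\n", n->sock);   // stand-in for parseRequest
        free(n);
        pthread_mutex_lock(&mtx);
    }
    pthread_mutex_unlock(&mtx);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
    for (int s = 3; s < 8; s++) add_request(s);
    sleep(1);                        // let the worker drain the queue
    pthread_mutex_lock(&mtx);
    run = false;                     // the real server does this in sigHandler
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&mtx);
    pthread_join(t, NULL);
    return 0;
}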
#include "userver.h" int XOR(int a, int b){ return a^b; } int fileXOR(char srcfile[], char dstfile[], long dim, int seed){ //apertura file int src=open(srcfile,O_RDWR); if(src<0){ sprintf(lastError,"Errore apertura file %s.\n",srcfile); return 400; } int dst=open(dstfile,O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); if(dst<0){ sprintf(lastError,"Errore apertura file %s.\n",dstfile); close(src); return 400; } //lock file if(lockf(src,F_TLOCK,0)<0){ sprintf(lastError,"Errore lock su file %s.\n",srcfile); close(src); close(dst); return 500; } if(lockf(dst,F_TLOCK,0)<0){ sprintf(lastError,"Errore lock su file %s.\n",dstfile); lockf(src,F_ULOCK,0); close(src); close(dst); return 500; } //arrivo in fondo al file di output int result=lseek(dst, dim-1, SEEK_SET); if (result==-1) { sprintf(lastError,"Errore stretch file %s.\n",dstfile); lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 500; } //scrivo un placeholder per mantenere le modifiche di dimensione result=write(dst,"",1); if (result!=1) { sprintf(lastError,"Errore scrittura su file %s.\n",dstfile); lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 500; } //imposto il seed per rand() srand(seed); //thread non necessari sotto 256KB if(dim<=256*1024){ long freePages=sysconf(_SC_AVPHYS_PAGES); long pageDim=sysconf(_SC_PAGESIZE); long freeMem=freePages*pageDim; if(freeMem<=3*dim){ sprintf(lastError,"RAM insufficiente per aprire il file %s.\n",srcfile); lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 500; } //mapping dell'intero file char *srcmap=(char *)mmap(NULL,dim,PROT_READ,MAP_PRIVATE,src,0); if((void *)srcmap==MAP_FAILED){ sprintf(lastError,"Errore file mapping su file %s.\n",srcfile); lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 500; } char *dstmap=(char *)mmap(NULL,dim,PROT_READ | PROT_WRITE,MAP_SHARED,dst,0); if((void *)dstmap==MAP_FAILED){ sprintf(lastError,"Errore file mapping su file %s.\n",dstfile); munmap((void *)srcmap,dim); lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 500; } //array della chiave per lo XOR, 4 byte consecutivi con la stessa chiave long keyDim=(long)ceil((double)dim/4)*4; int key[keyDim]; for(long i=0;i<keyDim;i++){ key[i]=rand()%65536; //limite del numero generato, per portabilita' tra compilatori } //effettua lo XOR e scrivi nel mapping, byte a byte long i,j; for(i=0,j=0;i<dim && j<keyDim;i+=4,j++){ dstmap[i]=(char)(XOR((int)srcmap[i],key[j])); dstmap[i+1]=(char)(XOR((int)srcmap[i+1],key[j])); dstmap[i+2]=(char)(XOR((int)srcmap[i+2],key[j])); dstmap[i+3]=(char)(XOR((int)srcmap[i+3],key[j])); } munmap((void *)srcmap,dim); munmap((void *)dstmap,dim); } //sono necessari thread, utilizzo OpenMP; suggerito: 1 thread omp per ogni blocco di 256KB - limite 7MB tutti in blocco else{ long fiveMB=5*pow(2,20); int chunks=(int)ceil((double)dim/fiveMB); for(int c=0;c<chunks;c++){ long freePages=sysconf(_SC_AVPHYS_PAGES); long pageDim=sysconf(_SC_PAGESIZE); long freeMem=freePages*pageDim; if(freeMem<=2*fiveMB){ sprintf(lastError,"RAM insufficiente per aprire il file %s.\n",srcfile); lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 500; } long start=(c)*fiveMB; long end=(c+1)*fiveMB; long realEnd=end; if(dim<realEnd) realEnd=dim; long chunkDim=realEnd-start; if(dim-start<chunkDim) chunkDim=dim-start; //mapping del chunk c char *srcmap=(char *)mmap(NULL,chunkDim,PROT_READ,MAP_PRIVATE,src,start); if((void *)srcmap==MAP_FAILED){ 
sprintf(lastError,"Errore file mapping su file %s, chunk #%i.\n",srcfile,c); lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 500; } char *dstmap=(char *)mmap(NULL,chunkDim,PROT_READ | PROT_WRITE,MAP_SHARED,dst,start); if((void *)dstmap==MAP_FAILED){ sprintf(lastError,"Errore file mapping su file %s, chunk #%i.\n",dstfile,c); munmap((void *)srcmap,chunkDim); lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 500; } //1 thread OpenMP ogni 256KB int mpThreads=(int)ceil((double)chunkDim/(256*1024)); //matrice della chiave per lo XOR, 4 byte consecutivi con la stessa chiave //ogni thread OpenMP ha il suo array di dimensione ridotta long keyDimT = (long)ceil((double)chunkDim / (mpThreads * 4)); int key[mpThreads][keyDimT]; for(long j=0;j<mpThreads;j++){ for(long i=0;i<keyDimT;i++){ key[j][i]=rand()%65536; //limite del numero generato, per portabilita' tra compilatori } } #pragma omp parallel num_threads(mpThreads) { int threadID=omp_get_thread_num(); int min=(threadID)*256*1024; int max=(threadID+1)*256*1024; //effettua lo XOR e scrivi nel mapping, byte a byte con ogni thread unicamente nella sua sezione for(long i=min;i<max && i<chunkDim;i+=4){ int val=key[threadID][(i-min)/4]; dstmap[i]=(char)(XOR((int)srcmap[i],val)); dstmap[i+1]=(char)(XOR((int)srcmap[i+1],val)); dstmap[i+2]=(char)(XOR((int)srcmap[i+2],val)); dstmap[i+3]=(char)(XOR((int)srcmap[i+3],val)); } } munmap((void *)srcmap,chunkDim); munmap((void *)dstmap,chunkDim); } } //fine lockf(src,F_ULOCK,0); lockf(dst,F_ULOCK,0); close(src); close(dst); return 200; } int sendMessage(int sock, char message[]){ char buf[BUFSIZE]; memset(buf,0,BUFSIZE); strncpy(buf,message,BUFSIZE); int msglen=write(sock,buf,BUFSIZE); if(msglen<0){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore write sul socket.\n"); writeLog(LOGFILE,toLog); return 1; } return 0; } int encrypt(char src[], int seed, int sock){ char dst[PATHLEN]=""; strncpy(dst,src,strlen(src)); strncat(dst,"_enc",5); long dim=-1; struct stat st; if(stat(src, &st) == 0) dim=st.st_size; if(dim==-1){ if(errno==ENOENT){ sprintf(lastError,"File %s non esistente.\n",src); return 400; } else{ sprintf(lastError,"Errore nel calcolo dimensione del file %s.\n",src); return 500; } } int ret=fileXOR(src,dst,dim,seed); if(ret==200 && unlink(src)){ sprintf(lastError,"Errore nella cancellazione del file %s.\n",src); return 500; } return ret; } int decrypt(char src[], int seed, int sock){ char *enc=NULL; char *temp = strstr(src, "_enc"); while (temp) { enc = temp++; temp = strstr(temp, "_enc"); } if(enc==NULL || strlen(enc)!=4){ sprintf(lastError,"Il file %s non e' un file cifrato.\n",src); return 400; } char dst[PATHLEN]=""; strncpy(dst,src,strlen(src)-4); long dim=-1; struct stat st; if(stat(src, &st) == 0) dim=st.st_size; if(dim==-1){ if(errno==ENOENT){ sprintf(lastError,"File %s non esistente.\n",src); return 400; } else{ sprintf(lastError,"Errore nel calcolo dimensione del file %s.\n",src); return 500; } } int ret=fileXOR(src,dst,dim,seed); if(ret==200 && unlink(src)){ sprintf(lastError,"Errore nella cancellazione del file %s.\n",src); return 500; } return ret; } int listFolder(char folder[], int sock){ DIR* dir=opendir(folder); if(dir==NULL){ sprintf(lastError,"Errore apertura directory %s.\n",folder); return 400; } while(true){ struct dirent *val=NULL; char path[PATHLEN]=""; char entry[PATHLEN+50]=""; memset(path,0,sizeof(path)); memset(entry,0,sizeof(entry)); val=readdir(dir); if(val==NULL){ break; } if(strcmp(val->d_name,".")==0 || 
strcmp(val->d_name,"..")==0 || (val->d_type & DT_DIR)) continue; strncpy(path,folder,PATHLEN); if(strstr(path+(strlen(path)-1),"/")==NULL) strncat(path,"/",1); strncat(path,val->d_name,PATHLEN-strlen(path)); long dim=-1; struct stat st; if(stat(path, &st) == 0) dim=st.st_size; if(dim==-1){ sprintf(lastError,"Errore nel calcolo dimensione del file %s.\n",path); return 500; } sprintf(entry,"%li %s",dim,path); sendMessage(sock,entry); sendMessage(sock,"\r\n"); } return 200; } int listRecursive(char folder[], int sock){ DIR* dir=opendir(folder); if(dir==NULL){ sprintf(lastError,"Errore apertura directory %s.\n",folder); return 400; } while(true){ struct dirent *val=NULL; char path[PATHLEN]=""; char entry[PATHLEN+50]=""; memset(path,0,sizeof(path)); memset(entry,0,sizeof(entry)); val=readdir(dir); if(val==NULL){ break; } if(strcmp(val->d_name,".")==0 || strcmp(val->d_name,"..")==0) continue; strncpy(path,folder,PATHLEN); if(strstr(path+(strlen(path)-1),"/")==NULL) strncat(path,"/",1); strncat(path,val->d_name,PATHLEN-strlen(path)); if(!(val->d_type & DT_DIR)){ long dim=-1; struct stat st; if(stat(path, &st) == 0) dim=st.st_size; if(dim==-1){ sprintf(lastError,"Errore nel calcolo dimensione del file %s.\n",path); return 500; } sprintf(entry,"%li %s",dim,path); } else{ if(dir){ int ret=listRecursive(path,sock); if(ret!=200) return ret; } else{ sprintf(lastError,"Errore nell'apertura della directory %s.\n",path); return 500; } } if(strcmp(entry,"")!=0) sendMessage(sock,entry); sendMessage(sock,"\r\n"); } closedir(dir); return 200; } int parseRequest(char folder[], char message[], int sock){ DIR* dir=opendir(folder); if(dir==NULL){ return 1; } int ret=0; if(strstr(message,"LSTF")!=NULL){ sendMessage(sock,STATE_PENDING); ret=listFolder(folder,sock); sendMessage(sock,"\r\n.\r\n"); } else if(strstr(message,"LSTR")!=NULL){ sendMessage(sock,STATE_PENDING); ret=listRecursive(folder,sock); sendMessage(sock,"\r\n.\r\n"); } else if(strstr(message,"ENCR")!=NULL){ char s[4]=""; unsigned int seed=-1; char path[PATHLEN]="errore"; sscanf(message,"%s %u %[^\n]%*s",s,&seed,path); if(seed!=-1 && strcmp(path,"errore")!=0){ ret=encrypt(path,seed,sock); } } else if(strstr(message,"DECR")!=NULL){ char s[4]=""; unsigned int seed=-1; char path[PATHLEN]="errore"; sscanf(message,"%s %u %[^\n]%*s",s,&seed,path); if(seed!=-1 && strcmp(path,"errore")!=0){ ret=decrypt(path,seed,sock); } } //gestione codici di risposta if(ret==200){ sendMessage(sock,STATE_OK); } else if(ret==400){ sendMessage(sock,lastError); sendMessage(sock,STATE_ERROR); } else if(ret==500){ sendMessage(sock,lastError); sendMessage(sock,STATE_UNAVAIL); } return ret; } int addRequest(pthread_mutex_t *mutex,pthread_cond_t *cond,char *folder,char *address,char *message,int sock){ struct request *req=(struct request *)malloc(sizeof(struct request)); if(!req){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore malloc richiesta.\n"); writeLog(LOGFILE,toLog); return 1; } pthread_mutex_lock(mutex); req->ID=nextReqID; req->folder=folder; req->address=address; req->message=message; req->sock=sock; req->next=NULL; char toLog[BUFSIZE]=""; sprintf(toLog,"[Richiesta #%i] [%s] [%s]\n",nextReqID,address,message); writeLog(LOGFILE,toLog); //printf("[Richiesta #%i] [%s] [%s]\n",nextReqID,address,message); if(numReqs==0) first=req; else last->next=req; last=req; numReqs++; pthread_cond_broadcast(cond); pthread_mutex_unlock(mutex); nextReqID++; return 0; } struct request* removeRequest(pthread_mutex_t *mutex){ struct request *req; pthread_mutex_lock(mutex); if(numReqs>0){ req=first; 
first=req->next; if(first==NULL) last=NULL; numReqs--; } else{ req=NULL; } pthread_mutex_unlock(mutex); return req; } void *task(void *arg){ int *threadID=(int *)arg; struct request *req; while(run){ pthread_mutex_lock(&reqMutex); int r=numReqs; pthread_mutex_unlock(&reqMutex); if(r>0){ pthread_mutex_lock(&reqMutex); req=removeRequest(&reqMutex); pthread_mutex_unlock(&reqMutex); if(req){ char *folder=req->folder; char *message=req->message; int sock=req->sock; int reqID=req->ID; //printf("[Richiesta #%i] [Thread #%i - assegnata]\n",reqID,*threadID); int ret=parseRequest(folder, message, sock); char toLog[BUFSIZE]=""; sprintf(toLog,"[Richiesta #%i] [Thread #%i: %i]\n",reqID,*threadID,ret); writeLog(LOGFILE,toLog); //printf("[Richiesta #%i] [Thread #%i: %i]\n",reqID,*threadID,ret); free(req); close(sock); } } else{ pthread_mutex_lock(&reqMutex); pthread_cond_wait(&reqCond,&reqMutex); pthread_mutex_unlock(&reqMutex); } } return NULL; } int executeServer(char folder[], unsigned short port, int threadNum){ DIR* dir=opendir(folder); if(dir){ closedir(dir); //crea socket in ascolto int serverSock; struct sockaddr_in serveraddr; int optval; int msglen; serverSock=socket(AF_INET, SOCK_STREAM, 0); if(serverSock<0){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore apertura socket.\n"); writeLog(LOGFILE,toLog); //printf("Errore apertura socket.\n"); return 1; } //riutilizzo indirizzo in modo veloce optval=1; setsockopt(serverSock, SOL_SOCKET, SO_REUSEADDR, (const void*)&optval, sizeof(int)); memset((char *)&serveraddr,0,sizeof(serveraddr)); serveraddr.sin_family = AF_INET; serveraddr.sin_addr.s_addr=htonl(INADDR_ANY); serveraddr.sin_port=htons(port); if(bind(serverSock, (struct sockaddr *)&serveraddr, sizeof(serveraddr))<0){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore binding sul socket.\n"); writeLog(LOGFILE,toLog); //printf("Errore binding sul socket.\n"); return 1; } if(listen(serverSock,5)<0){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore listening sul socket.\n"); writeLog(LOGFILE,toLog); //printf("Errore listening sul socket.\n"); return 1; } //crea thread pool int threadID[threadNum]; pthread_t threads[threadNum]; for(int i=0;i<threadNum;i++){ threadID[i]=i; pthread_create(&threads[i],NULL,task,(void *)&threadID[i]); } //ricevi richiesta ed inseriscila in coda, da processare da un thread int clientSock; struct sockaddr_in clientAddr; char message[BUFSIZE]; unsigned int clientlen=sizeof(clientAddr); while(true){ //ricevi connessione dal client clientSock=accept(serverSock, (struct sockaddr *)&clientAddr, &clientlen); if(clientSock<0){ break; } //ottiene l'indirizzo del client char clientAddrReadable[NI_MAXHOST]; if (getnameinfo((const struct sockaddr *)&clientAddr, clientlen, clientAddrReadable, sizeof(clientAddrReadable), NULL, sizeof(NULL), NI_NUMERICHOST) != 0){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore risoluzione client.\n"); writeLog(LOGFILE,toLog); break; } //ottieni la richiesta inviata dal client memset(message,0,BUFSIZE); msglen=read(clientSock,message,BUFSIZE); if(msglen<0){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore read sul socket.\n"); writeLog(LOGFILE,toLog); //printf("Errore read sul socket.\n"); break; } //aggiungi la richiesta in coda if(addRequest(&reqMutex,&reqCond,folder,clientAddrReadable,message,clientSock)!=0){ break; } } //chiudi socket close(serverSock); //attendi chiusura pthread for(int i=0;i<threadNum;i++){ pthread_join(threads[i],NULL); } } else if(ENOENT == errno || ENOTDIR == errno){ char toLog[BUFSIZE]=""; sprintf(toLog,"La cartella %s non e' una directory 
valida o non esiste.\n",folder); writeLog(LOGFILE,toLog); //printf("La cartella %s non e' una directory valida o non esiste.\n",folder); return 1; } return 0; } void showHelp(char *command){ printf("server~ "); if(strcmp(command,"-h")!=0) printf("Comando non valido.\n\t"); printf("Usage: {comando_1} [valore_1] ... {comando_n} [valore_n]\n\t\ Ogni valore e' marcato come opzionale, ma puo' essere obbligatorio a seconda del comando che lo precede.\n\n\t\ Comandi (valori obbligatori):\n\t\ -c\t obbligatorio, specifica la cartella di partenza\n\t\ \t ignora la voce folder=<dir/to/start/with>\n\t\ -p\t specifica la porta TCP sulla quale restare in ascolto; default: 8888\n\t\ \t ignora la voce port=<portNum>\n\t\ -n\t specifica il numero di thread da utilizzare; default: 1\n\t\ \t ignora la voce threadNumber=<threadNum>\n\n\t\ Comandi (nessun valore necessario):\n\t\ -h\t mostra questo messaggio\n\n\t\ Dettagli:\n\t\ Tutti i parametri possono essere definiti tramite il file misc/server.conf, ma ignorati se specificati tramite riga di comando.\n\t\ In particolare, l'opzione -c non e' obbligatoria se la cartella e' specificata in tale file.\n"); return; } int main(int argc, char *argv[]){ int r=mkdir("misc",0777); if(r!=0 && errno!=EEXIST){ printf("Errore creazione directory di log.\n"); return 1; } FILE *srvlog=fopen(LOGFILE,"w"); if(srvlog==NULL){ printf("Errore creazione file di log.\n"); return 1; } fclose(srvlog); memset(folder,0,PATHLEN); port=0; threadNum=-1; loadConfig(&port,folder,&threadNum); if(argc>1){ for(int i=1;i<argc;i++){ if(strcmp(argv[i],"-c")==0){ if(i+1<argc && strstr(argv[i+1],"-")==NULL){ memset(folder,0,PATHLEN); strncpy(folder,argv[i+1],strlen(argv[i+1])); i++; } else{ showHelp(argv[i]); } } else if(strcmp(argv[i],"-p")==0){ if(i+1<argc && strstr(argv[i+1],"-")==NULL){ port=(unsigned short)atoi(argv[i+1]); i++; } else{ showHelp(argv[i]); } } else if(strcmp(argv[i],"-n")==0){ if(i+1<argc && strstr(argv[i+1],"-")==NULL){ threadNum=atoi(argv[i+1]); i++; } else{ showHelp(argv[i]); } } else showHelp(argv[i]); } } if(strcmp(folder,"\0")==0){ showHelp(argv[0]); return 1; } makeDaemon(); struct sigaction sa; memset((char *)&sa,0,sizeof(sa)); sa.sa_handler=sigHandler; if(sigaction(SIGHUP, &sa, NULL)<0){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore sigaction\n"); writeLog(LOGFILE,toLog); return 1; } //inizializzazione variabili globali nextReqID=0; numReqs=0; reqMutex=(pthread_mutex_t)PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP; reqCond=(pthread_cond_t)PTHREAD_COND_INITIALIZER; run=true; while(true){ executeServer(folder,port,threadNum); //executeServer termina all'arrivo del SIGHUP, rileggo il file if(loadConfig(&port,folder,&threadNum)!=0){ char toLog[BUFSIZE]=""; sprintf(toLog,"Errore lettura del file di configurazione.\n"); writeLog(LOGFILE,toLog); return 1; } run=true; } return 0; } static void makeDaemon(){ pid_t pid = fork(); if(pid<0){ printf("Errore fork\n"); exit(EXIT_FAILURE); } //tutto okay, il genitore può terminare if(pid>0) exit(EXIT_SUCCESS); //leader sessione if(setsid()<0) exit(EXIT_FAILURE); //ignoro segnali struct sigaction sa; memset((char *)&sa,0,sizeof(sa)); sa.sa_handler=sigIgnorer; if(sigaction(SIGHUP, &sa, NULL)<0){ printf("Errore sigaction.\n"); exit(EXIT_FAILURE); } if(sigaction(SIGCHLD, &sa, NULL)<0){ printf("Errore sigaction.\n"); exit(EXIT_FAILURE); } //secondo fork pid=fork(); if(pid<0){ printf("Errore fork\n"); exit(EXIT_FAILURE); } //tutto okay, il genitore può terminare if(pid>0) exit(EXIT_SUCCESS); //scrivo pid nel file di log pid=getpid(); char 
toLog[BUFSIZE]=""; sprintf(toLog,"Server avviato. PID: %ld\n",(long)pid); writeLog(LOGFILE,toLog); //chiudi tutti i descrittori aperti for(int i=sysconf(_SC_OPEN_MAX);i>=0;i--){ close(i); } } void sigIgnorer(int signal){ // } void sigHandler(int signal){ run=false; pthread_mutex_lock(&reqMutex); pthread_cond_broadcast(&reqCond); pthread_mutex_unlock(&reqMutex); }
nbody_mkl.c
#include <stdlib.h> #include <stdio.h> #include <mkl.h> #include <mkl_extensions.h> #include <string.h> #include <vec.h> #include "nbody.h" #include "nbody_mkl.h" /** Computes Sum(G * pm / r ** 2 * (dx / r)). * * Diagonal elements are not counted in the sum. * */ void compute_force(MKL_INT n, double *dx, double *pm, double *r, double *tmp1, double *output) { MKL_INT size = n * n; vdMuli(size, pm, G, tmp1); vdPowx(size, r, 2.0, output); vdDiv(size, tmp1, output, tmp1); vdDiv(size, dx, r, output); vdMul(size, tmp1, output, tmp1); memset(output, 0, sizeof(double) * n); #pragma omp parallel for for (MKL_INT i = 0; i < n; i++) { double sum = 0.0; for (MKL_INT j = 0; j < n; j++) { // Ignore diagonal elements. if (i != j) { // Causes some imprecision compared to reference? sum += tmp1[i*n + j]; } } output[i] += sum; } } void move(MKL_INT n, double *m, double *x, double *y, double *z, double *vx, double *vy, double *vz, // Temporaries that have n * n space. double *dx, double *dy, double *dz, double *pm, double *r, double *tmp1, double *tmp2) { set_delta(n, x, dx); set_delta(n, y, dy); set_delta(n, z, dz); set_pm(n, m, pm); MKL_INT size = n * n; // r = sqrt(dx**2 + dy**2 + dz**2) vdPowx(size, dx, 2.0, tmp1); vdPowx(size, dy, 2.0, tmp2); vdAdd(size, tmp1, tmp2, tmp1); vdPowx(size, dz, 2.0, tmp2); vdAdd(size, tmp1, tmp2, tmp1); vdSqrt(size, tmp1, r); compute_force(n, dx, pm, r, tmp1, tmp2); vdDiv(n, tmp2, m, tmp1); vdMuli(n, tmp1, dt, tmp1); vdAdd(n, vx, tmp1, vx); vdMuli(n, vx, dt, tmp1); vdAdd(n, x, tmp1, x); compute_force(n, dy, pm, r, tmp1, tmp2); vdDiv(n, tmp2, m, tmp1); vdMuli(n, tmp1, dt, tmp1); vdAdd(n, vy, tmp1, vy); vdMuli(n, vy, dt, tmp1); vdAdd(n, y, tmp1, y); compute_force(n, dz, pm, r, tmp1, tmp2); vdDiv(n, tmp2, m, tmp1); vdMuli(n, tmp1, dt, tmp1); vdAdd(n, vz, tmp1, vz); vdMuli(n, vz, dt, tmp1); vdAdd(n, z, tmp1, z); } void run_mkl(int iterations, MKL_INT n, double *m, double *x, double *y, double *z, double *vx, double *vy, double *vz) { vec_t dx = new_vec(n * n, 0); vec_t dy = new_vec(n * n, 0); vec_t dz = new_vec(n * n, 0); vec_t pm = new_vec(n * n, 0); vec_t r = new_vec(n * n, 0); vec_t tmp1 = new_vec(n * n, 0); vec_t tmp2 = new_vec(n * n, 0); for (int i = 0; i < iterations; i++) { printf("iteration %d\n", i); move(n, m, x, y, z, vx, vy, vz, dx.data, dy.data, dz.data, pm.data, r.data, tmp1.data, tmp2.data); } }
#include <stdlib.h> #include <stdio.h> #include <mkl.h> #include <mkl_extensions.h> #include <string.h> #include <vec.h> #include "nbody.h" #include "nbody_mkl.h" /** Computes Sum(G * pm / r ** 2 * (dx / r)). * * Diagonal elements are not counted in the sum. * */ void compute_force(MKL_INT n, double *dx, double *pm, double *r, double *tmp1, double *output) { MKL_INT size = n * n; vdMuli(size, pm, G, tmp1); vdPowx(size, r, 2.0, output); vdDiv(size, tmp1, output, tmp1); vdDiv(size, dx, r, output); vdMul(size, tmp1, output, tmp1); memset(output, 0, sizeof(double) * n); for (MKL_INT i = 0; i < n; i++) { double sum = 0.0; for (MKL_INT j = 0; j < n; j++) { //Ignore diagonal elements. if (i != j) { //Causes some imprecision compared to reference ? sum += tmp1[i * n + j]; } } output[i] += sum; } } void move(MKL_INT n, double *m, double *x, double *y, double *z, double *vx, double *vy, double *vz, //Temporaries that have n * n space. double *dx, double *dy, double *dz, double *pm, double *r, double *tmp1, double *tmp2) { set_delta(n, x, dx); set_delta(n, y, dy); set_delta(n, z, dz); set_pm(n, m, pm); MKL_INT size = n * n; //r = sqrt(dx ** 2 + dy ** 2 + dz ** 2) vdPowx(size, dx, 2.0, tmp1); vdPowx(size, dy, 2.0, tmp2); vdAdd(size, tmp1, tmp2, tmp1); vdPowx(size, dz, 2.0, tmp2); vdAdd(size, tmp1, tmp2, tmp1); vdSqrt(size, tmp1, r); compute_force(n, dx, pm, r, tmp1, tmp2); vdDiv(n, tmp2, m, tmp1); vdMuli(n, tmp1, dt, tmp1); vdAdd(n, vx, tmp1, vx); vdMuli(n, vx, dt, tmp1); vdAdd(n, x, tmp1, x); compute_force(n, dy, pm, r, tmp1, tmp2); vdDiv(n, tmp2, m, tmp1); vdMuli(n, tmp1, dt, tmp1); vdAdd(n, vy, tmp1, vy); vdMuli(n, vy, dt, tmp1); vdAdd(n, y, tmp1, y); compute_force(n, dz, pm, r, tmp1, tmp2); vdDiv(n, tmp2, m, tmp1); vdMuli(n, tmp1, dt, tmp1); vdAdd(n, vz, tmp1, vz); vdMuli(n, vz, dt, tmp1); vdAdd(n, z, tmp1, z); } void run_mkl(int iterations, MKL_INT n, double *m, double *x, double *y, double *z, double *vx, double *vy, double *vz) { vec_t dx = new_vec(n * n, 0); vec_t dy = new_vec(n * n, 0); vec_t dz = new_vec(n * n, 0); vec_t pm = new_vec(n * n, 0); vec_t r = new_vec(n * n, 0); vec_t tmp1 = new_vec(n * n, 0); vec_t tmp2 = new_vec(n * n, 0); for (int i = 0; i < iterations; i++) { printf("iteration %d\n", i); move(n, m, x, y, z, vx, vy, vz, dx.data, dy.data, dz.data, pm.data, r.data, tmp1.data, tmp2.data); } }
#include <stdlib.h> #include <stdio.h> #include <mkl.h> #include <mkl_extensions.h> #include <string.h> #include <vec.h> #include "nbody.h" #include "nbody_mkl.h" /** Computes Sum(G * pm / r ** 2 * (dx / r)). * * Diagonal elements are not counted in the sum. * */ void compute_force(MKL_INT n, double *dx, double *pm, double *r, double *tmp1, double *output) { MKL_INT size = n * n; vdMuli(size, pm, G, tmp1); vdPowx(size, r, 2.0, output); vdDiv(size, tmp1, output, tmp1); vdDiv(size, dx, r, output); vdMul(size, tmp1, output, tmp1); memset(output, 0, sizeof(double) * n); #pragma omp parallel for for (MKL_INT i = 0; i < n; i++) { double sum = 0.0; for (MKL_INT j = 0; j < n; j++) { //Ignore diagonal elements. if (i != j) { //Causes some imprecision compared to reference ? sum += tmp1[i * n + j]; } } output[i] += sum; } } void move(MKL_INT n, double *m, double *x, double *y, double *z, double *vx, double *vy, double *vz, //Temporaries that have n * n space. double *dx, double *dy, double *dz, double *pm, double *r, double *tmp1, double *tmp2) { set_delta(n, x, dx); set_delta(n, y, dy); set_delta(n, z, dz); set_pm(n, m, pm); MKL_INT size = n * n; //r = sqrt(dx ** 2 + dy ** 2 + dz ** 2) vdPowx(size, dx, 2.0, tmp1); vdPowx(size, dy, 2.0, tmp2); vdAdd(size, tmp1, tmp2, tmp1); vdPowx(size, dz, 2.0, tmp2); vdAdd(size, tmp1, tmp2, tmp1); vdSqrt(size, tmp1, r); compute_force(n, dx, pm, r, tmp1, tmp2); vdDiv(n, tmp2, m, tmp1); vdMuli(n, tmp1, dt, tmp1); vdAdd(n, vx, tmp1, vx); vdMuli(n, vx, dt, tmp1); vdAdd(n, x, tmp1, x); compute_force(n, dy, pm, r, tmp1, tmp2); vdDiv(n, tmp2, m, tmp1); vdMuli(n, tmp1, dt, tmp1); vdAdd(n, vy, tmp1, vy); vdMuli(n, vy, dt, tmp1); vdAdd(n, y, tmp1, y); compute_force(n, dz, pm, r, tmp1, tmp2); vdDiv(n, tmp2, m, tmp1); vdMuli(n, tmp1, dt, tmp1); vdAdd(n, vz, tmp1, vz); vdMuli(n, vz, dt, tmp1); vdAdd(n, z, tmp1, z); } void run_mkl(int iterations, MKL_INT n, double *m, double *x, double *y, double *z, double *vx, double *vy, double *vz) { vec_t dx = new_vec(n * n, 0); vec_t dy = new_vec(n * n, 0); vec_t dz = new_vec(n * n, 0); vec_t pm = new_vec(n * n, 0); vec_t r = new_vec(n * n, 0); vec_t tmp1 = new_vec(n * n, 0); vec_t tmp2 = new_vec(n * n, 0); for (int i = 0; i < iterations; i++) { printf("iteration %d\n", i); move(n, m, x, y, z, vx, vy, vz, dx.data, dy.data, dz.data, pm.data, r.data, tmp1.data, tmp2.data); } }
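The heart of compute_force, once the MKL element-wise helpers (vdMuli and friends, which come from the project's own mkl_extensions.h) have filled tmp1, is a row sum over an n-by-n interaction matrix that skips the diagonal. Rows are independent, so the outer loop parallelizes cleanly. A standalone sketch of just that step, using only standard C plus OpenMP (compile with -fopenmp; the function and variable names here are illustrative):

#include <stdio.h>

static void row_sum_no_diag(long n, const double *tmp1, double *output) {
    #pragma omp parallel for
    for (long i = 0; i < n; i++) {
        double sum = 0.0;
        for (long j = 0; j < n; j++)
            if (i != j)                 /* the diagonal is self-interaction */
                sum += tmp1[i * n + j];
        output[i] = sum;
    }
}

int main(void) {
    long n = 3;
    double m[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9}, out[3];
    row_sum_no_diag(n, m, out);
    for (long i = 0; i < n; i++)
        printf("row %ld: %g\n", i, out[i]);  /* prints 5, 10, 15 */
    return 0;
}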
main.c
#define __STDC_WANT_LIB_EXT2__ 1 #include <stdlib.h> #include <stdio.h> #include <string.h> #include <ctype.h> #include <assert.h> #include <unistd.h> #include <getopt.h> #include <omp.h> #include "samrecord.h" #include "align.h" #include "barcodes.h" #include "util.h" #include "main.h" // preprocess #include "cpp/main.h" #include "cpp/count.h" #include "cpp/correct.h" int num_threads_per_file = 1; int num_threads_for_files = 1; char *rg = "@RG\tID:rg1\tSM:sample1"; char *bx_index = "1"; char **pg_argv = NULL; int pg_argc = 0; PlatformProfile *tech; #define MAX_CHROM_NAME_LEN 256 static struct { char chrom_name[MAX_CHROM_NAME_LEN]; } *chroms; char *chrom_lookup(const chrom_t chrom) { return chroms[chrom].chrom_name; } chrom_t chrom_index(const char *chrom) { size_t len = 0; while (!isspace(chrom[len]) && chrom[len] != '\0') ++len; for (size_t i = 0; chroms[i].chrom_name[0] != '\0'; i++) { if (strncmp(chrom, chroms[i].chrom_name, len) == 0) { return i; } } assert(0); return 0; } void read_fai(FILE *fai_file) { const size_t lines = count_lines(fai_file); chroms = safe_malloc(lines * sizeof(*chroms)); size_t i = 0; while (fgets(chroms[i].chrom_name, MAX_CHROM_NAME_LEN, fai_file)) { size_t j = 0; while (!isspace(chroms[i].chrom_name[j])) ++j; chroms[i++].chrom_name[j] = '\0'; } chroms[i].chrom_name[0] = '\0'; } static int validate_read_group(const char *rg) { return strstr(rg, "@RG\t") == &rg[0] && strstr(rg, "\tID:") != NULL; } static void print_help_and_exit(const char *argv0, int error) { #define P(...) fprintf(out, __VA_ARGS__) FILE *out = error ? stderr : stdout; P("usage: %s <count|preproc|align|help> [options]\n", argv0); P("\n"); P("count: perform preliminary barcode count (takes interleaved FASTQ via stdin)\n"); P(" -w <whitelist path>: specify barcode whitelist [required]\n"); P(" -o <output prefix>: specify output prefix [required]\n"); P("\n"); P("preproc: preprocess barcoded FASTQ files (takes interleaved FASTQ via stdin)\n"); P(" -w <whitelist path>: specify whitelist [required]\n"); P(" -n <num buckets>: number of barcode buckets to make [500]\n"); P(" -h: apply Hamming-2 correction [off]\n"); P(" -o: <output directory> specify output directory [required]\n"); P(" -b: output BX:Z-formatted FASTQs [off]\n"); P(" -t <threads>: set number of threads [1]\n"); P(" all other arguments: list of all output prefixes generated by count stage\n"); P("\n"); P("align: choose best alignments based on barcodes\n"); P(" -1 <FASTQ1 path>: first (preprocessed and sorted) FASTQ file [none]\n"); P(" -2 <FASTQ2 path>: second (preprocessed and sorted) FASTQ file [none]\n"); P(" -s <EMA-FASTQ path>: specify special FASTQ path [none]\n"); P(" -x: multi-input mode; takes input files after flags and spawns a thread for each [off]\n"); P(" -r <FASTA path>: indexed reference [required]\n"); P(" -o <SAM file>: output SAM file [stdout]\n"); P(" -R <RG string>: full read group string (e.g. '@RG\\tID:foo\\tSM:bar') [none]\n"); P(" -d: apply fragment read density optimization [off]\n"); P(" -p <platform>: sequencing platform (one of '10x', 'tru', 'cpt') [10x]\n"); P(" -i <index>: index to follow 'BX' tag in SAM output [1]"); P(" -t <threads>: set number of threads [1]\n"); P(" all other arguments (only for -x): list of all preprocessed inputs\n"); P("\n"); P("help: print this help message\n"); exit(error ? 
EXIT_FAILURE : EXIT_SUCCESS); #undef P } int main(const int argc, char *argv[]) { #define EQ(s1, s2) (strcmp((s1), (s2)) == 0) const char *argv0 = argv[0]; if (argc < 2) { fprintf(stderr, "EMA version %s\n", VERSION); fprintf(stderr, "note: use '%s help' for usage information.\n", argv0); return EXIT_SUCCESS; } pg_argv = argv; pg_argc = argc; const char *mode = argv[1]; if (EQ(mode, "preproc")) { cppinit(); char *wl = NULL; int nbuckets = 500; int do_h2 = 0; char *out = NULL; int t = 1; char c; char do_bx_format = 0; while ((c = getopt(argc-1, &argv[1], "w:n:hbo:t:")) != -1) { switch (c) { case 'w': wl = strdup(optarg); break; case 'n': nbuckets = atoi(optarg); break; case 'h': do_h2 = 1; break; case 'o': out = strdup(optarg); break; case 't': t = atoi(optarg); break; case 'b': do_bx_format = 1; break; default: print_help_and_exit(argv0, 1); } } if (wl == NULL) { fprintf(stderr, "error: specify barcode whitelist with -w\n"); exit(EXIT_FAILURE); } if (out == NULL) { fprintf(stderr, "error: specify output directory with -o\n"); exit(EXIT_FAILURE); } const size_t n_inputs = argc - optind - 1; if (n_inputs == 0) { fprintf(stderr, "warning: no input files specified; nothing to do\n"); exit(EXIT_SUCCESS); } const char **inputs = safe_malloc(n_inputs * sizeof(*inputs)); for (int i = optind + 1; i < argc; i++) { const int j = i - (optind + 1); // w.r.t. `inputs` inputs[j] = strdup(argv[i]); } correct(wl, inputs, n_inputs, out, do_h2, 10 * MB, do_bx_format, t, nbuckets); return EXIT_SUCCESS; } if (EQ(mode, "count")) { cppinit(); char *wl = NULL; char *out = NULL; char c; while ((c = getopt(argc-1, &argv[1], "w:o:")) != -1) { switch (c) { case 'w': wl = strdup(optarg); break; case 'o': out = strdup(optarg); break; default: print_help_and_exit(argv0, 1); } } if (wl == NULL) { fprintf(stderr, "error: specify barcode whitelist with -w\n"); exit(EXIT_FAILURE); } if (out == NULL) { fprintf(stderr, "error: specify output prefix with -o\n"); exit(EXIT_FAILURE); } count(wl, out, 1 * GB); return EXIT_SUCCESS; } if (EQ(mode, "align")) { char *ref = NULL; char *fq1 = NULL; char *fq2 = NULL; char *fqx = NULL; char *fai = NULL; char *out = NULL; int apply_opt = 0; int multi_input = 0; char *platform = "10x"; int t = 1; char c; while ((c = getopt(argc-1, &argv[1], "r:1:2:s:xo:R:dp:i:t:")) != -1) { switch (c) { case 'r': ref = strdup(optarg); break; case '1': fq1 = strdup(optarg); break; case '2': fq2 = strdup(optarg); break; case 's': fqx = strdup(optarg); break; case 'x': multi_input = 1; break; case 'o': out = strdup(optarg); break; case 'R': rg = escape(strdup(optarg)); break; case 'd': apply_opt = 1; break; case 'p': platform = strdup(optarg); break; case 'i': bx_index = strdup(optarg); break; case 't': t = atoi(optarg); break; default: print_help_and_exit(argv0, 1); } } if (multi_input + (fqx != NULL) + (fq1 != NULL || fq2 != NULL) != 1) { fprintf(stderr, "error: must specify *exactly one* of -1/-2, -s or -x\n"); exit(EXIT_FAILURE); } if (fq1 == NULL && fq2 != NULL) { fprintf(stderr, "error: cannot specify -2 without -1\n"); exit(EXIT_FAILURE); } if (ref == NULL) { fprintf(stderr, "error: specify reference FASTA with -r\n"); exit(EXIT_FAILURE); } if (rg != NULL && !validate_read_group(rg)) { fprintf(stderr, "error: malformed read group: '%s'\n", rg); exit(EXIT_FAILURE); } if ((tech = get_platform_profile_by_name(platform)) == NULL) { fprintf(stderr, "error: invalid platform name: '%s'\n", platform); exit(EXIT_FAILURE); } FILE *fq1_file = NULL; FILE *fq2_file = NULL; FILE *fqx_file = NULL; FILE *out_file = 
(out == NULL ? stdout : fopen(out, "w")); if (!out_file) { IOERROR(out); } if (fqx != NULL) { fqx_file = fopen(fqx, "r"); if (!fqx_file) { IOERROR(fqx); } } else if (fq1 != NULL) { fq1_file = fopen(fq1, "r"); if (!fq1_file) { IOERROR(fq1); } if (fq2 != NULL) { fq2_file = fopen(fq2, "r"); if (!fq2_file) { IOERROR(fq2); } } else { fq2_file = fq1_file; } } #define FAI_EXT ".fai" fai = safe_malloc(strlen(ref) + strlen(FAI_EXT) + 1); strcpy(fai, ref); strcat(fai, FAI_EXT); #undef FAI_EXT FILE *fai_file = fopen(fai, "r"); if (!fai_file) { IOERROR(fai); } read_fai(fai_file); fclose(fai_file); bwa_init(ref); write_sam_header(out_file); if (multi_input) { num_threads_for_files = t; num_threads_per_file = 1; const size_t n_inputs = argc - optind - 1; if (n_inputs == 0) { fprintf(stderr, "warning: no input files specified; nothing to do\n"); exit(EXIT_SUCCESS); } FILE **inputs = safe_malloc(n_inputs * sizeof(*inputs)); for (int i = optind + 1; i < argc; i++) { const int j = i - (optind + 1); // w.r.t. `inputs` and `outputs` const char *filename = strdup(argv[i]); inputs[j] = fopen(filename, "r"); if (!inputs[j]) { IOERROR(filename); } } omp_set_nested(1); omp_lock_t out_lock; omp_init_lock(&out_lock); #pragma omp parallel for num_threads(num_threads_for_files) for (size_t i = 0; i < n_inputs; i++) { find_clouds_and_align(NULL, NULL, inputs[i], out_file, apply_opt, NULL, &out_lock); fclose(inputs[i]); } omp_destroy_lock(&out_lock); free(inputs); } else { num_threads_for_files = 1; num_threads_per_file = t; find_clouds_and_align(fq1_file, fq2_file, fqx_file, out_file, apply_opt, NULL, NULL); if (fq1_file) fclose(fq1_file); if (fq2_file && fq2_file != fq1_file) fclose(fq2_file); if (fqx_file) fclose(fqx_file); } fclose(out_file); bwa_dealloc(); free(fai); free(chroms); return EXIT_SUCCESS; } if (EQ(mode, "help")) { print_help_and_exit(argv0, 0); } fprintf(stderr, "error: unrecognized mode\n"); print_help_and_exit(argv0, 1); #undef EQ }
#define __STDC_WANT_LIB_EXT2__ 1 #include <stdlib.h> #include <stdio.h> #include <string.h> #include <ctype.h> #include <assert.h> #include <unistd.h> #include <getopt.h> #include <omp.h> #include "samrecord.h" #include "align.h" #include "barcodes.h" #include "util.h" #include "main.h" // preprocess #include "cpp/main.h" #include "cpp/count.h" #include "cpp/correct.h" int num_threads_per_file = 1; int num_threads_for_files = 1; char *rg = "@RG\tID:rg1\tSM:sample1"; char *bx_index = "1"; char **pg_argv = NULL; int pg_argc = 0; PlatformProfile *tech; #define MAX_CHROM_NAME_LEN 256 static struct { char chrom_name[MAX_CHROM_NAME_LEN]; } *chroms; char * chrom_lookup(const chrom_t chrom) { return chroms[chrom].chrom_name; } chrom_t chrom_index(const char *chrom) { size_t len = 0; while (!isspace(chrom[len]) && chrom[len] != '\0') ++len; for (size_t i = 0; chroms[i].chrom_name[0] != '\0'; i++) { if (strncmp(chrom, chroms[i].chrom_name, len) == 0) { return i; } } assert(0); return 0; } void read_fai(FILE * fai_file) { const size_t lines = count_lines(fai_file); chroms = safe_malloc(lines * sizeof(*chroms)); size_t i = 0; while (fgets(chroms[i].chrom_name, MAX_CHROM_NAME_LEN, fai_file)) { size_t j = 0; while (!isspace(chroms[i].chrom_name[j])) ++j; chroms[i++].chrom_name[j] = '\0'; } chroms[i].chrom_name[0] = '\0'; } static int validate_read_group(const char *rg) { return strstr(rg, "@RG\t") == &rg[0] && strstr(rg, "\tID:") != NULL; } static void print_help_and_exit(const char *argv0, int error) { #define P(...) fprintf(out, __VA_ARGS__) FILE *out = error ? stderr : stdout; P("usage: %s <count|preproc|align|help> [options]\n", argv0); P("\n"); P("count: perform preliminary barcode count (takes interleaved FASTQ via stdin)\n"); P(" -w <whitelist path>: specify barcode whitelist [required]\n"); P(" -o <output prefix>: specify output prefix [required]\n"); P("\n"); P("preproc: preprocess barcoded FASTQ files (takes interleaved FASTQ via stdin)\n"); P(" -w <whitelist path>: specify whitelist [required]\n"); P(" -n <num buckets>: number of barcode buckets to make [500]\n"); P(" -h: apply Hamming-2 correction [off]\n"); P(" -o: <output directory> specify output directory [required]\n"); P(" -b: output BX:Z-formatted FASTQs [off]\n"); P(" -t <threads>: set number of threads [1]\n"); P(" all other arguments: list of all output prefixes generated by count stage\n"); P("\n"); P("align: choose best alignments based on barcodes\n"); P(" -1 <FASTQ1 path>: first (preprocessed and sorted) FASTQ file [none]\n"); P(" -2 <FASTQ2 path>: second (preprocessed and sorted) FASTQ file [none]\n"); P(" -s <EMA-FASTQ path>: specify special FASTQ path [none]\n"); P(" -x: multi-input mode; takes input files after flags and spawns a thread for each [off]\n"); P(" -r <FASTA path>: indexed reference [required]\n"); P(" -o <SAM file>: output SAM file [stdout]\n"); P(" -R <RG string>: full read group string (e.g. '@RG\\tID:foo\\tSM:bar') [none]\n"); P(" -d: apply fragment read density optimization [off]\n"); P(" -p <platform>: sequencing platform (one of '10x', 'tru', 'cpt') [10x]\n"); P(" -i <index>: index to follow 'BX' tag in SAM output [1]"); P(" -t <threads>: set number of threads [1]\n"); P(" all other arguments (only for -x): list of all preprocessed inputs\n"); P("\n"); P("help: print this help message\n"); exit(error ? 
EXIT_FAILURE : EXIT_SUCCESS); #undef P } int main(const int argc, char *argv[]) { #define EQ(s1, s2) (strcmp((s1), (s2)) == 0) const char *argv0 = argv[0]; if (argc < 2) { fprintf(stderr, "EMA version %s\n", VERSION); fprintf(stderr, "note: use '%s help' for usage information.\n", argv0); return EXIT_SUCCESS; } pg_argv = argv; pg_argc = argc; const char *mode = argv[1]; if (EQ(mode, "preproc")) { cppinit(); char *wl = NULL; int nbuckets = 500; int do_h2 = 0; char *out = NULL; int t = 1; char c; char do_bx_format = 0; while ((c = getopt(argc - 1, &argv[1], "w:n:hbo:t:")) != -1) { switch (c) { case 'w': wl = strdup(optarg); break; case 'n': nbuckets = atoi(optarg); break; case 'h': do_h2 = 1; break; case 'o': out = strdup(optarg); break; case 't': t = atoi(optarg); break; case 'b': do_bx_format = 1; break; default: print_help_and_exit(argv0, 1); } } if (wl == NULL) { fprintf(stderr, "error: specify barcode whitelist with -w\n"); exit(EXIT_FAILURE); } if (out == NULL) { fprintf(stderr, "error: specify output directory with -o\n"); exit(EXIT_FAILURE); } const size_t n_inputs = argc - optind - 1; if (n_inputs == 0) { fprintf(stderr, "warning: no input files specified; nothing to do\n"); exit(EXIT_SUCCESS); } const char **inputs = safe_malloc(n_inputs * sizeof(*inputs)); for (int i = optind + 1; i < argc; i++) { const int j = i - (optind + 1); //w.r.t.` inputs ` inputs[j] = strdup(argv[i]); } correct(wl, inputs, n_inputs, out, do_h2, 10 * MB, do_bx_format, t, nbuckets); return EXIT_SUCCESS; } if (EQ(mode, "count")) { cppinit(); char *wl = NULL; char *out = NULL; char c; while ((c = getopt(argc - 1, &argv[1], "w:o:")) != -1) { switch (c) { case 'w': wl = strdup(optarg); break; case 'o': out = strdup(optarg); break; default: print_help_and_exit(argv0, 1); } } if (wl == NULL) { fprintf(stderr, "error: specify barcode whitelist with -w\n"); exit(EXIT_FAILURE); } if (out == NULL) { fprintf(stderr, "error: specify output prefix with -o\n"); exit(EXIT_FAILURE); } count(wl, out, 1 * GB); return EXIT_SUCCESS; } if (EQ(mode, "align")) { char *ref = NULL; char *fq1 = NULL; char *fq2 = NULL; char *fqx = NULL; char *fai = NULL; char *out = NULL; int apply_opt = 0; int multi_input = 0; char *platform = "10x"; int t = 1; char c; while ((c = getopt(argc - 1, &argv[1], "r:1:2:s:xo:R:dp:i:t:")) != -1) { switch (c) { case 'r': ref = strdup(optarg); break; case '1': fq1 = strdup(optarg); break; case '2': fq2 = strdup(optarg); break; case 's': fqx = strdup(optarg); break; case 'x': multi_input = 1; break; case 'o': out = strdup(optarg); break; case 'R': rg = escape(strdup(optarg)); break; case 'd': apply_opt = 1; break; case 'p': platform = strdup(optarg); break; case 'i': bx_index = strdup(optarg); break; case 't': t = atoi(optarg); break; default: print_help_and_exit(argv0, 1); } } if (multi_input + (fqx != NULL) + (fq1 != NULL || fq2 != NULL) != 1) { fprintf(stderr, "error: must specify *exactly one* of -1/-2, -s or -x\n"); exit(EXIT_FAILURE); } if (fq1 == NULL && fq2 != NULL) { fprintf(stderr, "error: cannot specify -2 without -1\n"); exit(EXIT_FAILURE); } if (ref == NULL) { fprintf(stderr, "error: specify reference FASTA with -r\n"); exit(EXIT_FAILURE); } if (rg != NULL && !validate_read_group(rg)) { fprintf(stderr, "error: malformed read group: '%s'\n", rg); exit(EXIT_FAILURE); } if ((tech = get_platform_profile_by_name(platform)) == NULL) { fprintf(stderr, "error: invalid platform name: '%s'\n", platform); exit(EXIT_FAILURE); } FILE *fq1_file = NULL; FILE *fq2_file = NULL; FILE *fqx_file = NULL; FILE 
*out_file = (out == NULL ? stdout : fopen(out, "w")); if (!out_file) { IOERROR(out); } if (fqx != NULL) { fqx_file = fopen(fqx, "r"); if (!fqx_file) { IOERROR(fqx); } } else if (fq1 != NULL) { fq1_file = fopen(fq1, "r"); if (!fq1_file) { IOERROR(fq1); } if (fq2 != NULL) { fq2_file = fopen(fq2, "r"); if (!fq2_file) { IOERROR(fq2); } } else { fq2_file = fq1_file; } } #define FAI_EXT ".fai" fai = safe_malloc(strlen(ref) + strlen(FAI_EXT) + 1); strcpy(fai, ref); strcat(fai, FAI_EXT); #undef FAI_EXT FILE *fai_file = fopen(fai, "r"); if (!fai_file) { IOERROR(fai); } read_fai(fai_file); fclose(fai_file); bwa_init(ref); write_sam_header(out_file); if (multi_input) { num_threads_for_files = t; num_threads_per_file = 1; const size_t n_inputs = argc - optind - 1; if (n_inputs == 0) { fprintf(stderr, "warning: no input files specified; nothing to do\n"); exit(EXIT_SUCCESS); } FILE **inputs = safe_malloc(n_inputs * sizeof(*inputs)); for (int i = optind + 1; i < argc; i++) { const int j = i - (optind + 1); //w.r.t.` inputs ` and ` outputs ` const char *filename = strdup(argv[i]); inputs[j] = fopen(filename, "r"); if (!inputs[j]) { IOERROR(filename); } } omp_set_nested(1); omp_lock_t out_lock; omp_init_lock(&out_lock); for (size_t i = 0; i < n_inputs; i++) { find_clouds_and_align(NULL, NULL, inputs[i], out_file, apply_opt, NULL, &out_lock); fclose(inputs[i]); } omp_destroy_lock(&out_lock); free(inputs); } else { num_threads_for_files = 1; num_threads_per_file = t; find_clouds_and_align(fq1_file, fq2_file, fqx_file, out_file, apply_opt, NULL, NULL); if (fq1_file) fclose(fq1_file); if (fq2_file && fq2_file != fq1_file) fclose(fq2_file); if (fqx_file) fclose(fqx_file); } fclose(out_file); bwa_dealloc(); free(fai); free(chroms); return EXIT_SUCCESS; } if (EQ(mode, "help")) { print_help_and_exit(argv0, 0); } fprintf(stderr, "error: unrecognized mode\n"); print_help_and_exit(argv0, 1); #undef EQ }
#define __STDC_WANT_LIB_EXT2__ 1 #include <stdlib.h> #include <stdio.h> #include <string.h> #include <ctype.h> #include <assert.h> #include <unistd.h> #include <getopt.h> #include <omp.h> #include "samrecord.h" #include "align.h" #include "barcodes.h" #include "util.h" #include "main.h" // preprocess #include "cpp/main.h" #include "cpp/count.h" #include "cpp/correct.h" int num_threads_per_file = 1; int num_threads_for_files = 1; char *rg = "@RG\tID:rg1\tSM:sample1"; char *bx_index = "1"; char **pg_argv = NULL; int pg_argc = 0; PlatformProfile *tech; #define MAX_CHROM_NAME_LEN 256 static struct { char chrom_name[MAX_CHROM_NAME_LEN]; } *chroms; char * chrom_lookup(const chrom_t chrom) { return chroms[chrom].chrom_name; } chrom_t chrom_index(const char *chrom) { size_t len = 0; while (!isspace(chrom[len]) && chrom[len] != '\0') ++len; for (size_t i = 0; chroms[i].chrom_name[0] != '\0'; i++) { if (strncmp(chrom, chroms[i].chrom_name, len) == 0) { return i; } } assert(0); return 0; } void read_fai(FILE * fai_file) { const size_t lines = count_lines(fai_file); chroms = safe_malloc(lines * sizeof(*chroms)); size_t i = 0; while (fgets(chroms[i].chrom_name, MAX_CHROM_NAME_LEN, fai_file)) { size_t j = 0; while (!isspace(chroms[i].chrom_name[j])) ++j; chroms[i++].chrom_name[j] = '\0'; } chroms[i].chrom_name[0] = '\0'; } static int validate_read_group(const char *rg) { return strstr(rg, "@RG\t") == &rg[0] && strstr(rg, "\tID:") != NULL; } static void print_help_and_exit(const char *argv0, int error) { #define P(...) fprintf(out, __VA_ARGS__) FILE *out = error ? stderr : stdout; P("usage: %s <count|preproc|align|help> [options]\n", argv0); P("\n"); P("count: perform preliminary barcode count (takes interleaved FASTQ via stdin)\n"); P(" -w <whitelist path>: specify barcode whitelist [required]\n"); P(" -o <output prefix>: specify output prefix [required]\n"); P("\n"); P("preproc: preprocess barcoded FASTQ files (takes interleaved FASTQ via stdin)\n"); P(" -w <whitelist path>: specify whitelist [required]\n"); P(" -n <num buckets>: number of barcode buckets to make [500]\n"); P(" -h: apply Hamming-2 correction [off]\n"); P(" -o: <output directory> specify output directory [required]\n"); P(" -b: output BX:Z-formatted FASTQs [off]\n"); P(" -t <threads>: set number of threads [1]\n"); P(" all other arguments: list of all output prefixes generated by count stage\n"); P("\n"); P("align: choose best alignments based on barcodes\n"); P(" -1 <FASTQ1 path>: first (preprocessed and sorted) FASTQ file [none]\n"); P(" -2 <FASTQ2 path>: second (preprocessed and sorted) FASTQ file [none]\n"); P(" -s <EMA-FASTQ path>: specify special FASTQ path [none]\n"); P(" -x: multi-input mode; takes input files after flags and spawns a thread for each [off]\n"); P(" -r <FASTA path>: indexed reference [required]\n"); P(" -o <SAM file>: output SAM file [stdout]\n"); P(" -R <RG string>: full read group string (e.g. '@RG\\tID:foo\\tSM:bar') [none]\n"); P(" -d: apply fragment read density optimization [off]\n"); P(" -p <platform>: sequencing platform (one of '10x', 'tru', 'cpt') [10x]\n"); P(" -i <index>: index to follow 'BX' tag in SAM output [1]"); P(" -t <threads>: set number of threads [1]\n"); P(" all other arguments (only for -x): list of all preprocessed inputs\n"); P("\n"); P("help: print this help message\n"); exit(error ? 
EXIT_FAILURE : EXIT_SUCCESS); #undef P } int main(const int argc, char *argv[]) { #define EQ(s1, s2) (strcmp((s1), (s2)) == 0) const char *argv0 = argv[0]; if (argc < 2) { fprintf(stderr, "EMA version %s\n", VERSION); fprintf(stderr, "note: use '%s help' for usage information.\n", argv0); return EXIT_SUCCESS; } pg_argv = argv; pg_argc = argc; const char *mode = argv[1]; if (EQ(mode, "preproc")) { cppinit(); char *wl = NULL; int nbuckets = 500; int do_h2 = 0; char *out = NULL; int t = 1; char c; char do_bx_format = 0; while ((c = getopt(argc - 1, &argv[1], "w:n:hbo:t:")) != -1) { switch (c) { case 'w': wl = strdup(optarg); break; case 'n': nbuckets = atoi(optarg); break; case 'h': do_h2 = 1; break; case 'o': out = strdup(optarg); break; case 't': t = atoi(optarg); break; case 'b': do_bx_format = 1; break; default: print_help_and_exit(argv0, 1); } } if (wl == NULL) { fprintf(stderr, "error: specify barcode whitelist with -w\n"); exit(EXIT_FAILURE); } if (out == NULL) { fprintf(stderr, "error: specify output directory with -o\n"); exit(EXIT_FAILURE); } const size_t n_inputs = argc - optind - 1; if (n_inputs == 0) { fprintf(stderr, "warning: no input files specified; nothing to do\n"); exit(EXIT_SUCCESS); } const char **inputs = safe_malloc(n_inputs * sizeof(*inputs)); for (int i = optind + 1; i < argc; i++) { const int j = i - (optind + 1); //w.r.t.` inputs ` inputs[j] = strdup(argv[i]); } correct(wl, inputs, n_inputs, out, do_h2, 10 * MB, do_bx_format, t, nbuckets); return EXIT_SUCCESS; } if (EQ(mode, "count")) { cppinit(); char *wl = NULL; char *out = NULL; char c; while ((c = getopt(argc - 1, &argv[1], "w:o:")) != -1) { switch (c) { case 'w': wl = strdup(optarg); break; case 'o': out = strdup(optarg); break; default: print_help_and_exit(argv0, 1); } } if (wl == NULL) { fprintf(stderr, "error: specify barcode whitelist with -w\n"); exit(EXIT_FAILURE); } if (out == NULL) { fprintf(stderr, "error: specify output prefix with -o\n"); exit(EXIT_FAILURE); } count(wl, out, 1 * GB); return EXIT_SUCCESS; } if (EQ(mode, "align")) { char *ref = NULL; char *fq1 = NULL; char *fq2 = NULL; char *fqx = NULL; char *fai = NULL; char *out = NULL; int apply_opt = 0; int multi_input = 0; char *platform = "10x"; int t = 1; char c; while ((c = getopt(argc - 1, &argv[1], "r:1:2:s:xo:R:dp:i:t:")) != -1) { switch (c) { case 'r': ref = strdup(optarg); break; case '1': fq1 = strdup(optarg); break; case '2': fq2 = strdup(optarg); break; case 's': fqx = strdup(optarg); break; case 'x': multi_input = 1; break; case 'o': out = strdup(optarg); break; case 'R': rg = escape(strdup(optarg)); break; case 'd': apply_opt = 1; break; case 'p': platform = strdup(optarg); break; case 'i': bx_index = strdup(optarg); break; case 't': t = atoi(optarg); break; default: print_help_and_exit(argv0, 1); } } if (multi_input + (fqx != NULL) + (fq1 != NULL || fq2 != NULL) != 1) { fprintf(stderr, "error: must specify *exactly one* of -1/-2, -s or -x\n"); exit(EXIT_FAILURE); } if (fq1 == NULL && fq2 != NULL) { fprintf(stderr, "error: cannot specify -2 without -1\n"); exit(EXIT_FAILURE); } if (ref == NULL) { fprintf(stderr, "error: specify reference FASTA with -r\n"); exit(EXIT_FAILURE); } if (rg != NULL && !validate_read_group(rg)) { fprintf(stderr, "error: malformed read group: '%s'\n", rg); exit(EXIT_FAILURE); } if ((tech = get_platform_profile_by_name(platform)) == NULL) { fprintf(stderr, "error: invalid platform name: '%s'\n", platform); exit(EXIT_FAILURE); } FILE *fq1_file = NULL; FILE *fq2_file = NULL; FILE *fqx_file = NULL; FILE 
*out_file = (out == NULL ? stdout : fopen(out, "w")); if (!out_file) { IOERROR(out); } if (fqx != NULL) { fqx_file = fopen(fqx, "r"); if (!fqx_file) { IOERROR(fqx); } } else if (fq1 != NULL) { fq1_file = fopen(fq1, "r"); if (!fq1_file) { IOERROR(fq1); } if (fq2 != NULL) { fq2_file = fopen(fq2, "r"); if (!fq2_file) { IOERROR(fq2); } } else { fq2_file = fq1_file; } } #define FAI_EXT ".fai" fai = safe_malloc(strlen(ref) + strlen(FAI_EXT) + 1); strcpy(fai, ref); strcat(fai, FAI_EXT); #undef FAI_EXT FILE *fai_file = fopen(fai, "r"); if (!fai_file) { IOERROR(fai); } read_fai(fai_file); fclose(fai_file); bwa_init(ref); write_sam_header(out_file); if (multi_input) { num_threads_for_files = t; num_threads_per_file = 1; const size_t n_inputs = argc - optind - 1; if (n_inputs == 0) { fprintf(stderr, "warning: no input files specified; nothing to do\n"); exit(EXIT_SUCCESS); } FILE **inputs = safe_malloc(n_inputs * sizeof(*inputs)); for (int i = optind + 1; i < argc; i++) { const int j = i - (optind + 1); //w.r.t.` inputs ` and ` outputs ` const char *filename = strdup(argv[i]); inputs[j] = fopen(filename, "r"); if (!inputs[j]) { IOERROR(filename); } } omp_set_nested(1); omp_lock_t out_lock; omp_init_lock(&out_lock); #pragma omp parallel for num_threads(num_threads_for_files) for (size_t i = 0; i < n_inputs; i++) { find_clouds_and_align(NULL, NULL, inputs[i], out_file, apply_opt, NULL, &out_lock); fclose(inputs[i]); } omp_destroy_lock(&out_lock); free(inputs); } else { num_threads_for_files = 1; num_threads_per_file = t; find_clouds_and_align(fq1_file, fq2_file, fqx_file, out_file, apply_opt, NULL, NULL); if (fq1_file) fclose(fq1_file); if (fq2_file && fq2_file != fq1_file) fclose(fq2_file); if (fqx_file) fclose(fqx_file); } fclose(out_file); bwa_dealloc(); free(fai); free(chroms); return EXIT_SUCCESS; } if (EQ(mode, "help")) { print_help_and_exit(argv0, 0); } fprintf(stderr, "error: unrecognized mode\n"); print_help_and_exit(argv0, 1); #undef EQ }
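In multi-input mode (-x), the align stage above runs one OpenMP thread per input file and serializes writes to the shared SAM stream with an omp_lock_t, so only the output step is a critical section. A minimal sketch of that pattern follows; the per-file work here is a stand-in printf, not EMA's find_clouds_and_align. Compile with -fopenmp.

#include <stdio.h>
#include <omp.h>

int main(void) {
    const int n_inputs = 4;
    omp_lock_t out_lock;
    omp_init_lock(&out_lock);
    #pragma omp parallel for num_threads(n_inputs)
    for (int i = 0; i < n_inputs; i++) {
        /* ... per-file work happens here, fully in parallel ... */
        omp_set_lock(&out_lock);        /* only output is serialized */
        printf("records from input %d\n", i);
        omp_unset_lock(&out_lock);
    }
    omp_destroy_lock(&out_lock);
    return 0;
}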
HW4.c
/* * Write an OpenMP C code to compute * the mean and standard deviation of a list of numbers. * I/O is performed serially by the master thread. * */ #include <stdio.h> #include <omp.h> #include <math.h> #define THREADS 4 int main(int argc,char *argv[]) { long int n,i; float list[1000],mean,stdev,sum=0.0,sum2=0.0; printf("Enter the size of the list: "); scanf("%ld",&n); printf("Enter the numbers in the list: "); for(i=0;i<n;i++) scanf("%f",&list[i]); omp_set_num_threads(THREADS); #pragma omp parallel { #pragma omp for reduction(+:sum) for(i=0;i<n;i++) { sum=sum+list[i]; } #pragma omp master mean=sum/n; #pragma omp barrier #pragma omp for reduction(+:sum2) for(i=0;i<n;i++) { sum2=sum2+(list[i]-mean)*(list[i]-mean); } #pragma omp master stdev=sqrt(sum2/n); } printf("Mean: %6.2f\nStandard Deviation: %6.4f\n",mean,stdev); return 0; }
/* * Write an OpenMP C code to compute the mean and standard deviation of a * list of numbers. * I/O is performed serially by the master thread. * */ #include <stdio.h> #include <omp.h> #include <math.h> #define THREADS 4 int main(int argc, char *argv[]) { long int n, i; float list[1000], mean, stdev, sum = 0.0, sum2 = 0.0; printf("Enter the size of the list: "); scanf("%ld", &n); printf("Enter the numbers in the list: "); for (i = 0; i < n; i++) scanf("%f", &list[i]); omp_set_num_threads(THREADS); for (i = 0; i < n; i++) { sum = sum + list[i]; } mean = sum / n; for (i = 0; i < n; i++) { sum2 = sum2 + (list[i] - mean) * (list[i] - mean); } stdev = sqrt(sum2 / n); printf("Mean: %6.2f\nStandard Deviation: %6.4f\n", mean, stdev); return 0; }
/* * Write an OpenMP C code to compute the mean and standard deviation of a * list of numbers. * I/O is performed serially by the master thread. * */ #include <stdio.h> #include <omp.h> #include <math.h> #define THREADS 4 int main(int argc, char *argv[]) { long int n, i; float list[1000], mean, stdev, sum = 0.0, sum2 = 0.0; printf("Enter the size of the list: "); scanf("%ld", &n); printf("Enter the numbers in the list: "); for (i = 0; i < n; i++) scanf("%f", &list[i]); omp_set_num_threads(THREADS); #pragma omp parallel { #pragma omp for reduction(+:sum) for (i = 0; i < n; i++) { sum = sum + list[i]; } #pragma omp master mean = sum / n; #pragma omp barrier #pragma omp for reduction(+:sum2) for (i = 0; i < n; i++) { sum2 = sum2 + (list[i] - mean) * (list[i] - mean); } #pragma omp master stdev = sqrt(sum2 / n); } printf("Mean: %6.2f\nStandard Deviation: %6.4f\n", mean, stdev); return 0; }
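Note that the second worksharing loop must name sum2 in its reduction clause, or the updates race. A one-pass alternative avoids the barrier between passes entirely: accumulate the sum and the sum of squares in a single reduction, then use Var(x) = E[x^2] - E[x]^2. A sketch with made-up sample data (compile with -fopenmp):

#include <stdio.h>
#include <math.h>

int main(void) {
    float list[] = {2.0f, 4.0f, 4.0f, 4.0f, 5.0f, 5.0f, 7.0f, 9.0f};
    long n = sizeof(list) / sizeof(list[0]);
    float sum = 0.0f, sumsq = 0.0f;
    #pragma omp parallel for reduction(+:sum, sumsq)
    for (long i = 0; i < n; i++) {
        sum += list[i];
        sumsq += list[i] * list[i];
    }
    float mean = sum / n;
    float stdev = sqrtf(sumsq / n - mean * mean);  /* E[x^2] - E[x]^2 */
    printf("Mean: %6.2f\nStandard Deviation: %6.4f\n", mean, stdev);
    /* for this sample: mean 5.00, standard deviation 2.0000 */
    return 0;
}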
integrator_leapfrog.c
/** * @file integrator.c * @brief Leap-frog integration scheme. * @author Hanno Rein <hanno@hanno-rein.de> * @details This file implements the leap-frog integration scheme. * This scheme is second order accurate, symplectic and well suited for * non-rotating coordinate systems. Note that the scheme is formally only * first order accurate when velocity dependent forces are present. * * @section LICENSE * Copyright (c) 2011 Hanno Rein, Shangfei Liu * * This file is part of rebound. * * rebound is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * rebound is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with rebound. If not, see <http://www.gnu.org/licenses/>. * */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <math.h> #include <time.h> #include "rebound.h" // Leapfrog integrator (Drift-Kick-Drift) // for non-rotating frame. void reb_integrator_leapfrog_part1(struct reb_simulation* r){ r->gravity_ignore_terms = 0; const int N = r->N; struct reb_particle* restrict const particles = r->particles; const double dt = r->dt; #pragma omp parallel for schedule(guided) for (int i=0;i<N;i++){ particles[i].x += 0.5* dt * particles[i].vx; particles[i].y += 0.5* dt * particles[i].vy; particles[i].z += 0.5* dt * particles[i].vz; } r->t+=dt/2.; } void reb_integrator_leapfrog_part2(struct reb_simulation* r){ const int N = r->N; struct reb_particle* restrict const particles = r->particles; const double dt = r->dt; #pragma omp parallel for schedule(guided) for (int i=0;i<N;i++){ particles[i].vx += dt * particles[i].ax; particles[i].vy += dt * particles[i].ay; particles[i].vz += dt * particles[i].az; particles[i].x += 0.5* dt * particles[i].vx; particles[i].y += 0.5* dt * particles[i].vy; particles[i].z += 0.5* dt * particles[i].vz; } r->t+=dt/2.; r->dt_last_done = r->dt; } void reb_integrator_leapfrog_synchronize(struct reb_simulation* r){ // Do nothing. } void reb_integrator_leapfrog_reset(struct reb_simulation* r){ // Do nothing. }
#include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <math.h> #include <time.h> #include "rebound.h" // Leapfrog integrator(Drift - Kick - Drift) // for non -rotating frame. void reb_integrator_leapfrog_part1(struct reb_simulation *r) { r->gravity_ignore_terms = 0; const int N = r->N; struct reb_particle *restrict const particles = r->particles; const double dt = r->dt; for (int i = 0; i < N; i++) { particles[i].x += 0.5 * dt * particles[i].vx; particles[i].y += 0.5 * dt * particles[i].vy; particles[i].z += 0.5 * dt * particles[i].vz; } r->t += dt / 2.; } void reb_integrator_leapfrog_part2(struct reb_simulation *r) { const int N = r->N; struct reb_particle *restrict const particles = r->particles; const double dt = r->dt; for (int i = 0; i < N; i++) { particles[i].vx += dt * particles[i].ax; particles[i].vy += dt * particles[i].ay; particles[i].vz += dt * particles[i].az; particles[i].x += 0.5 * dt * particles[i].vx; particles[i].y += 0.5 * dt * particles[i].vy; particles[i].z += 0.5 * dt * particles[i].vz; } r->t += dt / 2.; r->dt_last_done = r->dt; } void reb_integrator_leapfrog_synchronize(struct reb_simulation *r) { //Do nothing. } void reb_integrator_leapfrog_reset(struct reb_simulation *r) { //Do nothing. }
#include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <math.h> #include <time.h> #include "rebound.h" // Leapfrog integrator(Drift - Kick - Drift) // for non -rotating frame. void reb_integrator_leapfrog_part1(struct reb_simulation *r) { r->gravity_ignore_terms = 0; const int N = r->N; struct reb_particle *restrict const particles = r->particles; const double dt = r->dt; #pragma omp parallel for schedule(guided) for (int i = 0; i < N; i++) { particles[i].x += 0.5 * dt * particles[i].vx; particles[i].y += 0.5 * dt * particles[i].vy; particles[i].z += 0.5 * dt * particles[i].vz; } r->t += dt / 2.; } void reb_integrator_leapfrog_part2(struct reb_simulation *r) { const int N = r->N; struct reb_particle *restrict const particles = r->particles; const double dt = r->dt; #pragma omp parallel for schedule(guided) for (int i = 0; i < N; i++) { particles[i].vx += dt * particles[i].ax; particles[i].vy += dt * particles[i].ay; particles[i].vz += dt * particles[i].az; particles[i].x += 0.5 * dt * particles[i].vx; particles[i].y += 0.5 * dt * particles[i].vy; particles[i].z += 0.5 * dt * particles[i].vz; } r->t += dt / 2.; r->dt_last_done = r->dt; } void reb_integrator_leapfrog_synchronize(struct reb_simulation *r) { //Do nothing. } void reb_integrator_leapfrog_reset(struct reb_simulation *r) { //Do nothing. }
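The two part functions above split one drift-kick-drift step around the force evaluation: part1 drifts positions half a step, the accelerations are computed in between, and part2 applies the full kick plus the second half drift. Stripped of the rebound data structures, the scheme for a single particle in a 1-D harmonic potential (a = -x) looks like the sketch below; this illustrates the integrator's structure only, not the rebound API.

#include <stdio.h>

int main(void) {
    double x = 1.0, v = 0.0, dt = 0.01;
    for (int step = 0; step < 1000; step++) {
        x += 0.5 * dt * v;   /* drift, half step (part1) */
        double a = -x;       /* force evaluation between the two parts */
        v += dt * a;         /* kick, full step (part2) */
        x += 0.5 * dt * v;   /* drift, half step (part2) */
    }
    /* after t = 10 the exact solution is cos(10) ~ -0.8391 */
    printf("x = %.4f, v = %.4f\n", x, v);
    return 0;
}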
par_csr_matvec.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Matvec functions for hypre_CSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include "_hypre_utilities.hpp" //RL: TODO par_csr_matvec_device.c, include cuda there /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec *--------------------------------------------------------------------------*/ // y = alpha*A*x + beta*b HYPRE_Int hypre_ParCSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *b, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *b_local = hypre_ParVectorLocalVector(b); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); hypre_Vector *x_tmp; HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt b_size = hypre_ParVectorGlobalSize(b); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(x_local); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, jv; HYPRE_Int vecstride = hypre_VectorVectorStride( x_local ); HYPRE_Int idxstride = hypre_VectorIndexStride( x_local ); HYPRE_Complex *x_tmp_data, **x_buf_data; HYPRE_Complex *x_local_data = hypre_VectorData(x_local); #if defined(HYPRE_USING_GPU) HYPRE_Int sync_stream; hypre_GetSyncCudaCompute(&sync_stream); hypre_SetSyncCudaCompute(0); #endif HYPRE_ANNOTATE_FUNC_BEGIN; /*--------------------------------------------------------------------- * Check for size compatibility. ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. 
*--------------------------------------------------------------------*/ hypre_assert( idxstride > 0 ); if (num_cols != x_size) { ierr = 11; } if (num_rows != y_size || num_rows != b_size) { ierr = 12; } if (num_cols != x_size && (num_rows != y_size || num_rows != b_size)) { ierr = 13; } hypre_assert( hypre_VectorNumVectors(b_local) == num_vectors ); hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors ); if ( num_vectors == 1 ) { x_tmp = hypre_SeqVectorCreate( num_cols_offd ); } else { hypre_assert( num_vectors > 1 ); x_tmp = hypre_SeqMultiVectorCreate( num_cols_offd, num_vectors ); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) ); hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif HYPRE_Int use_persistent_comm = 0; #ifdef HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then we need different communication handles for different // num_vectors. hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg); #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } /* x_tmp */ #if defined(HYPRE_USING_GPU) /* for GPU and single vector, alloc persistent memory for x_tmp (in comm_pkg) and reuse */ if (num_vectors == 1) { if (!hypre_ParCSRCommPkgTmpData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd, hypre_MEMORY_DEVICE); #endif } hypre_VectorData(x_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg); hypre_SeqVectorSetDataOwner(x_tmp, 0); } #else if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_VectorData(x_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer( persistent_comm_handle); hypre_SeqVectorSetDataOwner(x_tmp, 0); #endif } #endif hypre_SeqVectorInitialize_v2(x_tmp, HYPRE_MEMORY_DEVICE); x_tmp_data = hypre_VectorData(x_tmp); /* x_buff_data */ x_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST); for (jv = 0; jv < num_vectors; ++jv) { #if defined(HYPRE_USING_GPU) if (jv == 0) { if (!hypre_ParCSRCommPkgBufData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_MEMORY_DEVICE); #endif } x_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg); continue; } #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM x_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle); continue; #endif } x_buf_data[jv] = hypre_TAlloc(HYPRE_Complex, 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); } /* The assert is because the following loop only works for 'column' storage of a multivector. This needs to be fixed to work more generally, at least for 'row' storage. This in turn, means either change CommPkg so num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put a stride in the logic of CommHandleCreate (stride either from a new arg or a new variable inside CommPkg). Or put the num_vector iteration inside CommHandleCreate (perhaps a new multivector variant of it). */ hypre_assert( idxstride == 1 ); //hypre_SeqVectorPrefetch(x_local, HYPRE_MEMORY_DEVICE); /* send_map_elmts on device */ hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg); for (jv = 0; jv < num_vectors; ++jv) { HYPRE_Complex *send_data = (HYPRE_Complex *) x_buf_data[jv]; HYPRE_Complex *locl_data = x_local_data + jv * vecstride; /* if on device, no need to Sync: send_data is on device memory */ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) /* pack send data on device */ HYPRE_THRUST_CALL( gather, hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg), hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg) + hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), locl_data, send_data ); #elif defined(HYPRE_USING_DEVICE_OPENMP) /* pack send data on device */ HYPRE_Int i; HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg); HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); #pragma omp target teams distribute parallel for private(i) is_device_ptr(send_data, locl_data, device_send_map_elmts) for (i = start; i < end; i++) { send_data[i] = locl_data[device_send_map_elmts[i]]; } #else HYPRE_Int i; /* pack send data on host */ #if defined(HYPRE_USING_OPENMP) #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); i ++) { send_data[i] = locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)]; } #endif } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication starts */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_buf_data[0]); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg, HYPRE_MEMORY_DEVICE, x_buf_data[jv], HYPRE_MEMORY_DEVICE, &x_tmp_data[jv * num_cols_offd] ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* overlapped local computation */ hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication ends */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_tmp_data); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* computation offd part */ if (num_cols_offd) { 
hypre_CSRMatrixMatvec( alpha, offd, x_tmp, 1.0, y_local ); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; if (!use_persistent_comm) { for ( jv = 0; jv < num_vectors; ++jv ) { #if defined(HYPRE_USING_GPU) if (jv == 0) { continue; } #endif hypre_TFree(x_buf_data[jv], HYPRE_MEMORY_DEVICE); } hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); } #if defined(HYPRE_USING_GPU) hypre_SetSyncCudaCompute(sync_stream); hypre_SyncCudaComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif HYPRE_ANNOTATE_FUNC_END; return ierr; } HYPRE_Int hypre_ParCSRMatrixMatvec( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { return hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvecT * * Performs y <- alpha * A^T * x + beta * y * *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvecT( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *diagT = hypre_ParCSRMatrixDiagT(A); hypre_CSRMatrix *offdT = hypre_ParCSRMatrixOffdT(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); hypre_Vector *y_tmp; HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(y_local); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, jv; HYPRE_Int vecstride = hypre_VectorVectorStride(y_local); HYPRE_Int idxstride = hypre_VectorIndexStride(y_local); HYPRE_Complex *y_tmp_data, **y_buf_data; HYPRE_Complex *y_local_data = hypre_VectorData(y_local); #if defined(HYPRE_USING_GPU) HYPRE_Int sync_stream; hypre_GetSyncCudaCompute(&sync_stream); hypre_SetSyncCudaCompute(0); #endif HYPRE_ANNOTATE_FUNC_BEGIN; /*--------------------------------------------------------------------- * Check for size compatibility. MatvecT returns ierr = 1 if * length of X doesn't equal the number of rows of A, * ierr = 2 if the length of Y doesn't equal the number of * columns of A, and ierr = 3 if both are true. * * Because temporary vectors are often used in MatvecT, none of * these conditions terminates processing, and the ierr flag * is informational only. 
*--------------------------------------------------------------------*/ if (num_rows != x_size) { ierr = 1; } if (num_cols != y_size) { ierr = 2; } if (num_rows != x_size && num_cols != y_size) { ierr = 3; } hypre_assert( hypre_VectorNumVectors(x_local) == num_vectors ); hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors ); if ( num_vectors == 1 ) { y_tmp = hypre_SeqVectorCreate(num_cols_offd); } else { hypre_assert( num_vectors > 1 ); y_tmp = hypre_SeqMultiVectorCreate(num_cols_offd, num_vectors); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) ); hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif HYPRE_Int use_persistent_comm = 0; #ifdef HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then we need different communication handles for different // num_vectors. hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(2, comm_pkg); #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } /* y_tmp */ #if defined(HYPRE_USING_GPU) /* for GPU and single vector, alloc persistent memory for y_tmp (in comm_pkg) and reuse */ if (num_vectors == 1) { if (!hypre_ParCSRCommPkgTmpData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd, hypre_MEMORY_DEVICE); #endif } hypre_VectorData(y_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg); hypre_SeqVectorSetDataOwner(y_tmp, 0); } #else if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_VectorData(y_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer( persistent_comm_handle); hypre_SeqVectorSetDataOwner(y_tmp, 0); #endif } #endif hypre_SeqVectorInitialize_v2(y_tmp, HYPRE_MEMORY_DEVICE); y_tmp_data = hypre_VectorData(y_tmp); /* y_buf_data */ y_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST); for (jv = 0; jv < num_vectors; ++jv) { #if defined(HYPRE_USING_GPU) if (jv == 0) { if (!hypre_ParCSRCommPkgBufData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_MEMORY_DEVICE); #endif } y_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg); continue; } #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM y_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle); continue; #endif } y_buf_data[jv] = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); } #ifdef HYPRE_PROFILE 
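   /* Profiling idiom used throughout this file: each timed region subtracts
      hypre_MPI_Wtime() on entry and adds it back on exit, so every
      hypre_profile_times slot accumulates the net elapsed seconds. */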
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif if (num_cols_offd) { if (offdT) { // offdT is optional. Used only if it's present hypre_CSRMatrixMatvec(alpha, offdT, x_local, 0.0, y_tmp); } else { hypre_CSRMatrixMatvecT(alpha, offd, x_local, 0.0, y_tmp); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_tmp_data); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { /* this is where we assume multivectors are 'column' storage */ comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 2, comm_pkg, HYPRE_MEMORY_DEVICE, &y_tmp_data[jv * num_cols_offd], HYPRE_MEMORY_DEVICE, y_buf_data[jv] ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* overlapped local computation */ if (diagT) { // diagT is optional. Used only if it's present. hypre_CSRMatrixMatvec(alpha, diagT, x_local, beta, y_local); } else { hypre_CSRMatrixMatvecT(alpha, diag, x_local, beta, y_local); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication ends */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_buf_data[0]); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif /* The assert is because the following loop only works for 'column' storage of a multivector. This needs to be fixed to work more generally, at least for 'row' storage. This in turn, means either change CommPkg so num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put a stride in the logic of CommHandleCreate (stride either from a new arg or a new variable inside CommPkg). Or put the num_vector iteration inside CommHandleCreate (perhaps a new multivector variant of it). 
*/ hypre_assert( idxstride == 1 ); /* send_map_elmts on device */ hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg); for (jv = 0; jv < num_vectors; ++jv) { HYPRE_Complex *recv_data = (HYPRE_Complex *) y_buf_data[jv]; HYPRE_Complex *locl_data = y_local_data + jv * vecstride; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) /* unpack recv data on device */ if (!hypre_ParCSRCommPkgWorkSpace(comm_pkg)) { hypre_ParCSRCommPkgWorkSpace(comm_pkg) = hypre_TAlloc( char, (2 * sizeof(HYPRE_Int) + sizeof(HYPRE_Real)) * hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE ); } hypreDevice_GenScatterAdd(locl_data, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg), recv_data, hypre_ParCSRCommPkgWorkSpace(comm_pkg)); #elif defined(HYPRE_USING_DEVICE_OPENMP) HYPRE_Int i, j; /* unpack recv data on device */ for (i = 0; i < num_sends; i++) { HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg); HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); #pragma omp target teams distribute parallel for private(j) is_device_ptr(recv_data, locl_data, device_send_map_elmts) for (j = start; j < end; j++) { locl_data[device_send_map_elmts[j]] += recv_data[j]; } } #else HYPRE_Int i; /* unpack recv data on host, TODO OMP? */ for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); i ++) { locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)] += recv_data[i]; } #endif } hypre_SeqVectorDestroy(y_tmp); y_tmp = NULL; if (!use_persistent_comm) { for ( jv = 0; jv < num_vectors; ++jv ) { #if defined(HYPRE_USING_GPU) if (jv == 0) { continue; } #endif hypre_TFree(y_buf_data[jv], HYPRE_MEMORY_DEVICE); } hypre_TFree(y_buf_data, HYPRE_MEMORY_HOST); } #if defined(HYPRE_USING_GPU) hypre_SetSyncCudaCompute(sync_stream); hypre_SyncCudaComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif HYPRE_ANNOTATE_FUNC_END; return ierr; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec_FF *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvec_FF( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y, HYPRE_Int *CF_marker, HYPRE_Int fpt ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommHandle *comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); hypre_Vector *x_tmp; HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, i, j, index, start, num_procs; HYPRE_Int *int_buf_data = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Complex *x_tmp_data = NULL; HYPRE_Complex *x_buf_data = NULL; HYPRE_Complex *x_local_data = hypre_VectorData(x_local); /*--------------------------------------------------------------------- * Check for size compatibility. 
ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); if (num_cols != x_size) { ierr = 11; } if (num_rows != y_size) { ierr = 12; } if (num_cols != x_size && num_rows != y_size) { ierr = 13; } if (num_procs > 1) { if (num_cols_offd) { x_tmp = hypre_SeqVectorCreate( num_cols_offd ); hypre_SeqVectorInitialize(x_tmp); x_tmp_data = hypre_VectorData(x_tmp); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_sends) x_buf_data = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) x_buf_data[index++] = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data, x_tmp_data ); } hypre_CSRMatrixMatvec_FF( alpha, diag, x_local, beta, y_local, CF_marker, CF_marker, fpt); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_sends) int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); if (num_cols_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd ); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_cols_offd) hypre_CSRMatrixMatvec_FF( alpha, offd, x_tmp, 1.0, y_local, CF_marker, CF_marker_offd, fpt); hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); } return ierr; }
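/* Illustrative sketch (not part of hypre): a thin caller showing how the
   informational ierr codes documented above might be consumed. It assumes a
   matrix and vectors already assembled elsewhere, uses only routines defined
   in this file plus hypre_printf, and the function name is hypothetical. */
static HYPRE_Int
demo_matvec_diagnostics( hypre_ParCSRMatrix *A,
                         hypre_ParVector    *x,
                         hypre_ParVector    *y )
{
   /* y <- 2*A*x + 0*y; size mismatches are reported but do not abort */
   HYPRE_Int ierr = hypre_ParCSRMatrixMatvec(2.0, A, x, 0.0, y);

   if (ierr == 11 || ierr == 13)
   {
      hypre_printf("Matvec: length of x != global num cols of A\n");
   }
   if (ierr == 12 || ierr == 13)
   {
      hypre_printf("Matvec: length of y != global num rows of A\n");
   }

   return ierr;
}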
/****************************************************************************** * * Matvec functions for hypre_CSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include "_hypre_utilities.hpp" //RL: TODO par_csr_matvec_device.c, include cuda there /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec *--------------------------------------------------------------------------*/ // y = alpha*A*x + beta*b HYPRE_Int hypre_ParCSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *b, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *b_local = hypre_ParVectorLocalVector(b); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); hypre_Vector *x_tmp; HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt b_size = hypre_ParVectorGlobalSize(b); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(x_local); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, jv; HYPRE_Int vecstride = hypre_VectorVectorStride( x_local ); HYPRE_Int idxstride = hypre_VectorIndexStride( x_local ); HYPRE_Complex *x_tmp_data, **x_buf_data; HYPRE_Complex *x_local_data = hypre_VectorData(x_local); #if defined(HYPRE_USING_GPU) HYPRE_Int sync_stream; hypre_GetSyncCudaCompute(&sync_stream); hypre_SetSyncCudaCompute(0); #endif HYPRE_ANNOTATE_FUNC_BEGIN; /*--------------------------------------------------------------------- * Check for size compatibility. ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. 
*--------------------------------------------------------------------*/ hypre_assert( idxstride > 0 ); if (num_cols != x_size) { ierr = 11; } if (num_rows != y_size || num_rows != b_size) { ierr = 12; } if (num_cols != x_size && (num_rows != y_size || num_rows != b_size)) { ierr = 13; } hypre_assert( hypre_VectorNumVectors(b_local) == num_vectors ); hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors ); if ( num_vectors == 1 ) { x_tmp = hypre_SeqVectorCreate( num_cols_offd ); } else { hypre_assert( num_vectors > 1 ); x_tmp = hypre_SeqMultiVectorCreate( num_cols_offd, num_vectors ); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) ); hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif HYPRE_Int use_persistent_comm = 0; #ifdef HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then we need different communication handles for different // num_vectors. hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg); #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } /* x_tmp */ #if defined(HYPRE_USING_GPU) /* for GPU and single vector, alloc persistent memory for x_tmp (in comm_pkg) and reuse */ if (num_vectors == 1) { if (!hypre_ParCSRCommPkgTmpData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd, hypre_MEMORY_DEVICE); #endif } hypre_VectorData(x_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg); hypre_SeqVectorSetDataOwner(x_tmp, 0); } #else if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_VectorData(x_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer( persistent_comm_handle); hypre_SeqVectorSetDataOwner(x_tmp, 0); #endif } #endif hypre_SeqVectorInitialize_v2(x_tmp, HYPRE_MEMORY_DEVICE); x_tmp_data = hypre_VectorData(x_tmp); /* x_buff_data */ x_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST); for (jv = 0; jv < num_vectors; ++jv) { #if defined(HYPRE_USING_GPU) if (jv == 0) { if (!hypre_ParCSRCommPkgBufData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_MEMORY_DEVICE); #endif } x_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg); continue; } #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM x_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle); continue; #endif } x_buf_data[jv] = hypre_TAlloc(HYPRE_Complex, 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); } /* The assert is because the following loop only works for 'column' storage of a multivector. This needs to be fixed to work more generally, at least for 'row' storage. This in turn, means either change CommPkg so num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put a stride in the logic of CommHandleCreate (stride either from a new arg or a new variable inside CommPkg). Or put the num_vector iteration inside CommHandleCreate (perhaps a new multivector variant of it). */ hypre_assert( idxstride == 1 ); //hypre_SeqVectorPrefetch(x_local, HYPRE_MEMORY_DEVICE); /* send_map_elmts on device */ hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg); for (jv = 0; jv < num_vectors; ++jv) { HYPRE_Complex *send_data = (HYPRE_Complex *) x_buf_data[jv]; HYPRE_Complex *locl_data = x_local_data + jv * vecstride; /* if on device, no need to Sync: send_data is on device memory */ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) /* pack send data on device */ HYPRE_THRUST_CALL( gather, hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg), hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg) + hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), locl_data, send_data ); #elif defined(HYPRE_USING_DEVICE_OPENMP) /* pack send data on device */ HYPRE_Int i; HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg); HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); for (i = start; i < end; i++) { send_data[i] = locl_data[device_send_map_elmts[i]]; } #else HYPRE_Int i; /* pack send data on host */ for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); i ++) { send_data[i] = locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)]; } #endif } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication starts */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_buf_data[0]); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg, HYPRE_MEMORY_DEVICE, x_buf_data[jv], HYPRE_MEMORY_DEVICE, &x_tmp_data[jv * num_cols_offd] ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* overlapped local computation */ hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication ends */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_tmp_data); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* computation offd part */ if (num_cols_offd) { hypre_CSRMatrixMatvec( alpha, offd, x_tmp, 1.0, y_local ); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; if 
(!use_persistent_comm) { for ( jv = 0; jv < num_vectors; ++jv ) { #if defined(HYPRE_USING_GPU) if (jv == 0) { continue; } #endif hypre_TFree(x_buf_data[jv], HYPRE_MEMORY_DEVICE); } hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); } #if defined(HYPRE_USING_GPU) hypre_SetSyncCudaCompute(sync_stream); hypre_SyncCudaComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif HYPRE_ANNOTATE_FUNC_END; return ierr; } HYPRE_Int hypre_ParCSRMatrixMatvec( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { return hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvecT * * Performs y <- alpha * A^T * x + beta * y * *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvecT( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *diagT = hypre_ParCSRMatrixDiagT(A); hypre_CSRMatrix *offdT = hypre_ParCSRMatrixOffdT(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); hypre_Vector *y_tmp; HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(y_local); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, jv; HYPRE_Int vecstride = hypre_VectorVectorStride(y_local); HYPRE_Int idxstride = hypre_VectorIndexStride(y_local); HYPRE_Complex *y_tmp_data, **y_buf_data; HYPRE_Complex *y_local_data = hypre_VectorData(y_local); #if defined(HYPRE_USING_GPU) HYPRE_Int sync_stream; hypre_GetSyncCudaCompute(&sync_stream); hypre_SetSyncCudaCompute(0); #endif HYPRE_ANNOTATE_FUNC_BEGIN; /*--------------------------------------------------------------------- * Check for size compatibility. MatvecT returns ierr = 1 if * length of X doesn't equal the number of rows of A, * ierr = 2 if the length of Y doesn't equal the number of * columns of A, and ierr = 3 if both are true. * * Because temporary vectors are often used in MatvecT, none of * these conditions terminates processing, and the ierr flag * is informational only. 
*--------------------------------------------------------------------*/ if (num_rows != x_size) { ierr = 1; } if (num_cols != y_size) { ierr = 2; } if (num_rows != x_size && num_cols != y_size) { ierr = 3; } hypre_assert( hypre_VectorNumVectors(x_local) == num_vectors ); hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors ); if ( num_vectors == 1 ) { y_tmp = hypre_SeqVectorCreate(num_cols_offd); } else { hypre_assert( num_vectors > 1 ); y_tmp = hypre_SeqMultiVectorCreate(num_cols_offd, num_vectors); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) ); hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif HYPRE_Int use_persistent_comm = 0; #ifdef HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then we need different communication handles for different // num_vectors. hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(2, comm_pkg); #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } /* y_tmp */ #if defined(HYPRE_USING_GPU) /* for GPU and single vector, alloc persistent memory for y_tmp (in comm_pkg) and reuse */ if (num_vectors == 1) { if (!hypre_ParCSRCommPkgTmpData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd, hypre_MEMORY_DEVICE); #endif } hypre_VectorData(y_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg); hypre_SeqVectorSetDataOwner(y_tmp, 0); } #else if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_VectorData(y_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer( persistent_comm_handle); hypre_SeqVectorSetDataOwner(y_tmp, 0); #endif } #endif hypre_SeqVectorInitialize_v2(y_tmp, HYPRE_MEMORY_DEVICE); y_tmp_data = hypre_VectorData(y_tmp); /* y_buf_data */ y_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST); for (jv = 0; jv < num_vectors; ++jv) { #if defined(HYPRE_USING_GPU) if (jv == 0) { if (!hypre_ParCSRCommPkgBufData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_MEMORY_DEVICE); #endif } y_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg); continue; } #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM y_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle); continue; #endif } y_buf_data[jv] = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); } #ifdef HYPRE_PROFILE 
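   /* Performance note: diagT/offdT used below are optional, explicitly stored
      transposes; when present, the transpose product runs as a regular CSR
      matvec instead of hypre_CSRMatrixMatvecT's scatter-style traversal. */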
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif if (num_cols_offd) { if (offdT) { // offdT is optional. Used only if it's present hypre_CSRMatrixMatvec(alpha, offdT, x_local, 0.0, y_tmp); } else { hypre_CSRMatrixMatvecT(alpha, offd, x_local, 0.0, y_tmp); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_tmp_data); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { /* this is where we assume multivectors are 'column' storage */ comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 2, comm_pkg, HYPRE_MEMORY_DEVICE, &y_tmp_data[jv * num_cols_offd], HYPRE_MEMORY_DEVICE, y_buf_data[jv] ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* overlapped local computation */ if (diagT) { // diagT is optional. Used only if it's present. hypre_CSRMatrixMatvec(alpha, diagT, x_local, beta, y_local); } else { hypre_CSRMatrixMatvecT(alpha, diag, x_local, beta, y_local); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication ends */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_buf_data[0]); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif /* The assert is because the following loop only works for 'column' storage of a multivector. This needs to be fixed to work more generally, at least for 'row' storage. This in turn, means either change CommPkg so num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put a stride in the logic of CommHandleCreate (stride either from a new arg or a new variable inside CommPkg). Or put the num_vector iteration inside CommHandleCreate (perhaps a new multivector variant of it). 
*/ hypre_assert( idxstride == 1 ); /* send_map_elmts on device */ hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg); for (jv = 0; jv < num_vectors; ++jv) { HYPRE_Complex *recv_data = (HYPRE_Complex *) y_buf_data[jv]; HYPRE_Complex *locl_data = y_local_data + jv * vecstride; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) /* unpack recv data on device */ if (!hypre_ParCSRCommPkgWorkSpace(comm_pkg)) { hypre_ParCSRCommPkgWorkSpace(comm_pkg) = hypre_TAlloc( char, (2 * sizeof(HYPRE_Int) + sizeof(HYPRE_Real)) * hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE ); } hypreDevice_GenScatterAdd(locl_data, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg), recv_data, hypre_ParCSRCommPkgWorkSpace(comm_pkg)); #elif defined(HYPRE_USING_DEVICE_OPENMP) HYPRE_Int i, j; /* unpack recv data on device */ for (i = 0; i < num_sends; i++) { HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg); HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); for (j = start; j < end; j++) { locl_data[device_send_map_elmts[j]] += recv_data[j]; } } #else HYPRE_Int i; /* unpack recv data on host, TODO OMP? */ for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); i ++) { locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)] += recv_data[i]; } #endif } hypre_SeqVectorDestroy(y_tmp); y_tmp = NULL; if (!use_persistent_comm) { for ( jv = 0; jv < num_vectors; ++jv ) { #if defined(HYPRE_USING_GPU) if (jv == 0) { continue; } #endif hypre_TFree(y_buf_data[jv], HYPRE_MEMORY_DEVICE); } hypre_TFree(y_buf_data, HYPRE_MEMORY_HOST); } #if defined(HYPRE_USING_GPU) hypre_SetSyncCudaCompute(sync_stream); hypre_SyncCudaComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif HYPRE_ANNOTATE_FUNC_END; return ierr; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec_FF *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvec_FF( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y, HYPRE_Int *CF_marker, HYPRE_Int fpt ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommHandle *comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); hypre_Vector *x_tmp; HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, i, j, index, start, num_procs; HYPRE_Int *int_buf_data = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Complex *x_tmp_data = NULL; HYPRE_Complex *x_buf_data = NULL; HYPRE_Complex *x_local_data = hypre_VectorData(x_local); /*--------------------------------------------------------------------- * Check for size compatibility. 
ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); if (num_cols != x_size) { ierr = 11; } if (num_rows != y_size) { ierr = 12; } if (num_cols != x_size && num_rows != y_size) { ierr = 13; } if (num_procs > 1) { if (num_cols_offd) { x_tmp = hypre_SeqVectorCreate( num_cols_offd ); hypre_SeqVectorInitialize(x_tmp); x_tmp_data = hypre_VectorData(x_tmp); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_sends) x_buf_data = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) x_buf_data[index++] = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data, x_tmp_data ); } hypre_CSRMatrixMatvec_FF( alpha, diag, x_local, beta, y_local, CF_marker, CF_marker, fpt); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_sends) int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); if (num_cols_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd ); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_cols_offd) hypre_CSRMatrixMatvec_FF( alpha, offd, x_tmp, 1.0, y_local, CF_marker, CF_marker_offd, fpt); hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); } return ierr; }
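/* Self-contained sketch (plain MPI, not hypre's comm-handle machinery) of the
   pack / post / overlap / unpack pattern that the matvec routines above
   implement. Every name below is illustrative; reqs must have room for
   num_sends + num_recvs requests. */
#include <mpi.h>

static void halo_exchange_pattern( int num_sends, const int *send_starts,
                                   const int *send_procs, const int *send_map,
                                   int num_recvs, const int *recv_starts,
                                   const int *recv_procs,
                                   const double *x_local, double *send_buf,
                                   double *x_halo, MPI_Comm comm,
                                   MPI_Request *reqs )
{
   int i, p, nreq = 0;

   /* 1. post receives for the halo values owned by other ranks */
   for (p = 0; p < num_recvs; p++)
   {
      MPI_Irecv(x_halo + recv_starts[p], recv_starts[p + 1] - recv_starts[p],
                MPI_DOUBLE, recv_procs[p], 0, comm, &reqs[nreq++]);
   }

   /* 2. pack: gather the owned entries that other ranks reference */
   for (i = 0; i < send_starts[num_sends]; i++)
   {
      send_buf[i] = x_local[send_map[i]];
   }

   /* 3. start the nonblocking sends */
   for (p = 0; p < num_sends; p++)
   {
      MPI_Isend(send_buf + send_starts[p], send_starts[p + 1] - send_starts[p],
                MPI_DOUBLE, send_procs[p], 0, comm, &reqs[nreq++]);
   }

   /* 4. overlap: the purely local diag product would run here */

   /* 5. wait; afterwards the offd product can consume x_halo */
   MPI_Waitall(nreq, reqs, MPI_STATUSES_IGNORE);
}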
/****************************************************************************** * * Matvec functions for hypre_CSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include "_hypre_utilities.hpp" //RL: TODO par_csr_matvec_device.c, include cuda there /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec *--------------------------------------------------------------------------*/ // y = alpha*A*x + beta*b HYPRE_Int hypre_ParCSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *b, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *b_local = hypre_ParVectorLocalVector(b); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); hypre_Vector *x_tmp; HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt b_size = hypre_ParVectorGlobalSize(b); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(x_local); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, jv; HYPRE_Int vecstride = hypre_VectorVectorStride( x_local ); HYPRE_Int idxstride = hypre_VectorIndexStride( x_local ); HYPRE_Complex *x_tmp_data, **x_buf_data; HYPRE_Complex *x_local_data = hypre_VectorData(x_local); #if defined(HYPRE_USING_GPU) HYPRE_Int sync_stream; hypre_GetSyncCudaCompute(&sync_stream); hypre_SetSyncCudaCompute(0); #endif HYPRE_ANNOTATE_FUNC_BEGIN; /*--------------------------------------------------------------------- * Check for size compatibility. ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. 
*--------------------------------------------------------------------*/ hypre_assert( idxstride > 0 ); if (num_cols != x_size) { ierr = 11; } if (num_rows != y_size || num_rows != b_size) { ierr = 12; } if (num_cols != x_size && (num_rows != y_size || num_rows != b_size)) { ierr = 13; } hypre_assert( hypre_VectorNumVectors(b_local) == num_vectors ); hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors ); if ( num_vectors == 1 ) { x_tmp = hypre_SeqVectorCreate( num_cols_offd ); } else { hypre_assert( num_vectors > 1 ); x_tmp = hypre_SeqMultiVectorCreate( num_cols_offd, num_vectors ); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) ); hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif HYPRE_Int use_persistent_comm = 0; #ifdef HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then we need different communication handles for different // num_vectors. hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg); #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } /* x_tmp */ #if defined(HYPRE_USING_GPU) /* for GPU and single vector, alloc persistent memory for x_tmp (in comm_pkg) and reuse */ if (num_vectors == 1) { if (!hypre_ParCSRCommPkgTmpData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd, hypre_MEMORY_DEVICE); #endif } hypre_VectorData(x_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg); hypre_SeqVectorSetDataOwner(x_tmp, 0); } #else if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_VectorData(x_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer( persistent_comm_handle); hypre_SeqVectorSetDataOwner(x_tmp, 0); #endif } #endif hypre_SeqVectorInitialize_v2(x_tmp, HYPRE_MEMORY_DEVICE); x_tmp_data = hypre_VectorData(x_tmp); /* x_buff_data */ x_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST); for (jv = 0; jv < num_vectors; ++jv) { #if defined(HYPRE_USING_GPU) if (jv == 0) { if (!hypre_ParCSRCommPkgBufData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_MEMORY_DEVICE); #endif } x_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg); continue; } #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM x_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle); continue; #endif } x_buf_data[jv] = hypre_TAlloc(HYPRE_Complex, 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); } /* The assert is because the following loop only works for 'column' storage of a multivector. This needs to be fixed to work more generally, at least for 'row' storage. This in turn, means either change CommPkg so num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put a stride in the logic of CommHandleCreate (stride either from a new arg or a new variable inside CommPkg). Or put the num_vector iteration inside CommHandleCreate (perhaps a new multivector variant of it). */ hypre_assert( idxstride == 1 ); //hypre_SeqVectorPrefetch(x_local, HYPRE_MEMORY_DEVICE); /* send_map_elmts on device */ hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg); for (jv = 0; jv < num_vectors; ++jv) { HYPRE_Complex *send_data = (HYPRE_Complex *) x_buf_data[jv]; HYPRE_Complex *locl_data = x_local_data + jv * vecstride; /* if on device, no need to Sync: send_data is on device memory */ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) /* pack send data on device */ HYPRE_THRUST_CALL( gather, hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg), hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg) + hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), locl_data, send_data ); #elif defined(HYPRE_USING_DEVICE_OPENMP) /* pack send data on device */ HYPRE_Int i; HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg); HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); #pragma omp target teams distribute parallel for private(i) is_device_ptr(send_data, locl_data, device_send_map_elmts) for (i = start; i < end; i++) { send_data[i] = locl_data[device_send_map_elmts[i]]; } #else HYPRE_Int i; /* pack send data on host */ #if defined(HYPRE_USING_OPENMP) #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); i ++) { send_data[i] = locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)]; } #endif } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication starts */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_buf_data[0]); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg, HYPRE_MEMORY_DEVICE, x_buf_data[jv], HYPRE_MEMORY_DEVICE, &x_tmp_data[jv * num_cols_offd] ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* overlapped local computation */ hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication ends */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_tmp_data); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* computation offd part */ if (num_cols_offd) { 
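/* x_tmp now holds the received halo values; when num_cols_offd is zero this
         rank references no off-process columns and the product is skipped. */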
hypre_CSRMatrixMatvec( alpha, offd, x_tmp, 1.0, y_local ); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; if (!use_persistent_comm) { for ( jv = 0; jv < num_vectors; ++jv ) { #if defined(HYPRE_USING_GPU) if (jv == 0) { continue; } #endif hypre_TFree(x_buf_data[jv], HYPRE_MEMORY_DEVICE); } hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); } #if defined(HYPRE_USING_GPU) hypre_SetSyncCudaCompute(sync_stream); hypre_SyncCudaComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif HYPRE_ANNOTATE_FUNC_END; return ierr; } HYPRE_Int hypre_ParCSRMatrixMatvec( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { return hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvecT * * Performs y <- alpha * A^T * x + beta * y * *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvecT( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *diagT = hypre_ParCSRMatrixDiagT(A); hypre_CSRMatrix *offdT = hypre_ParCSRMatrixOffdT(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); hypre_Vector *y_tmp; HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(y_local); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, jv; HYPRE_Int vecstride = hypre_VectorVectorStride(y_local); HYPRE_Int idxstride = hypre_VectorIndexStride(y_local); HYPRE_Complex *y_tmp_data, **y_buf_data; HYPRE_Complex *y_local_data = hypre_VectorData(y_local); #if defined(HYPRE_USING_GPU) HYPRE_Int sync_stream; hypre_GetSyncCudaCompute(&sync_stream); hypre_SetSyncCudaCompute(0); #endif HYPRE_ANNOTATE_FUNC_BEGIN; /*--------------------------------------------------------------------- * Check for size compatibility. MatvecT returns ierr = 1 if * length of X doesn't equal the number of rows of A, * ierr = 2 if the length of Y doesn't equal the number of * columns of A, and ierr = 3 if both are true. * * Because temporary vectors are often used in MatvecT, none of * these conditions terminates processing, and the ierr flag * is informational only. 
*--------------------------------------------------------------------*/ if (num_rows != x_size) { ierr = 1; } if (num_cols != y_size) { ierr = 2; } if (num_rows != x_size && num_cols != y_size) { ierr = 3; } hypre_assert( hypre_VectorNumVectors(x_local) == num_vectors ); hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors ); if ( num_vectors == 1 ) { y_tmp = hypre_SeqVectorCreate(num_cols_offd); } else { hypre_assert( num_vectors > 1 ); y_tmp = hypre_SeqMultiVectorCreate(num_cols_offd, num_vectors); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) ); hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif HYPRE_Int use_persistent_comm = 0; #ifdef HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then we need different communication handles for different // num_vectors. hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(2, comm_pkg); #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } /* y_tmp */ #if defined(HYPRE_USING_GPU) /* for GPU and single vector, alloc persistent memory for y_tmp (in comm_pkg) and reuse */ if (num_vectors == 1) { if (!hypre_ParCSRCommPkgTmpData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd, hypre_MEMORY_DEVICE); #endif } hypre_VectorData(y_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg); hypre_SeqVectorSetDataOwner(y_tmp, 0); } #else if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_VectorData(y_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer( persistent_comm_handle); hypre_SeqVectorSetDataOwner(y_tmp, 0); #endif } #endif hypre_SeqVectorInitialize_v2(y_tmp, HYPRE_MEMORY_DEVICE); y_tmp_data = hypre_VectorData(y_tmp); /* y_buf_data */ y_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST); for (jv = 0; jv < num_vectors; ++jv) { #if defined(HYPRE_USING_GPU) if (jv == 0) { if (!hypre_ParCSRCommPkgBufData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_MEMORY_DEVICE); #endif } y_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg); continue; } #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM y_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle); continue; #endif } y_buf_data[jv] = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); } #ifdef HYPRE_PROFILE 
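   /* Note the buffer roles reverse in MatvecT (comm job 2): y_tmp, indexed
      like the offd columns, is the send side, and y_buf_data is received
      into and later scatter-added through the send map into y_local. */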
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif if (num_cols_offd) { if (offdT) { // offdT is optional. Used only if it's present hypre_CSRMatrixMatvec(alpha, offdT, x_local, 0.0, y_tmp); } else { hypre_CSRMatrixMatvecT(alpha, offd, x_local, 0.0, y_tmp); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_tmp_data); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { /* this is where we assume multivectors are 'column' storage */ comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 2, comm_pkg, HYPRE_MEMORY_DEVICE, &y_tmp_data[jv * num_cols_offd], HYPRE_MEMORY_DEVICE, y_buf_data[jv] ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* overlapped local computation */ if (diagT) { // diagT is optional. Used only if it's present. hypre_CSRMatrixMatvec(alpha, diagT, x_local, beta, y_local); } else { hypre_CSRMatrixMatvecT(alpha, diag, x_local, beta, y_local); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication ends */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_buf_data[0]); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif /* The assert is because the following loop only works for 'column' storage of a multivector. This needs to be fixed to work more generally, at least for 'row' storage. This in turn, means either change CommPkg so num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put a stride in the logic of CommHandleCreate (stride either from a new arg or a new variable inside CommPkg). Or put the num_vector iteration inside CommHandleCreate (perhaps a new multivector variant of it). 
*/ hypre_assert( idxstride == 1 ); /* send_map_elmts on device */ hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg); for (jv = 0; jv < num_vectors; ++jv) { HYPRE_Complex *recv_data = (HYPRE_Complex *) y_buf_data[jv]; HYPRE_Complex *locl_data = y_local_data + jv * vecstride; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) /* unpack recv data on device */ if (!hypre_ParCSRCommPkgWorkSpace(comm_pkg)) { hypre_ParCSRCommPkgWorkSpace(comm_pkg) = hypre_TAlloc( char, (2 * sizeof(HYPRE_Int) + sizeof(HYPRE_Real)) * hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE ); } hypreDevice_GenScatterAdd(locl_data, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg), recv_data, hypre_ParCSRCommPkgWorkSpace(comm_pkg)); #elif defined(HYPRE_USING_DEVICE_OPENMP) HYPRE_Int i, j; /* unpack recv data on device */ for (i = 0; i < num_sends; i++) { HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg); HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); #pragma omp target teams distribute parallel for private(j) is_device_ptr(recv_data, locl_data, device_send_map_elmts) for (j = start; j < end; j++) { locl_data[device_send_map_elmts[j]] += recv_data[j]; } } #else HYPRE_Int i; /* unpack recv data on host, TODO OMP? */ for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); i++) { locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)] += recv_data[i]; } #endif } hypre_SeqVectorDestroy(y_tmp); y_tmp = NULL; if (!use_persistent_comm) { for ( jv = 0; jv < num_vectors; ++jv ) { #if defined(HYPRE_USING_GPU) if (jv == 0) { continue; } #endif hypre_TFree(y_buf_data[jv], HYPRE_MEMORY_DEVICE); } hypre_TFree(y_buf_data, HYPRE_MEMORY_HOST); } #if defined(HYPRE_USING_GPU) hypre_SetSyncCudaCompute(sync_stream); hypre_SyncCudaComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif HYPRE_ANNOTATE_FUNC_END; return ierr; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec_FF *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvec_FF( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y, HYPRE_Int *CF_marker, HYPRE_Int fpt ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommHandle *comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); hypre_Vector *x_tmp = NULL; /* created only when num_cols_offd > 0; NULL-init keeps the unconditional destroy below well-defined */ HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, i, j, index, start, num_procs; HYPRE_Int *int_buf_data = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Complex *x_tmp_data = NULL; HYPRE_Complex *x_buf_data = NULL; HYPRE_Complex *x_local_data = hypre_VectorData(x_local); /*--------------------------------------------------------------------- * Check for size compatibility. 
ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); if (num_cols != x_size) { ierr = 11; } if (num_rows != y_size) { ierr = 12; } if (num_cols != x_size && num_rows != y_size) { ierr = 13; } if (num_procs > 1) { if (num_cols_offd) { x_tmp = hypre_SeqVectorCreate( num_cols_offd ); hypre_SeqVectorInitialize(x_tmp); x_tmp_data = hypre_VectorData(x_tmp); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_sends) x_buf_data = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) x_buf_data[index++] = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data, x_tmp_data ); } hypre_CSRMatrixMatvec_FF( alpha, diag, x_local, beta, y_local, CF_marker, CF_marker, fpt); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_sends) int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); if (num_cols_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd ); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_cols_offd) hypre_CSRMatrixMatvec_FF( alpha, offd, x_tmp, 1.0, y_local, CF_marker, CF_marker_offd, fpt); hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); } return ierr; }
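For reference, the unpack step of the transpose matvec above is just a scatter-add of the received halo contributions through the comm-package send map; the CUDA, device-OpenMP, and host branches all implement the same reduction. A minimal host-side sketch of that pattern, with hypothetical names (recv_buf, send_map) standing in for the hypre accessors:

#include <stddef.h>

/* Scatter-add unpack after a transpose-matvec halo exchange: entry i of the
 * receive buffer belongs to local row send_map[i]; several neighbors may
 * contribute to the same row, hence += rather than assignment. */
static void unpack_scatter_add(double *y_local, const double *recv_buf,
                               const int *send_map, size_t n)
{
  size_t i;
  for (i = 0; i < n; i++)
  {
    y_local[send_map[i]] += recv_buf[i];
  }
}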
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #if defined(_OPENMP) #include <omp.h> #endif #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (causes performance degradation) /* for(i=0; i<Nz; 
i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
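The time loop above is sequential, but every (i, j, k) update within one sweep is independent, so the spatial loops thread naturally. A hedged OpenMP sketch of one sweep (an illustration only; the two variants of this benchmark below keep the stencil serial and differ mainly in LIKWID instrumentation):

/* One Jacobi-style sweep of the 7-point stencil over the interior points,
 * threaded across the two outer spatial loops; anext/aprev correspond to
 * the A[(t+1)%2] and A[t%2] buffers of the benchmark. */
static void sweep7pt(double ***anext, double ***aprev, int Nz, int Ny, int Nx,
                     double alpha, double beta)
{
  int i, j, k;
#pragma omp parallel for collapse(2) private(k)
  for (i = 1; i < Nz - 1; i++) {
    for (j = 1; j < Ny - 1; j++) {
      for (k = 1; k < Nx - 1; k++) {
        anext[i][j][k] = alpha * aprev[i][j][k]
          + beta * (aprev[i - 1][j][k] + aprev[i][j - 1][k] + aprev[i][j][k - 1]
                  + aprev[i + 1][j][k] + aprev[i][j + 1][k] + aprev[i][j][k + 1]);
      }
    }
  }
}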
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (causes performance degradation) /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #if defined(_OPENMP) #include <omp.h> #endif #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (causes performance degradation) /* for(i=0; i<Nz; 
i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
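Since timeval_subtract is repeated verbatim in every variant above, a short usage sketch of the timing idiom the benchmark relies on (the timed region is elided; diff is converted to seconds exactly as tdiff is above):

struct timeval t0, t1, diff;
gettimeofday(&t0, 0);
/* ... timed region ... */
gettimeofday(&t1, 0);
(void) timeval_subtract(&diff, &t1, &t0);
/* elapsed seconds as a double, matching the benchmark's tdiff */
double elapsed = (double) diff.tv_sec + (double) diff.tv_usec * 1.0e-6;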
multi_device.h
// // Created by ss on 18-6-18. // #ifndef THUNDERGBM_MULTI_DEVICE_H #define THUNDERGBM_MULTI_DEVICE_H #include "thundergbm/common.h" //switch to specific device and do something, then switch back to the original device //FIXME make this macro into a function? #define DO_ON_DEVICE(device_id, something) \ do { \ int org_device_id = 0; \ CUDA_CHECK(cudaGetDevice(&org_device_id)); \ CUDA_CHECK(cudaSetDevice(device_id)); \ something; \ CUDA_CHECK(cudaSetDevice(org_device_id)); \ } while (false) /** * Do something on multiple devices, then switch back to the original device * * * example: * * DO_ON_MULTI_DEVICES(n_devices, [&](int device_id){ * //do_something_on_device(device_id); * }); */ template<typename L> void DO_ON_MULTI_DEVICES(int n_devices, L do_something) { int org_device_id = 0; CUDA_CHECK(cudaGetDevice(&org_device_id)); #pragma omp parallel for num_threads(n_devices) for (int device_id = 0; device_id < n_devices; device_id++) { CUDA_CHECK(cudaSetDevice(device_id)); do_something(device_id); } CUDA_CHECK(cudaSetDevice(org_device_id)); } #endif //THUNDERGBM_MULTI_DEVICE_H
// // Created by ss on 18-6-18. // #ifndef THUNDERGBM_MULTI_DEVICE_H #define THUNDERGBM_MULTI_DEVICE_H #include "thundergbm/common.h" //switch to specific device and do something, then switch back to the original device //FIXME make this macro into a function? #define DO_ON_DEVICE(device_id, something) \ do { \ int org_device_id = 0; \ CUDA_CHECK(cudaGetDevice(&org_device_id)); \ CUDA_CHECK(cudaSetDevice(device_id)); \ something; \ CUDA_CHECK(cudaSetDevice(org_device_id)); \ } while (false) /** * Do something on multiple devices, then switch back to the original device * * * example: * * DO_ON_MULTI_DEVICES(n_devices, [&](int device_id){ * //do_something_on_device(device_id); * }); */ template<typename L> void DO_ON_MULTI_DEVICES(int n_devices, L do_something) { int org_device_id = 0; CUDA_CHECK(cudaGetDevice(&org_device_id)); for (int device_id = 0; device_id < n_devices; device_id++) { CUDA_CHECK(cudaSetDevice(device_id)); do_something(device_id); } CUDA_CHECK(cudaSetDevice(org_device_id)); } #endif //THUNDERGBM_MULTI_DEVICE_H
// // Created by ss on 18-6-18. // #ifndef THUNDERGBM_MULTI_DEVICE_H #define THUNDERGBM_MULTI_DEVICE_H #include "thundergbm/common.h" //switch to specific device and do something, then switch back to the original device //FIXME make this macro into a function? #define DO_ON_DEVICE(device_id, something) \ do { \ int org_device_id = 0; \ CUDA_CHECK(cudaGetDevice(&org_device_id)); \ CUDA_CHECK(cudaSetDevice(device_id)); \ something; \ CUDA_CHECK(cudaSetDevice(org_device_id)); \ } while (false) /** * Do something on multiple devices, then switch back to the original device * * * example: * * DO_ON_MULTI_DEVICES(n_devices, [&](int device_id){ * //do_something_on_device(device_id); * }); */ template<typename L> void DO_ON_MULTI_DEVICES(int n_devices, L do_something) { int org_device_id = 0; CUDA_CHECK(cudaGetDevice(&org_device_id)); #pragma omp parallel for num_threads(n_devices) for (int device_id = 0; device_id < n_devices; device_id++) { CUDA_CHECK(cudaSetDevice(device_id)); do_something(device_id); } CUDA_CHECK(cudaSetDevice(org_device_id)); } #endif //THUNDERGBM_MULTI_DEVICE_H
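Both helpers in this header follow one invariant: save the caller's current device, switch, do the work, switch back, so the caller's device selection is untouched and each host thread owns its own device context. A plain CUDA-runtime sketch of that pattern (CUDA_CHECK-style error handling omitted; the work callback is a hypothetical stand-in for the per-device lambda):

#include <cuda_runtime.h>

/* Save/restore-device pattern wrapped by DO_ON_DEVICE and
 * DO_ON_MULTI_DEVICES: with OpenMP, each thread of the parallel for owns
 * one device, mirroring '#pragma omp parallel for num_threads(n_devices)'. */
static void on_each_device(int n_devices, void (*work)(int device_id))
{
  int org_device_id = 0;
  cudaGetDevice(&org_device_id);
#pragma omp parallel for num_threads(n_devices)
  for (int device_id = 0; device_id < n_devices; device_id++) {
    cudaSetDevice(device_id); /* device selection is per host thread */
    work(device_id);
  }
  cudaSetDevice(org_device_id);
}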
edge_vol_int.c
/****************************************************************************** ** Copyright (c) 2016-2019, Intel Corporation ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) 
******************************************************************************/ #include "edge_proxy_common.h" #include <libxsmm.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #if defined(_OPENMP) # include <omp.h> #endif /*#define EDGE_HP_1G*/ /*#define HANDLE_AMOK*/ #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) #include <sys/mman.h> #include <linux/mman.h> #endif static void* edge_hp_malloc( size_t nbytes, size_t alignment ) { void* ret_ptr = NULL; #if defined(EDGE_HP_1G) size_t num_large_pages = nbytes / (1073741824L); if ( nbytes > num_large_pages*1073741824L ) { num_large_pages++; } nbytes = (size_t) num_large_pages * 1073741824L; printf("trying to allocate %ld 1G pages\n", num_large_pages); /*ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 );*/ ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 ); if ( (ret_ptr == (void *)(-1)) ) { fprintf(stderr,"1G mmap call failed\n"); exit(1); } #elif defined(EDGE_HP_2M) size_t num_large_pages = nbytes / (2097152UL); if ( nbytes > num_large_pages*2097152UL ) { num_large_pages++; } nbytes = (size_t) num_large_pages * 2097152UL; printf("trying to allocate %ld 2M pages\n", num_large_pages); /*ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 );*/ ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 ); if ( (ret_ptr == (void *)(-1)) ) { fprintf(stderr,"2M mmap call failed\n"); exit(1); } #else ret_ptr = libxsmm_aligned_malloc( nbytes, alignment ); #endif return ret_ptr; } static void edge_hp_free( void* ptr, size_t nbytes ) { LIBXSMM_UNUSED( nbytes ); #if defined(EDGE_HP_1G) /* to be implemented */ #elif defined(EDGE_HP_2M) /* to be implemented */ #else libxsmm_free( ptr ); #endif } #if defined(__AVX512F__) static void matMulFusedAC( unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c ) { unsigned int l_m, l_n, l_k; for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = (i_beta != 0.0) ? _mm512_mul_pd( _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ), _mm512_set1_pd( i_beta ) ) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc); } } for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ); for( l_k = 0; l_k < i_k; l_k++ ) { vc = _mm512_fmadd_pd( _mm512_set1_pd( i_b[l_k*i_ldB + l_n] ), _mm512_loadu_pd( &(i_a[l_m*i_ldA*8 + l_k*8 + 0]) ), vc); } _mm512_storeu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc ); } } } static void matMulFusedBC( unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c ) { unsigned int l_m, l_n, l_k; for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = (i_beta != 0.0) ? 
_mm512_mul_pd( _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ), _mm512_set1_pd( i_beta ) ) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc); } } for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ); for( l_k = 0; l_k < i_k; l_k++ ) { vc = _mm512_fmadd_pd( _mm512_set1_pd( i_a[l_m*i_ldA + l_k] ), _mm512_loadu_pd( &(i_b[l_k*i_ldB*8 + l_n*8 + 0]) ), vc); } _mm512_storeu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc ); } } } #endif static void amok_detect( const double* i_runtimes, size_t* io_amoks, const size_t i_workers ) { double time_avg; size_t i; time_avg = 0.0; for (i = 0; i < i_workers; i++) { if ( io_amoks[8*i] == 0 ) { time_avg += i_runtimes[8*i]; } } time_avg = time_avg/((double)(i_workers-io_amoks[8*i_workers])); /* now detect amoks */ for (i = 0; i < i_workers; i++) { if ( io_amoks[8*i] == 0 ) { if ( i_runtimes[8*i] > time_avg*1.07 ) { /* this is the amok condition */ io_amoks[8*i_workers]++; io_amoks[8*i] = 1; } } } } static void amok_balance( const size_t* i_amoks, const size_t i_workers, const size_t i_worksize, const size_t i_mytid, size_t* io_chunk, size_t* io_mystart, size_t* io_myend ) { size_t l_chunk, l_start, l_end; size_t l_cur_amoks = i_amoks[8*i_workers]; size_t l_non_amoks = i_workers - l_cur_amoks; l_chunk = (i_worksize % l_non_amoks == 0) ? (i_worksize / l_non_amoks) : ((i_worksize / l_non_amoks) + 1); if (i_amoks[8*i_mytid] != 0) { l_start = 0; l_end = 0; } else { size_t l_tid_offset = 0; size_t l_z; for ( l_z = 0; l_z < i_mytid; l_z++) { if ( i_amoks[8*l_z] != 0 ) { l_tid_offset++; } } l_tid_offset = i_mytid - l_tid_offset; l_start = (l_tid_offset * l_chunk < i_worksize) ? (l_tid_offset * l_chunk) : i_worksize; l_end = ((l_tid_offset+1) * l_chunk < i_worksize) ? 
((l_tid_offset+1) * l_chunk) : i_worksize; } *io_chunk = l_chunk; *io_mystart = l_start; *io_myend = l_end; } int main(int argc, char* argv[]) { char* mat_a = 0; unsigned int *mat_a_rowptr, *mat_a_colidx; unsigned int mat_a_rowcount, mat_a_colcount, mat_a_nnz; double* mat_a_values; libxsmm_dmmfunction a_kernel; char* mat_b = 0; unsigned int *mat_b_rowptr, *mat_b_colidx; unsigned int mat_b_rowcount, mat_b_colcount, mat_b_nnz; double* mat_b_values; libxsmm_dmmfunction b_kernel; char* mat_c = 0; unsigned int *mat_c_rowptr, *mat_c_colidx; unsigned int mat_c_rowcount, mat_c_colcount, mat_c_nnz; double* mat_c_values; libxsmm_dmmfunction c_kernel; char* mat_st = 0; unsigned int *mat_st_rowptr, *mat_st_colidx; unsigned int mat_st_rowcount, mat_st_colcount, mat_st_nnz; double* mat_st_values; libxsmm_dmmfunction st_kernel; int num_modes = 9; int num_quants = 9; size_t num_elems = 0; size_t num_cfr = 8; size_t num_reps = 1; size_t elem_size; /* OpenMP: signed induction variables */ int i, j; const libxsmm_gemm_descriptor *l_xgemm_desc_stiff = 0, *l_xgemm_desc_star = 0; libxsmm_descriptor_blob l_xgemm_blob_stiff, l_xgemm_blob_star; const libxsmm_gemm_prefetch_type prefetch = LIBXSMM_GEMM_PREFETCH_NONE; const int flags = LIBXSMM_GEMM_FLAGS('N', 'N'); const double alpha = 1, beta = 1; double flops_vol; double* q; double* qt; double* qs; double* star; double* global; unsigned long long l_start, l_end; double l_total; unsigned int l_num_threads; unsigned int l_star_ent = num_quants*num_quants; double* l_total_thread; double* l_cur_thread_time; double time_max; double time_min; double time_avg; size_t* amoks; /* read cmd */ if ((argc > 1 && !strncmp(argv[1], "-h", 3)) || (argc != 8)) { printf("Usage: %s stif1 stif2 stif3 star nModes nElems nReps\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); /* some empty lines at the beginning */ printf("\n"); i = 1; if (argc > (int)i) mat_a = argv[i++]; if (argc > (int)i) mat_b = argv[i++]; if (argc > (int)i) mat_c = argv[i++]; if (argc > (int)i) mat_st = argv[i++]; if (argc > (int)i) num_modes = atoi(argv[i++]); if (argc > (int)i) num_elems = atoi(argv[i++]); if (argc > (int)i) num_reps = atoi(argv[i++]); elem_size = num_modes*num_quants*num_cfr; #if defined(_OPENMP) #pragma omp parallel { #pragma omp master { l_num_threads = omp_get_num_threads(); } } #else l_num_threads = 1; #endif l_total_thread = (double*)malloc(8*l_num_threads*sizeof(double)); l_cur_thread_time = (double*)malloc(8*l_num_threads*sizeof(double)); amoks = (size_t*)malloc(8*(l_num_threads+1)*sizeof(size_t)); for ( i = 0; i < 8*((int)l_num_threads+1); i++ ) { amoks[i] = 0; } /* read matrices */ printf("reading sparse matrices... "); edge_sparse_csr_reader_double( mat_a, &mat_a_rowptr, &mat_a_colidx, &mat_a_values, &mat_a_rowcount, &mat_a_colcount, &mat_a_nnz ); edge_sparse_csr_reader_double( mat_b, &mat_b_rowptr, &mat_b_colidx, &mat_b_values, &mat_b_rowcount, &mat_b_colcount, &mat_b_nnz ); edge_sparse_csr_reader_double( mat_c, &mat_c_rowptr, &mat_c_colidx, &mat_c_values, &mat_c_rowcount, &mat_c_colcount, &mat_c_nnz ); edge_sparse_csr_reader_double( mat_st, &mat_st_rowptr, &mat_st_colidx, &mat_st_values, &mat_st_rowcount, &mat_st_colcount, &mat_st_nnz ); printf("done!\n\n"); /* generate kernels */ printf("generating code... 
"); l_xgemm_desc_stiff = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_stiff, num_quants, num_modes, num_modes, num_modes, 0, num_modes, alpha, beta, flags, prefetch); l_xgemm_desc_star = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_star, num_quants, num_modes, num_quants, 0, num_modes, num_modes, alpha, beta, flags, prefetch); a_kernel = libxsmm_create_xcsr_soa( l_xgemm_desc_stiff, mat_a_rowptr, mat_a_colidx, (const void*)mat_a_values ).dmm; b_kernel = libxsmm_create_xcsr_soa( l_xgemm_desc_stiff, mat_b_rowptr, mat_b_colidx, (const void*)mat_b_values ).dmm; c_kernel = libxsmm_create_xcsr_soa( l_xgemm_desc_stiff, mat_c_rowptr, mat_c_colidx, (const void*)mat_c_values ).dmm; st_kernel = libxsmm_create_xcsr_soa( l_xgemm_desc_star, mat_st_rowptr, mat_st_colidx, (const void*)mat_st_values ).dmm; if ( a_kernel == 0 ) { printf("a kernel could not be built -> exit!"); exit(-1); } if ( b_kernel == 0 ) { printf("b kernel could not be built -> exit!"); exit(-1); } if ( b_kernel == 0 ) { printf("c kernel could not be built -> exit!"); exit(-1); } if ( st_kernel == 0 ) { printf("st kernel could not be built -> exit!"); exit(-1); } printf("done!\n\n"); /* copying code to 1 GB page */ #if 0 #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) printf("copying code to 1GB page...\n"); onegcode = (void*)edge_hp_malloc( 5*1024*1024, 2097152 ); memcpy( onegcode, (void*) a_kernel, 1505 ); memcpy( onegcode+(1*1024*1024)+64, (void*) b_kernel, 2892 ); memcpy( onegcode+(2*1024*1024)+128, (void*) c_kernel, 3249 ); memcpy( onegcode+(3*1024*1024)+196, (void*)st_kernel, 11010 ); a_kernel = (libxsmm_dmmfunction)onegcode; b_kernel = (libxsmm_dmmfunction)(onegcode+(1*1024*1024)+64); c_kernel = (libxsmm_dmmfunction)(onegcode+(2*1024*1024)+128); st_kernel = (libxsmm_dmmfunction)(onegcode+(3*1024*1024)+196); printf("...done\n\n"); #endif #endif /* create unknowns and t-unknowns */ printf("allocating and initializing fake data... 
\n"); /* DoFs */ printf(" q: %f MiB\n", ((double)(num_elems*num_modes*num_quants*num_cfr*sizeof(double))) / ( 1024.0*1024.0) ); q = (double*)edge_hp_malloc( num_elems*num_modes*num_quants*num_cfr*sizeof(double), 2097152); /* tDofs */ printf(" qt: %f MiB\n", ((double)(num_elems*num_modes*num_quants*num_cfr*sizeof(double))) / ( 1024.0*1024.0) ); qt = (double*)edge_hp_malloc( num_elems*num_modes*num_quants*num_cfr*sizeof(double), 2097152); /* star matrices */ printf(" star: %f MiB\n", ((double)(num_elems*3*l_star_ent*sizeof(double))) / ( 1024.0*1024.0 ) ); star = (double*)edge_hp_malloc( num_elems*3*l_star_ent*sizeof(double), 2097152); /* stiffness matrices */ printf("global: %f MiB\n", ((double)(3*num_modes*num_modes*sizeof(double))) / ( 1024.0*1024 ) ); global = (double*)edge_hp_malloc( 3*num_modes*num_modes*sizeof(double), 2097152); /* per thread scratch */ printf(" t: %f MiB\n", ((double)(l_num_threads*num_modes*num_quants*num_cfr*sizeof(double)))/ ( 1024.0*1024.0) ); qs = (double*)edge_hp_malloc( l_num_threads*num_modes*num_quants*num_cfr*sizeof(double), 2097152); for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { q[i*elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { qt[i*elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)l_num_threads; i++) { for (j = 0; j < (int)elem_size; j++) { qs[i*elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)mat_st_nnz*3; j++) { star[(i*3*mat_st_nnz)+j] = libxsmm_rng_f64(); } } for (i = 0; i < 3; i++) { for (j = 0; j < num_modes*num_modes; j++) { global[(i*num_modes*num_modes)+j] = libxsmm_rng_f64(); } } printf("allocation done!\n\n"); printf("running benchmark...\n"); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) # pragma omp parallel private(i, j) #endif { #if defined(_OPENMP) int mytid = omp_get_thread_num(); #else int mytid = 0; #endif libxsmm_timer_tickint mystart, myend; #if defined(HANDLE_AMOK) size_t cur_amoks = 0; size_t non_amoks = l_num_threads; #endif size_t l_el_chunk = 0; size_t l_el_start = 0; size_t l_el_end = 0; /* initial work distribution */ amok_balance( amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end ); for (i = 0; i < (int)num_reps; i++) { #if defined(HANDLE_AMOK) /* did we had an amok? 
*/ if (cur_amoks != amoks[8*l_num_threads]) { cur_amoks = amoks[8*l_num_threads]; non_amoks = l_num_threads - cur_amoks; /* re-balance work */ amok_balance( amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end ); } #endif mystart = libxsmm_timer_tick(); for (j = (int)l_el_start; j < (int)l_el_end; j++) { #if 1 st_kernel( star+(j*3*mat_st_nnz) , qt+(j*elem_size), qs+(mytid*elem_size) ); a_kernel( qs+(mytid*elem_size), global , q+(j*elem_size) ); st_kernel( star+(j*3*mat_st_nnz)+mat_st_nnz , qt+(j*elem_size), qs+(mytid*elem_size) ); b_kernel( qs+(mytid*elem_size), global+(num_modes*num_modes) , q+(j*elem_size) ); st_kernel( star+(j*3*mat_st_nnz)+(2*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) ); c_kernel( qs+(mytid*elem_size), global+(2*num_modes*num_modes), q+(j*elem_size) ); #else matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) ); matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global, q+(j*elem_size) ); matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz)+mat_st_nnz, qt+(j*elem_size), qs+(mytid*elem_size) ); matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global+(num_modes*num_modes) , q+(j*elem_size) ); matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz)+(2*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) ); matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global+(2*num_modes*num_modes), q+(j*elem_size) ); #endif } myend = libxsmm_timer_tick(); l_cur_thread_time[8*mytid] = libxsmm_timer_duration( mystart, myend ); l_total_thread[8*mytid] += libxsmm_timer_duration( mystart, myend ); #if defined(_OPENMP) #pragma omp barrier #endif #if defined(HANDLE_AMOK) /* checking for amoks is centralized business */ if (mytid == 0) { /* amok check */ amok_detect( l_cur_thread_time, amoks, l_num_threads ); } #if defined(_OPENMP) #pragma omp barrier #endif #endif } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("...done!\n\n"); /* some timing stats */ time_max = 0.0; time_min = 80000000; time_avg = 0.0; for (i = 0; i < (int)l_num_threads; i++) { if( amoks[8*i] == 0 ) { if( l_total_thread[8*i] > time_max) time_max = l_total_thread[8*i]; if( l_total_thread[8*i] < time_min) time_min = l_total_thread[8*i]; time_avg += l_total_thread[8*i]; } } time_avg = time_avg/((double)(l_num_threads-amoks[8*l_num_threads])); flops_vol = (double)num_quants * (double)mat_a_nnz * (double)num_cfr * 2.0; flops_vol += (double)num_quants * (double)mat_b_nnz * (double)num_cfr * 2.0; flops_vol += (double)num_quants * (double)mat_c_nnz * (double)num_cfr * 2.0; flops_vol += (double)num_modes * (double)mat_st_nnz * (double)num_cfr * 6.0; /* 3 star matrix mul */ printf("%fs time for vol (asm), min %f, max %f, avg %f, #amoks %llu, amok-threads ", l_total, time_min, time_max, time_avg, (unsigned long long)amoks[8*l_num_threads]); for ( i = 0; i < (int)l_num_threads; i++ ) { if ( amoks[8*i] != 0 ) { printf("%i,", i); } } printf("\n"); printf("%f GFLOPS for vol (asm)\n", ((double)num_elems * (double)num_reps * flops_vol) / (l_total * 1.0e9)); printf("%f GiB/s for vol (asm)\n", (double)((double)num_elems * (double)elem_size * 8.0 * 3.0 * 
(double)num_reps) / (l_total * 1024.0*1024.0*1024.0) ); printf("done!\n\n"); /* some empty lines at the end */ printf("\n\n"); return 0; }
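To make the AVX-512 kernels in this file easier to audit: matMulFusedAC computes, for each of the 8 fused runs r, o_c[m][n][r] = i_beta * o_c[m][n][r] + sum_k i_a[m][k][r] * i_b[k][n], with A and C stored 8-interleaved and B shared (broadcast) across runs. A scalar reference derived from the intrinsics loop, for checking only (not part of the benchmark):

/* Scalar reference for matMulFusedAC: A and C carry 8 fused copies per entry
 * (the r index is innermost in memory), B is a plain matrix broadcast across
 * copies, matching the _mm512_set1_pd/_mm512_fmadd_pd loop above. */
static void matMulFusedAC_ref(unsigned int i_m, unsigned int i_n, unsigned int i_k,
                              unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC,
                              double i_beta, const double *i_a, const double *i_b,
                              double *o_c)
{
  unsigned int l_m, l_n, l_k, l_r;
  for (l_m = 0; l_m < i_m; l_m++) {
    for (l_n = 0; l_n < i_n; l_n++) {
      for (l_r = 0; l_r < 8; l_r++) {
        double c = (i_beta != 0.0) ? i_beta * o_c[l_m*i_ldC*8 + l_n*8 + l_r] : 0.0;
        for (l_k = 0; l_k < i_k; l_k++) {
          c += i_a[l_m*i_ldA*8 + l_k*8 + l_r] * i_b[l_k*i_ldB + l_n];
        }
        o_c[l_m*i_ldC*8 + l_n*8 + l_r] = c;
      }
    }
  }
}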
/* * Alexander Heinecke (Intel Corp.) **************************************************************************** */ #include "edge_proxy_common.h" #include <libxsmm.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> /* #define EDGE_HP_1G */ /* #define HANDLE_AMOK */ #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) #include <sys/mman.h> #include <linux/mman.h> #endif static void * edge_hp_malloc(size_t nbytes, size_t alignment) { void *ret_ptr = NULL; #if defined(EDGE_HP_1G) size_t num_large_pages = nbytes / (1073741824L); if (nbytes > num_large_pages * 1073741824L) { num_large_pages++; } nbytes = (size_t) num_large_pages *1073741824L; printf("trying to allocate %ld 1G pages\n", num_large_pages); /* * ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, * MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 ); */ ret_ptr = mmap(NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0); if ((ret_ptr == (void *)(-1))) { fprintf(stderr, "1G mmap call failed\n"); exit(1); } #elif defined(EDGE_HP_2M) size_t num_large_pages = nbytes / (2097152UL); if (nbytes > num_large_pages * 2097152UL) { num_large_pages++; } nbytes = (size_t) num_large_pages *2097152UL; printf("trying to allocate %ld 2M pages\n", num_large_pages); /* * ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, * MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 ); */ ret_ptr = mmap(NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0); if ((ret_ptr == (void *)(-1))) { fprintf(stderr, "2M mmap call failed\n"); exit(1); } #else ret_ptr = libxsmm_aligned_malloc(nbytes, alignment); #endif return ret_ptr; } static void edge_hp_free(void *ptr, size_t nbytes) { LIBXSMM_UNUSED(nbytes); #if defined(EDGE_HP_1G) /* to be implemented */ #elif defined(EDGE_HP_2M) /* to be implemented */ #else libxsmm_free(ptr); #endif } #if defined(__AVX512F__) static void matMulFusedAC(unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c) { unsigned int l_m, l_n, l_k; for (l_m = 0; l_m < i_m; l_m++) { for (l_n = 0; l_n < i_n; l_n++) { __m512d vc = (i_beta != 0.0) ? _mm512_mul_pd(_mm512_loadu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0])), _mm512_set1_pd(i_beta)) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0]), vc); } } for (l_m = 0; l_m < i_m; l_m++) { for (l_n = 0; l_n < i_n; l_n++) { __m512d vc = _mm512_loadu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0])); for (l_k = 0; l_k < i_k; l_k++) { vc = _mm512_fmadd_pd(_mm512_set1_pd(i_b[l_k * i_ldB + l_n]), _mm512_loadu_pd(&(i_a[l_m * i_ldA * 8 + l_k * 8 + 0])), vc); } _mm512_storeu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0]), vc); } } } static void matMulFusedBC(unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c) { unsigned int l_m, l_n, l_k; for (l_m = 0; l_m < i_m; l_m++) { for (l_n = 0; l_n < i_n; l_n++) { __m512d vc = (i_beta != 0.0) ? 
_mm512_mul_pd(_mm512_loadu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0])), _mm512_set1_pd(i_beta)) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0]), vc); } } for (l_m = 0; l_m < i_m; l_m++) { for (l_n = 0; l_n < i_n; l_n++) { __m512d vc = _mm512_loadu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0])); for (l_k = 0; l_k < i_k; l_k++) { vc = _mm512_fmadd_pd(_mm512_set1_pd(i_a[l_m * i_ldA + l_k]), _mm512_loadu_pd(&(i_b[l_k * i_ldB * 8 + l_n * 8 + 0])), vc); } _mm512_storeu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0]), vc); } } } #endif static void amok_detect(const double *i_runtimes, size_t * io_amoks, const size_t i_workers) { double time_avg; size_t i; time_avg = 0.0; for (i = 0; i < i_workers; i++) { if (io_amoks[8 * i] == 0) { time_avg += i_runtimes[8 * i]; } } time_avg = time_avg / ((double)(i_workers - io_amoks[8 * i_workers])); /* let detect amoks */ for (i = 0; i < i_workers; i++) { if (io_amoks[8 * i] == 0) { if (i_runtimes[8 * i] > time_avg * 1.07) { /* this is the amok condition */ io_amoks[8 * i_workers]++; io_amoks[8 * i] = 1; } } } } static void amok_balance(const size_t * i_amoks, const size_t i_workers, const size_t i_worksize, const size_t i_mytid, size_t * io_chunk, size_t * io_mystart, size_t * io_myend) { size_t l_chunk, l_start, l_end; size_t l_cur_amoks = i_amoks[8 * i_workers]; size_t l_non_amoks = i_workers - l_cur_amoks; l_chunk = (i_worksize % l_non_amoks == 0) ? (i_worksize / l_non_amoks) : ((i_worksize / l_non_amoks) + 1); if (i_amoks[8 * i_mytid] != 0) { l_start = 0; l_end = 0; } else { size_t l_tid_offset = 0; size_t l_z; for (l_z = 0; l_z < i_mytid; l_z++) { if (i_amoks[8 * l_z] != 0) { l_tid_offset++; } } l_tid_offset = i_mytid - l_tid_offset; l_start = (l_tid_offset * l_chunk < i_worksize) ? (l_tid_offset * l_chunk) : i_worksize; l_end = ((l_tid_offset + 1) * l_chunk < i_worksize) ? 
((l_tid_offset + 1) * l_chunk) : i_worksize; } *io_chunk = l_chunk; *io_mystart = l_start; *io_myend = l_end; } int main(int argc, char *argv[]) { char *mat_a = 0; unsigned int *mat_a_rowptr, *mat_a_colidx; unsigned int mat_a_rowcount, mat_a_colcount, mat_a_nnz; double *mat_a_values; libxsmm_dmmfunction a_kernel; char *mat_b = 0; unsigned int *mat_b_rowptr, *mat_b_colidx; unsigned int mat_b_rowcount, mat_b_colcount, mat_b_nnz; double *mat_b_values; libxsmm_dmmfunction b_kernel; char *mat_c = 0; unsigned int *mat_c_rowptr, *mat_c_colidx; unsigned int mat_c_rowcount, mat_c_colcount, mat_c_nnz; double *mat_c_values; libxsmm_dmmfunction c_kernel; char *mat_st = 0; unsigned int *mat_st_rowptr, *mat_st_colidx; unsigned int mat_st_rowcount, mat_st_colcount, mat_st_nnz; double *mat_st_values; libxsmm_dmmfunction st_kernel; int num_modes = 9; int num_quants = 9; size_t num_elems = 0; size_t num_cfr = 8; size_t num_reps = 1; size_t elem_size; /* OpenMP: signed induction variables */ int i, j; const libxsmm_gemm_descriptor *l_xgemm_desc_stiff = 0, *l_xgemm_desc_star = 0; libxsmm_descriptor_blob l_xgemm_blob_stiff, l_xgemm_blob_star; const libxsmm_gemm_prefetch_type prefetch = LIBXSMM_GEMM_PREFETCH_NONE; const int flags = LIBXSMM_GEMM_FLAGS('N', 'N'); const double alpha = 1, beta = 1; double flops_vol; double *q; double *qt; double *qs; double *star; double *global; unsigned long long l_start, l_end; double l_total; unsigned int l_num_threads; unsigned int l_star_ent = num_quants * num_quants; double *l_total_thread; double *l_cur_thread_time; double time_max; double time_min; double time_avg; size_t *amoks; /* read cmd */ if ((argc > 1 && !strncmp(argv[1], "-h", 3)) || (argc != 8)) { printf("Usage: %s stif1 stif2 stif3 star nModes nElems nReps\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); /* some empty lines at the beginning */ printf("\n"); i = 1; if (argc > (int)i) mat_a = argv[i++]; if (argc > (int)i) mat_b = argv[i++]; if (argc > (int)i) mat_c = argv[i++]; if (argc > (int)i) mat_st = argv[i++]; if (argc > (int)i) num_modes = atoi(argv[i++]); if (argc > (int)i) num_elems = atoi(argv[i++]); if (argc > (int)i) num_reps = atoi(argv[i++]); elem_size = num_modes * num_quants * num_cfr; l_num_threads = 1; /* serial variant: restores the worker count the stripped OpenMP block used to set */ l_total_thread = (double *)malloc(8 * l_num_threads * sizeof(double)); l_cur_thread_time = (double *)malloc(8 * l_num_threads * sizeof(double)); amoks = (size_t *) malloc(8 * (l_num_threads + 1) * sizeof(size_t)); for (i = 0; i < 8 * ((int)l_num_threads + 1); i++) { amoks[i] = 0; } /* read matrices */ printf("reading sparse matrices... "); edge_sparse_csr_reader_double(mat_a, &mat_a_rowptr, &mat_a_colidx, &mat_a_values, &mat_a_rowcount, &mat_a_colcount, &mat_a_nnz); edge_sparse_csr_reader_double(mat_b, &mat_b_rowptr, &mat_b_colidx, &mat_b_values, &mat_b_rowcount, &mat_b_colcount, &mat_b_nnz); edge_sparse_csr_reader_double(mat_c, &mat_c_rowptr, &mat_c_colidx, &mat_c_values, &mat_c_rowcount, &mat_c_colcount, &mat_c_nnz); edge_sparse_csr_reader_double(mat_st, &mat_st_rowptr, &mat_st_colidx, &mat_st_values, &mat_st_rowcount, &mat_st_colcount, &mat_st_nnz); printf("done!\n\n"); /* generate kernels */ printf("generating code... 
"); l_xgemm_desc_stiff = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_stiff, num_quants, num_modes, num_modes, num_modes, 0, num_modes, alpha, beta, flags, prefetch); l_xgemm_desc_star = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_star, num_quants, num_modes, num_quants, 0, num_modes, num_modes, alpha, beta, flags, prefetch); a_kernel = libxsmm_create_xcsr_soa(l_xgemm_desc_stiff, mat_a_rowptr, mat_a_colidx, (const void *)mat_a_values).dmm; b_kernel = libxsmm_create_xcsr_soa(l_xgemm_desc_stiff, mat_b_rowptr, mat_b_colidx, (const void *)mat_b_values).dmm; c_kernel = libxsmm_create_xcsr_soa(l_xgemm_desc_stiff, mat_c_rowptr, mat_c_colidx, (const void *)mat_c_values).dmm; st_kernel = libxsmm_create_xcsr_soa(l_xgemm_desc_star, mat_st_rowptr, mat_st_colidx, (const void *)mat_st_values).dmm; if (a_kernel == 0) { printf("a kernel could not be built -> exit!"); exit(-1); } if (b_kernel == 0) { printf("b kernel could not be built -> exit!"); exit(-1); } if (b_kernel == 0) { printf("c kernel could not be built -> exit!"); exit(-1); } if (st_kernel == 0) { printf("st kernel could not be built -> exit!"); exit(-1); } printf("done!\n\n"); /* copying code to 1 GB page */ #if 0 #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) printf("copying code to 1GB page...\n"); onegcode = (void *)edge_hp_malloc(5 * 1024 * 1024, 2097152); memcpy(onegcode, (void *)a_kernel, 1505); memcpy(onegcode + (1 * 1024 * 1024) + 64, (void *)b_kernel, 2892); memcpy(onegcode + (2 * 1024 * 1024) + 128, (void *)c_kernel, 3249); memcpy(onegcode + (3 * 1024 * 1024) + 196, (void *)st_kernel, 11010); a_kernel = (libxsmm_dmmfunction) onegcode; b_kernel = (libxsmm_dmmfunction) (onegcode + (1 * 1024 * 1024) + 64); c_kernel = (libxsmm_dmmfunction) (onegcode + (2 * 1024 * 1024) + 128); st_kernel = (libxsmm_dmmfunction) (onegcode + (3 * 1024 * 1024) + 196); printf("...done\n\n"); #endif #endif /* create unknowns and t-unknowns */ printf("allocating and initializing fake data... 
\n"); /* DoFs */ printf(" q: %f MiB\n", ((double)(num_elems * num_modes * num_quants * num_cfr * sizeof(double))) / (1024.0 * 1024.0)); q = (double *)edge_hp_malloc(num_elems * num_modes * num_quants * num_cfr * sizeof(double), 2097152); /* tDofs */ printf(" qt: %f MiB\n", ((double)(num_elems * num_modes * num_quants * num_cfr * sizeof(double))) / (1024.0 * 1024.0)); qt = (double *)edge_hp_malloc(num_elems * num_modes * num_quants * num_cfr * sizeof(double), 2097152); /* star matrices */ printf(" star: %f MiB\n", ((double)(num_elems * 3 * l_star_ent * sizeof(double))) / (1024.0 * 1024.0)); star = (double *)edge_hp_malloc(num_elems * 3 * l_star_ent * sizeof(double), 2097152); /* stiffness matrices */ printf("global: %f MiB\n", ((double)(3 * num_modes * num_modes * sizeof(double))) / (1024.0 * 1024)); global = (double *)edge_hp_malloc(3 * num_modes * num_modes * sizeof(double), 2097152); /* per thread scratch */ printf(" t: %f MiB\n", ((double)(l_num_threads * num_modes * num_quants * num_cfr * sizeof(double))) / (1024.0 * 1024.0)); qs = (double *)edge_hp_malloc(l_num_threads * num_modes * num_quants * num_cfr * sizeof(double), 2097152); for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { q[i * elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { qt[i * elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)l_num_threads; i++) { for (j = 0; j < (int)elem_size; j++) { qs[i * elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)mat_st_nnz * 3; j++) { star[(i * 3 * mat_st_nnz) + j] = libxsmm_rng_f64(); } } for (i = 0; i < 3; i++) { for (j = 0; j < num_modes * num_modes; j++) { global[(i * num_modes * num_modes) + j] = libxsmm_rng_f64(); } } printf("allocation done!\n\n"); printf("running benchmark...\n"); l_start = libxsmm_timer_tick(); { libxsmm_timer_tickint mystart, myend; #if defined(HANDLE_AMOK) size_t cur_amoks = 0; size_t non_amoks = l_num_threads; #endif size_t l_el_chunk = 0; size_t l_el_start = 0; size_t l_el_end = 0; /* initial work distribution */ amok_balance(amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end); for (i = 0; i < (int)num_reps; i++) { #if defined(HANDLE_AMOK) /* did we had an amok? 
*/ if (cur_amoks != amoks[8 * l_num_threads]) { cur_amoks = amoks[8 * l_num_threads]; non_amoks = l_num_threads - cur_amoks; /* re-balance work */ amok_balance(amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end); } #endif mystart = libxsmm_timer_tick(); for (j = (int)l_el_start; j < (int)l_el_end; j++) { #if 1 st_kernel(star + (j * 3 * mat_st_nnz), qt + (j * elem_size), qs + (mytid * elem_size)); a_kernel(qs + (mytid * elem_size), global, q + (j * elem_size)); st_kernel(star + (j * 3 * mat_st_nnz) + mat_st_nnz, qt + (j * elem_size), qs + (mytid * elem_size)); b_kernel(qs + (mytid * elem_size), global +(num_modes * num_modes), q + (j * elem_size)); st_kernel(star + (j * 3 * mat_st_nnz) + (2 * mat_st_nnz), qt + (j * elem_size), qs + (mytid * elem_size)); c_kernel(qs + (mytid * elem_size), global +(2 * num_modes * num_modes), q + (j * elem_size)); #else matMulFusedBC(8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star + (j * 3 * mat_st_nnz), qt + (j * elem_size), qs + (mytid * elem_size)); matMulFusedAC(8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs + (mytid * elem_size), global, q + (j * elem_size)); matMulFusedBC(8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star + (j * 3 * mat_st_nnz) + mat_st_nnz, qt + (j * elem_size), qs + (mytid * elem_size)); matMulFusedAC(8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs + (mytid * elem_size), global +(num_modes * num_modes), q + (j * elem_size)); matMulFusedBC(8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star + (j * 3 * mat_st_nnz) + (2 * mat_st_nnz), qt + (j * elem_size), qs + (mytid * elem_size)); matMulFusedAC(8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs + (mytid * elem_size), global +(2 * num_modes * num_modes), q + (j * elem_size)); #endif } myend = libxsmm_timer_tick(); l_cur_thread_time[8 * mytid] = libxsmm_timer_duration(mystart, myend); l_total_thread[8 * mytid] += libxsmm_timer_duration(mystart, myend); #if defined(HANDLE_AMOK) /* checking for amoks is centralized business */ if (mytid == 0) { /* amok check */ amok_detect(l_cur_thread_time, amoks, l_num_threads); } #endif } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("...done!\n\n"); /* some timing stats */ time_max = 0.0; time_min = 80000000; time_avg = 0.0; for (i = 0; i < (int)l_num_threads; i++) { if (amoks[8 * i] == 0) { if (l_total_thread[8 * i] > time_max) time_max = l_total_thread[8 * i]; if (l_total_thread[8 * i] < time_min) time_min = l_total_thread[8 * i]; time_avg += l_total_thread[8 * i]; } } time_avg = time_avg / ((double)(l_num_threads - amoks[8 * l_num_threads])); flops_vol = (double)num_quants *(double)mat_a_nnz *(double)num_cfr *2.0; flops_vol += (double)num_quants *(double)mat_b_nnz *(double)num_cfr *2.0; flops_vol += (double)num_quants *(double)mat_c_nnz *(double)num_cfr *2.0; flops_vol += (double)num_modes *(double)mat_st_nnz *(double)num_cfr *6.0; /* 3 star matrix mul */ printf("%fs time for vol (asm), min %f, max %f, avg %f, #amoks %llu, amok-threads ", l_total, time_min, time_max, time_avg, (unsigned long long)amoks[8 * l_num_threads]); for (i = 0; i < (int)l_num_threads; i++) { if (amoks[8 * i] != 0) { printf("%i,", i); } } printf("\n"); printf("%f GFLOPS for vol (asm)\n", ((double)num_elems * (double)num_reps * flops_vol) / (l_total * 1.0e9)); printf("%f GiB/s for vol (asm)\n", (double)((double)num_elems * 
(double)elem_size * 8.0 * 3.0 * (double)num_reps) / (l_total * 1024.0 * 1024.0 * 1024.0)); printf("done!\n\n"); /* some empty lines at the end */ printf("\n\n"); return 0; }
/* * Alexander Heinecke (Intel Corp.) **************************************************************************** */ #include "edge_proxy_common.h" #include <libxsmm.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #if defined(_OPENMP) #include <omp.h> #endif /* #define EDGE_HP_1G */ /* #define HANDLE_AMOK */ #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) #include <sys/mman.h> #include <linux/mman.h> #endif static void * edge_hp_malloc(size_t nbytes, size_t alignment) { void *ret_ptr = NULL; #if defined(EDGE_HP_1G) size_t num_large_pages = nbytes / (1073741824L); if (nbytes > num_large_pages * 1073741824L) { num_large_pages++; } nbytes = (size_t) num_large_pages *1073741824L; printf("trying to allocate %ld 1G pages\n", num_large_pages); /* * ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, * MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 ); */ ret_ptr = mmap(NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0); if ((ret_ptr == (void *)(-1))) { fprintf(stderr, "1G mmap call failed\n"); exit(1); } #elif defined(EDGE_HP_2M) size_t num_large_pages = nbytes / (2097152UL); if (nbytes > num_large_pages * 2097152UL) { num_large_pages++; } nbytes = (size_t) num_large_pages *2097152UL; printf("trying to allocate %ld 2M pages\n", num_large_pages); /* * ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, * MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 ); */ ret_ptr = mmap(NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0); if ((ret_ptr == (void *)(-1))) { fprintf(stderr, "2M mmap call failed\n"); exit(1); } #else ret_ptr = libxsmm_aligned_malloc(nbytes, alignment); #endif return ret_ptr; } static void edge_hp_free(void *ptr, size_t nbytes) { LIBXSMM_UNUSED(nbytes); #if defined(EDGE_HP_1G) /* to be implemented */ #elif defined(EDGE_HP_2M) /* to be implemented */ #else libxsmm_free(ptr); #endif } #if defined(__AVX512F__) static void matMulFusedAC(unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c) { unsigned int l_m, l_n, l_k; for (l_m = 0; l_m < i_m; l_m++) { for (l_n = 0; l_n < i_n; l_n++) { __m512d vc = (i_beta != 0.0) ? _mm512_mul_pd(_mm512_loadu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0])), _mm512_set1_pd(i_beta)) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0]), vc); } } for (l_m = 0; l_m < i_m; l_m++) { for (l_n = 0; l_n < i_n; l_n++) { __m512d vc = _mm512_loadu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0])); for (l_k = 0; l_k < i_k; l_k++) { vc = _mm512_fmadd_pd(_mm512_set1_pd(i_b[l_k * i_ldB + l_n]), _mm512_loadu_pd(&(i_a[l_m * i_ldA * 8 + l_k * 8 + 0])), vc); } _mm512_storeu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0]), vc); } } } static void matMulFusedBC(unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c) { unsigned int l_m, l_n, l_k; for (l_m = 0; l_m < i_m; l_m++) { for (l_n = 0; l_n < i_n; l_n++) { __m512d vc = (i_beta != 0.0) ? 
_mm512_mul_pd(_mm512_loadu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0])), _mm512_set1_pd(i_beta)) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0]), vc); } } for (l_m = 0; l_m < i_m; l_m++) { for (l_n = 0; l_n < i_n; l_n++) { __m512d vc = _mm512_loadu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0])); for (l_k = 0; l_k < i_k; l_k++) { vc = _mm512_fmadd_pd(_mm512_set1_pd(i_a[l_m * i_ldA + l_k]), _mm512_loadu_pd(&(i_b[l_k * i_ldB * 8 + l_n * 8 + 0])), vc); } _mm512_storeu_pd(&(o_c[l_m * i_ldC * 8 + l_n * 8 + 0]), vc); } } } #endif static void amok_detect(const double *i_runtimes, size_t * io_amoks, const size_t i_workers) { double time_avg; size_t i; time_avg = 0.0; for (i = 0; i < i_workers; i++) { if (io_amoks[8 * i] == 0) { time_avg += i_runtimes[8 * i]; } } time_avg = time_avg / ((double)(i_workers - io_amoks[8 * i_workers])); /* let detect amoks */ for (i = 0; i < i_workers; i++) { if (io_amoks[8 * i] == 0) { if (i_runtimes[8 * i] > time_avg * 1.07) { /* this is the amok condition */ io_amoks[8 * i_workers]++; io_amoks[8 * i] = 1; } } } } static void amok_balance(const size_t * i_amoks, const size_t i_workers, const size_t i_worksize, const size_t i_mytid, size_t * io_chunk, size_t * io_mystart, size_t * io_myend) { size_t l_chunk, l_start, l_end; size_t l_cur_amoks = i_amoks[8 * i_workers]; size_t l_non_amoks = i_workers - l_cur_amoks; l_chunk = (i_worksize % l_non_amoks == 0) ? (i_worksize / l_non_amoks) : ((i_worksize / l_non_amoks) + 1); if (i_amoks[8 * i_mytid] != 0) { l_start = 0; l_end = 0; } else { size_t l_tid_offset = 0; size_t l_z; for (l_z = 0; l_z < i_mytid; l_z++) { if (i_amoks[8 * l_z] != 0) { l_tid_offset++; } } l_tid_offset = i_mytid - l_tid_offset; l_start = (l_tid_offset * l_chunk < i_worksize) ? (l_tid_offset * l_chunk) : i_worksize; l_end = ((l_tid_offset + 1) * l_chunk < i_worksize) ? 
((l_tid_offset + 1) * l_chunk) : i_worksize; } *io_chunk = l_chunk; *io_mystart = l_start; *io_myend = l_end; } int main(int argc, char *argv[]) { char *mat_a = 0; unsigned int *mat_a_rowptr, *mat_a_colidx; unsigned int mat_a_rowcount, mat_a_colcount, mat_a_nnz; double *mat_a_values; libxsmm_dmmfunction a_kernel; char *mat_b = 0; unsigned int *mat_b_rowptr, *mat_b_colidx; unsigned int mat_b_rowcount, mat_b_colcount, mat_b_nnz; double *mat_b_values; libxsmm_dmmfunction b_kernel; char *mat_c = 0; unsigned int *mat_c_rowptr, *mat_c_colidx; unsigned int mat_c_rowcount, mat_c_colcount, mat_c_nnz; double *mat_c_values; libxsmm_dmmfunction c_kernel; char *mat_st = 0; unsigned int *mat_st_rowptr, *mat_st_colidx; unsigned int mat_st_rowcount, mat_st_colcount, mat_st_nnz; double *mat_st_values; libxsmm_dmmfunction st_kernel; int num_modes = 9; int num_quants = 9; size_t num_elems = 0; size_t num_cfr = 8; size_t num_reps = 1; size_t elem_size; /* OpenMP: signed induction variables */ int i, j; const libxsmm_gemm_descriptor *l_xgemm_desc_stiff = 0, *l_xgemm_desc_star = 0; libxsmm_descriptor_blob l_xgemm_blob_stiff, l_xgemm_blob_star; const libxsmm_gemm_prefetch_type prefetch = LIBXSMM_GEMM_PREFETCH_NONE; const int flags = LIBXSMM_GEMM_FLAGS('N', 'N'); const double alpha = 1, beta = 1; double flops_vol; double *q; double *qt; double *qs; double *star; double *global; unsigned long long l_start, l_end; double l_total; unsigned int l_num_threads; unsigned int l_star_ent = num_quants * num_quants; double *l_total_thread; double *l_cur_thread_time; double time_max; double time_min; double time_avg; size_t *amoks; /* read cmd */ if ((argc > 1 && !strncmp(argv[1], "-h", 3)) || (argc != 8)) { printf("Usage: %s stif1 stif2 stif3 star nModes nElems nReps\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); /* some empty lines at the beginning */ printf("\n"); i = 1; if (argc > (int)i) mat_a = argv[i++]; if (argc > (int)i) mat_b = argv[i++]; if (argc > (int)i) mat_c = argv[i++]; if (argc > (int)i) mat_st = argv[i++]; if (argc > (int)i) num_modes = atoi(argv[i++]); if (argc > (int)i) num_elems = atoi(argv[i++]); if (argc > (int)i) num_reps = atoi(argv[i++]); elem_size = num_modes * num_quants * num_cfr; #if defined(_OPENMP) #pragma omp parallel { #pragma omp master { l_num_threads = omp_get_num_threads(); } } #else l_num_threads = 1; #endif l_total_thread = (double *)malloc(8 * l_num_threads * sizeof(double)); l_cur_thread_time = (double *)malloc(8 * l_num_threads * sizeof(double)); amoks = (size_t *) malloc(8 * (l_num_threads + 1) * sizeof(size_t)); for (i = 0; i < 8 * ((int)l_num_threads + 1); i++) { amoks[i] = 0; } /* read matrices */ printf("reading sparse matrices... "); edge_sparse_csr_reader_double(mat_a, &mat_a_rowptr, &mat_a_colidx, &mat_a_values, &mat_a_rowcount, &mat_a_colcount, &mat_a_nnz); edge_sparse_csr_reader_double(mat_b, &mat_b_rowptr, &mat_b_colidx, &mat_b_values, &mat_b_rowcount, &mat_b_colcount, &mat_b_nnz); edge_sparse_csr_reader_double(mat_c, &mat_c_rowptr, &mat_c_colidx, &mat_c_values, &mat_c_rowcount, &mat_c_colcount, &mat_c_nnz); edge_sparse_csr_reader_double(mat_st, &mat_st_rowptr, &mat_st_colidx, &mat_st_values, &mat_st_rowcount, &mat_st_colcount, &mat_st_nnz); printf("done!\n\n"); /* generate kernels */ printf("generating code... 
"); l_xgemm_desc_stiff = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_stiff, num_quants, num_modes, num_modes, num_modes, 0, num_modes, alpha, beta, flags, prefetch); l_xgemm_desc_star = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_star, num_quants, num_modes, num_quants, 0, num_modes, num_modes, alpha, beta, flags, prefetch); a_kernel = libxsmm_create_xcsr_soa(l_xgemm_desc_stiff, mat_a_rowptr, mat_a_colidx, (const void *)mat_a_values).dmm; b_kernel = libxsmm_create_xcsr_soa(l_xgemm_desc_stiff, mat_b_rowptr, mat_b_colidx, (const void *)mat_b_values).dmm; c_kernel = libxsmm_create_xcsr_soa(l_xgemm_desc_stiff, mat_c_rowptr, mat_c_colidx, (const void *)mat_c_values).dmm; st_kernel = libxsmm_create_xcsr_soa(l_xgemm_desc_star, mat_st_rowptr, mat_st_colidx, (const void *)mat_st_values).dmm; if (a_kernel == 0) { printf("a kernel could not be built -> exit!"); exit(-1); } if (b_kernel == 0) { printf("b kernel could not be built -> exit!"); exit(-1); } if (c_kernel == 0) { printf("c kernel could not be built -> exit!"); exit(-1); } if (st_kernel == 0) { printf("st kernel could not be built -> exit!"); exit(-1); } printf("done!\n\n"); /* copying code to 1 GB page */ #if 0 #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) printf("copying code to 1GB page...\n"); onegcode = (void *)edge_hp_malloc(5 * 1024 * 1024, 2097152); memcpy(onegcode, (void *)a_kernel, 1505); memcpy(onegcode + (1 * 1024 * 1024) + 64, (void *)b_kernel, 2892); memcpy(onegcode + (2 * 1024 * 1024) + 128, (void *)c_kernel, 3249); memcpy(onegcode + (3 * 1024 * 1024) + 196, (void *)st_kernel, 11010); a_kernel = (libxsmm_dmmfunction) onegcode; b_kernel = (libxsmm_dmmfunction) (onegcode + (1 * 1024 * 1024) + 64); c_kernel = (libxsmm_dmmfunction) (onegcode + (2 * 1024 * 1024) + 128); st_kernel = (libxsmm_dmmfunction) (onegcode + (3 * 1024 * 1024) + 196); printf("...done\n\n"); #endif #endif /* create unknowns and t-unknowns */ printf("allocating and initializing fake data... 
\n"); /* DoFs */ printf(" q: %f MiB\n", ((double)(num_elems * num_modes * num_quants * num_cfr * sizeof(double))) / (1024.0 * 1024.0)); q = (double *)edge_hp_malloc(num_elems * num_modes * num_quants * num_cfr * sizeof(double), 2097152); /* tDofs */ printf(" qt: %f MiB\n", ((double)(num_elems * num_modes * num_quants * num_cfr * sizeof(double))) / (1024.0 * 1024.0)); qt = (double *)edge_hp_malloc(num_elems * num_modes * num_quants * num_cfr * sizeof(double), 2097152); /* star matrices */ printf(" star: %f MiB\n", ((double)(num_elems * 3 * l_star_ent * sizeof(double))) / (1024.0 * 1024.0)); star = (double *)edge_hp_malloc(num_elems * 3 * l_star_ent * sizeof(double), 2097152); /* stiffness matrices */ printf("global: %f MiB\n", ((double)(3 * num_modes * num_modes * sizeof(double))) / (1024.0 * 1024)); global = (double *)edge_hp_malloc(3 * num_modes * num_modes * sizeof(double), 2097152); /* per thread scratch */ printf(" t: %f MiB\n", ((double)(l_num_threads * num_modes * num_quants * num_cfr * sizeof(double))) / (1024.0 * 1024.0)); qs = (double *)edge_hp_malloc(l_num_threads * num_modes * num_quants * num_cfr * sizeof(double), 2097152); for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { q[i * elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { qt[i * elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)l_num_threads; i++) { for (j = 0; j < (int)elem_size; j++) { qs[i * elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)mat_st_nnz * 3; j++) { star[(i * 3 * mat_st_nnz) + j] = libxsmm_rng_f64(); } } for (i = 0; i < 3; i++) { for (j = 0; j < num_modes * num_modes; j++) { global[(i * num_modes * num_modes) + j] = libxsmm_rng_f64(); } } printf("allocation done!\n\n"); printf("running benchmark...\n"); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) #pragma omp parallel private(i, j) #endif { #if defined(_OPENMP) int mytid = omp_get_thread_num(); #else int mytid = 0; #endif libxsmm_timer_tickint mystart, myend; #if defined(HANDLE_AMOK) size_t cur_amoks = 0; size_t non_amoks = l_num_threads; #endif size_t l_el_chunk = 0; size_t l_el_start = 0; size_t l_el_end = 0; /* initial work distribution */ amok_balance(amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end); for (i = 0; i < (int)num_reps; i++) { #if defined(HANDLE_AMOK) /* did we have an amok? 
*/ if (cur_amoks != amoks[8 * l_num_threads]) { cur_amoks = amoks[8 * l_num_threads]; non_amoks = l_num_threads - cur_amoks; /* re-balance work */ amok_balance(amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end); } #endif mystart = libxsmm_timer_tick(); for (j = (int)l_el_start; j < (int)l_el_end; j++) { #if 1 st_kernel(star + (j * 3 * mat_st_nnz), qt + (j * elem_size), qs + (mytid * elem_size)); a_kernel(qs + (mytid * elem_size), global, q + (j * elem_size)); st_kernel(star + (j * 3 * mat_st_nnz) + mat_st_nnz, qt + (j * elem_size), qs + (mytid * elem_size)); b_kernel(qs + (mytid * elem_size), global +(num_modes * num_modes), q + (j * elem_size)); st_kernel(star + (j * 3 * mat_st_nnz) + (2 * mat_st_nnz), qt + (j * elem_size), qs + (mytid * elem_size)); c_kernel(qs + (mytid * elem_size), global +(2 * num_modes * num_modes), q + (j * elem_size)); #else matMulFusedBC(8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star + (j * 3 * mat_st_nnz), qt + (j * elem_size), qs + (mytid * elem_size)); matMulFusedAC(8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs + (mytid * elem_size), global, q + (j * elem_size)); matMulFusedBC(8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star + (j * 3 * mat_st_nnz) + mat_st_nnz, qt + (j * elem_size), qs + (mytid * elem_size)); matMulFusedAC(8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs + (mytid * elem_size), global +(num_modes * num_modes), q + (j * elem_size)); matMulFusedBC(8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star + (j * 3 * mat_st_nnz) + (2 * mat_st_nnz), qt + (j * elem_size), qs + (mytid * elem_size)); matMulFusedAC(8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs + (mytid * elem_size), global +(2 * num_modes * num_modes), q + (j * elem_size)); #endif } myend = libxsmm_timer_tick(); l_cur_thread_time[8 * mytid] = libxsmm_timer_duration(mystart, myend); l_total_thread[8 * mytid] += libxsmm_timer_duration(mystart, myend); #if defined(_OPENMP) #pragma omp barrier #endif #if defined(HANDLE_AMOK) /* checking for amoks is centralized business */ if (mytid == 0) { /* amok check */ amok_detect(l_cur_thread_time, amoks, l_num_threads); } #if defined(_OPENMP) #pragma omp barrier #endif #endif } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("...done!\n\n"); /* some timing stats */ time_max = 0.0; time_min = 80000000; time_avg = 0.0; for (i = 0; i < (int)l_num_threads; i++) { if (amoks[8 * i] == 0) { if (l_total_thread[8 * i] > time_max) time_max = l_total_thread[8 * i]; if (l_total_thread[8 * i] < time_min) time_min = l_total_thread[8 * i]; time_avg += l_total_thread[8 * i]; } } time_avg = time_avg / ((double)(l_num_threads - amoks[8 * l_num_threads])); flops_vol = (double)num_quants *(double)mat_a_nnz *(double)num_cfr *2.0; flops_vol += (double)num_quants *(double)mat_b_nnz *(double)num_cfr *2.0; flops_vol += (double)num_quants *(double)mat_c_nnz *(double)num_cfr *2.0; flops_vol += (double)num_modes *(double)mat_st_nnz *(double)num_cfr *6.0; /* 3 star matrix mul */ printf("%fs time for vol (asm), min %f, max %f, avg %f, #amoks %llu, amok-threads ", l_total, time_min, time_max, time_avg, (unsigned long long)amoks[8 * l_num_threads]); for (i = 0; i < (int)l_num_threads; i++) { if (amoks[8 * i] != 0) { printf("%i,", i); } } printf("\n"); printf("%f GFLOPS for vol (asm)\n", ((double)num_elems * (double)num_reps * 
flops_vol) / (l_total * 1.0e9)); printf("%f GiB/s for vol (asm)\n", (double)((double)num_elems * (double)elem_size * 8.0 * 3.0 * (double)num_reps) / (l_total * 1024.0 * 1024.0 * 1024.0)); printf("done!\n\n"); /* some empty lines at the end */ printf("\n\n"); return 0; }
GB_unop__identity_fp64_uint8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fp64_uint8) // op(A') function: GB (_unop_tran__identity_fp64_uint8) // C type: double // A type: uint8_t // cast: double cij = (double) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ double z = (double) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (double) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fp64_uint8) ( double *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fp64_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fp64_uint8) // op(A') function: GB (_unop_tran__identity_fp64_uint8) // C type: double // A type: uint8_t // cast: double cij = (double) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ double z = (double) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (double) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fp64_uint8) ( double *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fp64_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fp64_uint8) // op(A') function: GB (_unop_tran__identity_fp64_uint8) // C type: double // A type: uint8_t // cast: double cij = (double) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ double z = (double) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (double) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fp64_uint8) ( double *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fp64_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
laplace_par.h
#ifndef _LAPLACE_PAR_ #define _LAPLACE_PAR_ #include<omp.h> template<int SIZE> inline void initialize(double a[SIZE + 2][SIZE + 2], double b[SIZE + 2][SIZE + 2]) { //TODO implement your solution in here #pragma omp parallel for for (int i = 0; i < SIZE + 2; i++) for (int j = 0; j < SIZE + 2; j++) { a[i][j] = 0.0; b[i][j] = 0.0; } } template<int SIZE> inline void time_step(double a[SIZE + 2][SIZE + 2], double b[SIZE + 2][SIZE + 2], int n) { //TODO implement your solution in here if (n % 2 == 0) { #pragma omp parallel for for (int i = 1; i < SIZE + 1; i++) for (int j = 1; j < SIZE + 1; j++) b[i][j] = (a[i + 1][j] + a[i - 1][j] + a[i][j - 1] + a[i][j + 1]) *0.25; } else { #pragma omp parallel for for (int i = 1; i < SIZE + 1; i++) for (int j = 1; j < SIZE + 1; j++) a[i][j] = (b[i + 1][j] + b[i - 1][j] + b[i][j - 1] + b[i][j + 1])*0.25; } } #endif // !_LAPLACE_PAR_
#ifndef _LAPLACE_PAR_ #define _LAPLACE_PAR_ #include<omp.h> template < int SIZE > inline void initialize(double a[SIZE + 2][SIZE + 2], double b[SIZE + 2][SIZE + 2]) { //TODO implement your solution in here for (int i = 0; i < SIZE + 2; i++) for (int j = 0; j < SIZE + 2; j++) { a[i][j] = 0.0; b[i][j] = 0.0; } } template < int SIZE > inline void time_step(double a[SIZE + 2][SIZE + 2], double b[SIZE + 2][SIZE + 2], int n) { //TODO implement your solution in here if (n % 2 == 0) { for (int i = 1; i < SIZE + 1; i++) for (int j = 1; j < SIZE + 1; j++) b[i][j] = (a[i + 1][j] + a[i - 1][j] + a[i][j - 1] + a[i][j + 1]) * 0.25; } else { for (int i = 1; i < SIZE + 1; i++) for (int j = 1; j < SIZE + 1; j++) a[i][j] = (b[i + 1][j] + b[i - 1][j] + b[i][j - 1] + b[i][j + 1]) * 0.25; } } #endif /* // !_LAPLACE_PAR_ */
#ifndef _LAPLACE_PAR_ #define _LAPLACE_PAR_ #include<omp.h> template < int SIZE > inline void initialize(double a[SIZE + 2][SIZE + 2], double b[SIZE + 2][SIZE + 2]) { //TODO implement your solution in here #pragma omp parallel for for (int i = 0; i < SIZE + 2; i++) for (int j = 0; j < SIZE + 2; j++) { a[i][j] = 0.0; b[i][j] = 0.0; } } template < int SIZE > inline void time_step(double a[SIZE + 2][SIZE + 2], double b[SIZE + 2][SIZE + 2], int n) { //TODO implement your solution in here if (n % 2 == 0) { #pragma omp parallel for for (int i = 1; i < SIZE + 1; i++) for (int j = 1; j < SIZE + 1; j++) b[i][j] = (a[i + 1][j] + a[i - 1][j] + a[i][j - 1] + a[i][j + 1]) * 0.25; } else { #pragma omp parallel for for (int i = 1; i < SIZE + 1; i++) for (int j = 1; j < SIZE + 1; j++) a[i][j] = (b[i + 1][j] + b[i - 1][j] + b[i][j - 1] + b[i][j + 1]) * 0.25; } } #endif /* // !_LAPLACE_PAR_ */
hello_omp.c
#include <stdio.h> #include <omp.h> int main() { #pragma omp parallel { #pragma omp single { printf("\nhello world "); printf("with %d threads out of %d available \n\n", omp_get_num_threads(), omp_get_num_procs()); } printf("hello from thread number %d \n", omp_get_thread_num()); fflush(stdout); } printf("\ngoodbye world \n\n"); return 0; }
#include <stdio.h> #include <omp.h> int main() { #pragma omp single { printf("\nhello world "); printf("with %d threads out of %d available \n\n", omp_get_num_threads(), omp_get_num_procs()); } printf("hello from thread number %d \n", omp_get_thread_num()); fflush(stdout); printf("\ngoodbye world \n\n"); return 0; }
#include <stdio.h> #include <omp.h> int main() { #pragma omp parallel { #pragma omp single { printf("\nhello world "); printf("with %d threads out of %d available \n\n", omp_get_num_threads(), omp_get_num_procs()); } printf("hello from thread number %d \n", omp_get_thread_num()); fflush(stdout); } printf("\ngoodbye world \n\n"); return 0; }
bfs_csr_mt.c
#include "graph_defs.h" #include "prefetcher.h" typedef struct bfs_metadata_st { char touched; volatile unsigned long queue_next; } bfs_metadata_t; static volatile unsigned long queue_head = ULONG_MAX; static volatile unsigned long vertex_position = 0; static bfs_metadata_t *metadata; static csr_t * volatile graph; unsigned long MAX_CACHE = ULONG_MAX; long MIN_CACHE = 0; unsigned long visited = 0; void prefetcher_random_callback(unsigned long *laf, unsigned long laf_size, unsigned long ift) { static unsigned long old_hoq = ULONG_MAX; unsigned long current_hoq = ULONG_MAX; static unsigned long ra_depth = 0; static char preload = 0; static long pf_visited = 0; unsigned long entries = 0; /* Fill in inner-loop entries from BFS queue */ /* if ((preload == 0) && (ra_depth > MAX_CACHE)) { preload = 1; current_hoq = ULONG_MAX; } */ current_hoq = old_hoq; if ((current_hoq == ULONG_MAX) || (((signed long) (pf_visited - visited)) > MIN_CACHE)/*|| (ra_depth > MIN_CACHE)*/) { current_hoq = queue_head; pf_visited = visited; // ra_depth = 0; } // if (((signed long)(pf_visited - visited)) > MIN_CACHE) return; /* if(current_hoq != ULONG_MAX) { current_hoq = metadata[current_hoq].queue_next; } */ while (entries != ift && current_hoq != ULONG_MAX) { unsigned long page = graph->index[current_hoq]; unsigned long end = graph->index[current_hoq + 1]; page = page >> (ASSUME_PAGE_SHIFT + 3); /* offset is in bits ! */ end = end >> (ASSUME_PAGE_SHIFT + 3); // if(laf[HASH_MODULO(page, laf_size)] != page) { // laf[HASH_MODULO(page, laf_size)] = page; // for (; page <= end; page++) { // if (entries==ift) break; laf[entries] = page; if (end > page) laf[entries + (2 * laf_size)] = end - page; entries++; // } // } old_hoq = current_hoq; current_hoq = metadata[current_hoq].queue_next; pf_visited++; } ra_depth += entries; } unsigned long prefetcher_sequential_callback(unsigned long* aux_offset) { unsigned long offset = graph->index[vertex_position]; return offset >> (ASSUME_PAGE_SHIFT + 3); } unsigned long alist_entries_seen = 0; // #pragma omp threadprivate(current_vertex) unsigned long total_queue_demands = 0; unsigned long queue_above_threshold = 0; unsigned long queue_length = 0; /* returns number of connected components */ static unsigned long bfs(csr_t *graph, unsigned long start_node) { unsigned long i; unsigned long components = 0; unsigned long queue_tail = ULONG_MAX; unsigned long nq_head = ULONG_MAX; unsigned long nq_tail = ULONG_MAX; char* finished_flag = NULL; unsigned long time_comp, time_giant = 0, id_giant; i = start_node; do { vertex_position = i; if (metadata[i].touched == 0) { CLOCK_START(time_comp); metadata[i].touched = 1; components++; BFS_PUSH(nq_head, nq_tail, i, metadata); queue_length = 1; } else { i++; if (i >= graph->vertex_cnt) i = 0; continue; } while (nq_head != ULONG_MAX) { queue_head = nq_head; queue_tail = nq_tail; nq_head = ULONG_MAX; nq_tail = ULONG_MAX; #pragma omp parallel default(shared) { #pragma omp task default(shared) { while (1) { unsigned long current_vertex; char finished = 0; #pragma omp critical (check_queue) { if (queue_head != ULONG_MAX) { current_vertex = BFS_POP(queue_head, queue_tail, metadata); visited++; } else { current_vertex = ULONG_MAX; } } if (current_vertex == ULONG_MAX) break; //fprintf(stderr, "V %ld %d\n", current_vertex, // omp_get_num_threads()); if (current_vertex != ULONG_MAX) { unsigned long lq_head = ULONG_MAX; unsigned long lq_tail = ULONG_MAX; csr_edge_iterator_t iter; csr_init_edge_iterator(graph, current_vertex, &iter); while (csr_iter_step(graph, 
&iter) == 0) { if (!iter.incoming) { unsigned long target = iter.neighbour; //#pragma omp critical (atomicset) { if (__sync_bool_compare_and_swap(&(metadata[target].touched),0, 1)) { //metadata[target].touched = 1; BFS_PUSH(lq_head, lq_tail, target, metadata); // fprintf(stderr, "T %ld %d\n", target, // omp_get_thread_num()); } } } } #pragma omp critical (stitch) { BFS_STITCH(nq_head, nq_tail, lq_head, lq_tail, metadata); // fprintf(stderr, "%ld %ld %ld %ld\n", nq_head, nq_tail, lq_head, lq_tail); } } } } } } CLOCK_STOP(time_comp); if (time_comp > time_giant) { time_giant = time_comp; id_giant = i; printf("Visited %ld\n", visited); } i = i + 1; if (i >= graph->vertex_cnt) { i = 0; } } while (i != start_node); // fprintf(stderr, "%ld %ld\n", visited, graph->vertex_cnt); assert(visited == graph->vertex_cnt); printf("TIME GIANT COMP %lu\n", time_giant); printf("ID GIANT COMP %lu\n", id_giant); return components; } int main(int argc, char **argv) { unsigned long time_bfs, time_total, components; CLOCK_START(time_total); if (argc < 3) { fprintf(stderr, "Usage %s graph_name root_id\n", argv[0]); exit(-1); } #ifdef PREFETCHER char *env_var; env_var = getenv("CMAX"); if(env_var != NULL) { MAX_CACHE = atol(env_var); } env_var = getenv("CMIN"); if(env_var != NULL) { MIN_CACHE = atol(env_var); } bind_master(); init_prefetcher(prefetcher_random_callback, NULL); // prefetcher_sequential_callback); #endif graph = open_csr(argv[1]); metadata = (bfs_metadata_t*) map_anon_memory(graph->vertex_cnt * sizeof(bfs_metadata_t), "vertex metadata"); //balloon_inflate(); /* Simulate semi-em conditions */ print_mlocked_memory(); unsigned long root_id = atol(argv[2]); assert(root_id < graph->vertex_cnt); /* Perhaps mmap /dev/null instead ? */ memset(metadata, 0, graph->vertex_cnt * sizeof(bfs_metadata_t)); #ifdef PREFETCHER launch_prefetch_thread(graph->fd_calist); #endif struct rusage ru_begin; getrusage(RUSAGE_SELF, &ru_begin); CLOCK_START(time_bfs); components = bfs(graph, root_id); CLOCK_STOP(time_bfs); struct rusage ru_end; getrusage(RUSAGE_SELF, &ru_end); #ifdef PREFETCHER terminate_prefetch_thread(); destroy_prefetcher(); #endif munmap(metadata, graph->vertex_cnt * sizeof(bfs_metadata_t)); close_csr(graph); CLOCK_STOP(time_total); printf("COMPONENTS %lu\n", components); printf("TIME BFS %lu\n", time_bfs); printf("TIME TOTAL %lu\n", time_total); print_rusage_stats(stdout, &ru_begin, &ru_end); printf("F_THRESHOLD %f\n", ((double) queue_above_threshold) / total_queue_demands); return 0; }
#include "graph_defs.h" #include "prefetcher.h" typedef struct bfs_metadata_st { char touched; volatile unsigned long queue_next; } bfs_metadata_t; static volatile unsigned long queue_head = ULONG_MAX; static volatile unsigned long vertex_position = 0; static bfs_metadata_t *metadata; static csr_t *volatile graph; unsigned long MAX_CACHE = ULONG_MAX; long MIN_CACHE = 0; unsigned long visited = 0; void prefetcher_random_callback(unsigned long *laf, unsigned long laf_size, unsigned long ift) { static unsigned long old_hoq = ULONG_MAX; unsigned long current_hoq = ULONG_MAX; static unsigned long ra_depth = 0; static char preload = 0; static long pf_visited = 0; unsigned long entries = 0; /* Fill in inner-loop entries from BFS queue */ /* * if ((preload == 0) && (ra_depth > MAX_CACHE)) { preload = 1; * current_hoq = ULONG_MAX; } */ current_hoq = old_hoq; if ((current_hoq == ULONG_MAX) || (((signed long)(pf_visited - visited)) > MIN_CACHE) /* || (ra_depth > MIN_CACHE) */ ) { current_hoq = queue_head; pf_visited = visited; //ra_depth = 0; } //if (((signed long)(pf_visited - visited)) > MIN_CACHE) return; /* * if(current_hoq != ULONG_MAX) { current_hoq = * metadata[current_hoq].queue_next; } */ while (entries != ift && current_hoq != ULONG_MAX) { unsigned long page = graph->index[current_hoq]; unsigned long end = graph->index[current_hoq + 1]; page = page >> (ASSUME_PAGE_SHIFT + 3); /* offset is in bits ! */ end = end >> (ASSUME_PAGE_SHIFT + 3); //if (laf[HASH_MODULO(page, laf_size)] != page) { //laf[HASH_MODULO(page, laf_size)] = page; //for (; page <= end; page++) { //if (entries == ift) break; laf[entries] = page; if (end > page) laf[entries + (2 * laf_size)] = end - page; entries++; // } // } old_hoq = current_hoq; current_hoq = metadata[current_hoq].queue_next; pf_visited++; } ra_depth += entries; } unsigned long prefetcher_sequential_callback(unsigned long *aux_offset) { unsigned long offset = graph->index[vertex_position]; return offset >> (ASSUME_PAGE_SHIFT + 3); } unsigned long alist_entries_seen = 0; // unsigned long total_queue_demands = 0; unsigned long queue_above_threshold = 0; unsigned long queue_length = 0; /* returns number of connected components */ static unsigned long bfs(csr_t * graph, unsigned long start_node) { unsigned long i; unsigned long components = 0; unsigned long queue_tail = ULONG_MAX; unsigned long nq_head = ULONG_MAX; unsigned long nq_tail = ULONG_MAX; char *finished_flag = NULL; unsigned long time_comp, time_giant = 0, id_giant; i = start_node; do { vertex_position = i; if (metadata[i].touched == 0) { CLOCK_START(time_comp); metadata[i].touched = 1; components++; BFS_PUSH(nq_head, nq_tail, i, metadata); queue_length = 1; } else { i++; if (i >= graph->vertex_cnt) i = 0; continue; } while (nq_head != ULONG_MAX) { queue_head = nq_head; queue_tail = nq_tail; nq_head = ULONG_MAX; nq_tail = ULONG_MAX; #pragma omp task default(shared) { while (1) { unsigned long current_vertex; char finished = 0; #pragma omp critical (check_queue) { if (queue_head != ULONG_MAX) { current_vertex = BFS_POP(queue_head, queue_tail, metadata); visited++; } else { current_vertex = ULONG_MAX; } } if (current_vertex == ULONG_MAX) break; //fprintf(stderr, "V %ld %d\n", current_vertex, //omp_get_num_threads()); if (current_vertex != ULONG_MAX) { unsigned long lq_head = ULONG_MAX; unsigned long lq_tail = ULONG_MAX; csr_edge_iterator_t iter; csr_init_edge_iterator(graph, current_vertex, &iter); while (csr_iter_step(graph, &iter) == 0) { if (!iter.incoming) { unsigned long target = iter.neighbour; 
// if (__sync_bool_compare_and_swap(&(metadata[target].touched), 0, 1)) { //metadata[target].touched = 1; BFS_PUSH(lq_head, lq_tail, target, metadata); //fprintf(stderr, "T %ld %d\n", target, //omp_get_thread_num()); } } } BFS_STITCH(nq_head, nq_tail, lq_head, lq_tail, metadata); //fprintf(stderr, "%ld %ld %ld %ld\n", nq_head, nq_tail, lq_head, lq_tail); } } } } CLOCK_STOP(time_comp); if (time_comp > time_giant) { time_giant = time_comp; id_giant = i; printf("Visited %ld\n", visited); } i = i + 1; if (i >= graph->vertex_cnt) { i = 0; } } while (i != start_node); //fprintf(stderr, "%ld %ld\n", visited, graph->vertex_cnt); assert(visited == graph->vertex_cnt); printf("TIME GIANT COMP %lu\n", time_giant); printf("ID GIANT COMP %lu\n", id_giant); return components; } int main(int argc, char **argv) { unsigned long time_bfs, time_total, components; CLOCK_START(time_total); if (argc < 3) { fprintf(stderr, "Usage %s graph_name root_id\n", argv[0]); exit(-1); } #ifdef PREFETCHER char *env_var; env_var = getenv("CMAX"); if (env_var != NULL) { MAX_CACHE = atol(env_var); } env_var = getenv("CMIN"); if (env_var != NULL) { MIN_CACHE = atol(env_var); } bind_master(); init_prefetcher(prefetcher_random_callback, NULL); //prefetcher_sequential_callback); #endif graph = open_csr(argv[1]); metadata = (bfs_metadata_t *) map_anon_memory(graph->vertex_cnt * sizeof(bfs_metadata_t), "vertex metadata"); //balloon_inflate(); /* Simulate semi-em conditions */ print_mlocked_memory(); unsigned long root_id = atol(argv[2]); assert(root_id < graph->vertex_cnt); /* Perhaps mmap /dev/null instead ? */ memset(metadata, 0, graph->vertex_cnt * sizeof(bfs_metadata_t)); #ifdef PREFETCHER launch_prefetch_thread(graph->fd_calist); #endif struct rusage ru_begin; getrusage(RUSAGE_SELF, &ru_begin); CLOCK_START(time_bfs); components = bfs(graph, root_id); CLOCK_STOP(time_bfs); struct rusage ru_end; getrusage(RUSAGE_SELF, &ru_end); #ifdef PREFETCHER terminate_prefetch_thread(); destroy_prefetcher(); #endif munmap(metadata, graph->vertex_cnt * sizeof(bfs_metadata_t)); close_csr(graph); CLOCK_STOP(time_total); printf("COMPONENTS %lu\n", components); printf("TIME BFS %lu\n", time_bfs); printf("TIME TOTAL %lu\n", time_total); print_rusage_stats(stdout, &ru_begin, &ru_end); printf("F_THRESHOLD %f\n", ((double)queue_above_threshold) / total_queue_demands); return 0; }
#include "graph_defs.h" #include "prefetcher.h" typedef struct bfs_metadata_st { char touched; volatile unsigned long queue_next; } bfs_metadata_t; static volatile unsigned long queue_head = ULONG_MAX; static volatile unsigned long vertex_position = 0; static bfs_metadata_t *metadata; static csr_t *volatile graph; unsigned long MAX_CACHE = ULONG_MAX; long MIN_CACHE = 0; unsigned long visited = 0; void prefetcher_random_callback(unsigned long *laf, unsigned long laf_size, unsigned long ift) { static unsigned long old_hoq = ULONG_MAX; unsigned long current_hoq = ULONG_MAX; static unsigned long ra_depth = 0; static char preload = 0; static long pf_visited = 0; unsigned long entries = 0; /* Fill in inner-loop entries from BFS queue */ /* * if ((preload == 0) && (ra_depth > MAX_CACHE)) { preload = 1; * current_hoq = ULONG_MAX; } */ current_hoq = old_hoq; if ((current_hoq == ULONG_MAX) || (((signed long)(pf_visited - visited)) > MIN_CACHE) /* || (ra_depth > MIN_CACHE) */ ) { current_hoq = queue_head; pf_visited = visited; //ra_depth = 0; } //if (((signed long)(pf_visited - visited)) > MIN_CACHE) return; /* * if(current_hoq != ULONG_MAX) { current_hoq = * metadata[current_hoq].queue_next; } */ while (entries != ift && current_hoq != ULONG_MAX) { unsigned long page = graph->index[current_hoq]; unsigned long end = graph->index[current_hoq + 1]; page = page >> (ASSUME_PAGE_SHIFT + 3); /* offset is in bits ! */ end = end >> (ASSUME_PAGE_SHIFT + 3); //if (laf[HASH_MODULO(page, laf_size)] != page) { //laf[HASH_MODULO(page, laf_size)] = page; //for (; page <= end; page++) { //if (entries == ift) break; laf[entries] = page; if (end > page) laf[entries + (2 * laf_size)] = end - page; entries++; // } // } old_hoq = current_hoq; current_hoq = metadata[current_hoq].queue_next; pf_visited++; } ra_depth += entries; } unsigned long prefetcher_sequential_callback(unsigned long *aux_offset) { unsigned long offset = graph->index[vertex_position]; return offset >> (ASSUME_PAGE_SHIFT + 3); } unsigned long alist_entries_seen = 0; // #pragma omp threadprivate(current_vertex) unsigned long total_queue_demands = 0; unsigned long queue_above_threshold = 0; unsigned long queue_length = 0; /* returns number of connected components */ static unsigned long bfs(csr_t * graph, unsigned long start_node) { unsigned long i; unsigned long components = 0; unsigned long queue_tail = ULONG_MAX; unsigned long nq_head = ULONG_MAX; unsigned long nq_tail = ULONG_MAX; char *finished_flag = NULL; unsigned long time_comp, time_giant = 0, id_giant; i = start_node; do { vertex_position = i; if (metadata[i].touched == 0) { CLOCK_START(time_comp); metadata[i].touched = 1; components++; BFS_PUSH(nq_head, nq_tail, i, metadata); queue_length = 1; } else { i++; if (i >= graph->vertex_cnt) i = 0; continue; } while (nq_head != ULONG_MAX) { queue_head = nq_head; queue_tail = nq_tail; nq_head = ULONG_MAX; nq_tail = ULONG_MAX; #pragma omp parallel default(shared) { #pragma omp task default(shared) { while (1) { unsigned long current_vertex; char finished = 0; #pragma omp critical (check_queue) { if (queue_head != ULONG_MAX) { current_vertex = BFS_POP(queue_head, queue_tail, metadata); visited++; } else { current_vertex = ULONG_MAX; } } if (current_vertex == ULONG_MAX) break; //fprintf(stderr, "V %ld %d\n", current_vertex, //omp_get_num_threads()); if (current_vertex != ULONG_MAX) { unsigned long lq_head = ULONG_MAX; unsigned long lq_tail = ULONG_MAX; csr_edge_iterator_t iter; csr_init_edge_iterator(graph, current_vertex, &iter); while 
(csr_iter_step(graph, &iter) == 0) { if (!iter.incoming) { unsigned long target = iter.neighbour; // #pragma omp critical (atomicset) { if (__sync_bool_compare_and_swap(&(metadata[target].touched), 0, 1)) { //metadata[target].touched = 1; BFS_PUSH(lq_head, lq_tail, target, metadata); //fprintf(stderr, "T %ld %d\n", target, //omp_get_thread_num()); } } } } #pragma omp critical (stitch) { BFS_STITCH(nq_head, nq_tail, lq_head, lq_tail, metadata); //fprintf(stderr, "%ld %ld %ld %ld\n", nq_head, nq_tail, lq_head, lq_tail); } } } } } } CLOCK_STOP(time_comp); if (time_comp > time_giant) { time_giant = time_comp; id_giant = i; printf("Visited %ld\n", visited); } i = i + 1; if (i >= graph->vertex_cnt) { i = 0; } } while (i != start_node); //fprintf(stderr, "%ld %ld\n", visited, graph->vertex_cnt); assert(visited == graph->vertex_cnt); printf("TIME GIANT COMP %lu\n", time_giant); printf("ID GIANT COMP %lu\n", id_giant); return components; } int main(int argc, char **argv) { unsigned long time_bfs, time_total, components; CLOCK_START(time_total); if (argc < 3) { fprintf(stderr, "Usage %s graph_name root_id\n", argv[0]); exit(-1); } #ifdef PREFETCHER char *env_var; env_var = getenv("CMAX"); if (env_var != NULL) { MAX_CACHE = atol(env_var); } env_var = getenv("CMIN"); if (env_var != NULL) { MIN_CACHE = atol(env_var); } bind_master(); init_prefetcher(prefetcher_random_callback, NULL); //prefetcher_sequential_callback); #endif graph = open_csr(argv[1]); metadata = (bfs_metadata_t *) map_anon_memory(graph->vertex_cnt * sizeof(bfs_metadata_t), "vertex metadata"); //balloon_inflate(); /* Simulate semi-em conditions */ print_mlocked_memory(); unsigned long root_id = atol(argv[2]); assert(root_id < graph->vertex_cnt); /* Perhaps mmap /dev/null instead ? */ memset(metadata, 0, graph->vertex_cnt * sizeof(bfs_metadata_t)); #ifdef PREFETCHER launch_prefetch_thread(graph->fd_calist); #endif struct rusage ru_begin; getrusage(RUSAGE_SELF, &ru_begin); CLOCK_START(time_bfs); components = bfs(graph, root_id); CLOCK_STOP(time_bfs); struct rusage ru_end; getrusage(RUSAGE_SELF, &ru_end); #ifdef PREFETCHER terminate_prefetch_thread(); destroy_prefetcher(); #endif munmap(metadata, graph->vertex_cnt * sizeof(bfs_metadata_t)); close_csr(graph); CLOCK_STOP(time_total); printf("COMPONENTS %lu\n", components); printf("TIME BFS %lu\n", time_bfs); printf("TIME TOTAL %lu\n", time_total); print_rusage_stats(stdout, &ru_begin, &ru_end); printf("F_THRESHOLD %f\n", ((double)queue_above_threshold) / total_queue_demands); return 0; }
sxc_fmt_plug.c
/* SXC cracker patch for JtR. Hacked together during Summer of 2012 by * Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_sxc; #elif FMT_REGISTERS_H john_register_one(&fmt_sxc); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "sha.h" #include <openssl/blowfish.h> #include "pbkdf2_hmac_sha1.h" #ifdef _OPENMP #include <omp.h> #define OMP_SCALE 2 // tuned on core i7 #endif #include "memdbg.h" #define FORMAT_LABEL "sxc" #define FORMAT_NAME "StarOffice .sxc" #ifdef MMX_COEF #define ALGORITHM_NAME "SHA1 Blowfish " SHA1_N_STR MMX_TYPE #else #define ALGORITHM_NAME "SHA1 Blowfish 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 20 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_ALIGN sizeof(int) #ifdef MMX_COEF #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests sxc_tests[] = { {"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a7937b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdccb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e0248fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"}, {NULL} }; #if defined (_OPENMP) static int omp_t = 1; #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)]; static struct custom_salt { int cipher_type; // FIXME: cipher_type seems to be ignored int checksum_type; int iterations; int key_size; int iv_length; int salt_length; int original_length; int length; unsigned char iv[16]; unsigned char salt[32]; unsigned char content[1024]; } *cur_salt; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static int ishex(char *q) { while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return !*q; } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p; int res; if (strncmp(ciphertext, "$sxc$", 5)) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += 6; if ((p = strtok(ctcopy, "*")) == NULL) /* cipher type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtok(NULL, "*")) == NULL) /* checksum type */ goto err; res = atoi(p); 
if (res != 0 && res != 1) goto err; if ((p = strtok(NULL, "*")) == NULL) /* iterations */ goto err; res = atoi(p); if (res <= 0) goto err; if ((p = strtok(NULL, "*")) == NULL) /* key size */ goto err; res = atoi(p); if (res != 16 && res != 32) goto err; if ((p = strtok(NULL, "*")) == NULL) /* checksum field (skipped) */ goto err; if (strlen(p) != BINARY_SIZE * 2) goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "*")) == NULL) /* iv length */ goto err; res = atoi(p); if (res <= 0 || res > 16) goto err; if ((p = strtok(NULL, "*")) == NULL) /* iv */ goto err; if (strlen(p) != res * 2) goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "*")) == NULL) /* salt length */ goto err; res = atoi(p); if (res <= 0 || res > 32) goto err; if ((p = strtok(NULL, "*")) == NULL) /* salt */ goto err; if (strlen(p) != res * 2) goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "*")) == NULL) /* original length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) /* 1024 because of "unsigned char output[1024];" in crypt_all */ goto err; if ((p = strtok(NULL, "*")) == NULL) /* length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) goto err; if ((p = strtok(NULL, "*")) == NULL) /* content */ goto err; if (strlen(p) != res * 2) goto err; if (!ishex(p)) goto err; if (strtok(NULL, "*") != NULL) /* the end */ goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += 6; /* skip over "$sxc$*" */ p = strtok(ctcopy, "*"); cs.cipher_type = atoi(p); p = strtok(NULL, "*"); cs.checksum_type = atoi(p); p = strtok(NULL, "*"); cs.iterations = atoi(p); p = strtok(NULL, "*"); cs.key_size = atoi(p); strtok(NULL, "*"); /* skip checksum field */ p = strtok(NULL, "*"); cs.iv_length = atoi(p); p = strtok(NULL, "*"); for (i = 0; i < cs.iv_length; i++) cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "*"); cs.salt_length = atoi(p); p = strtok(NULL, "*"); for (i = 0; i < cs.salt_length; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "*"); cs.original_length = atoi(p); p = strtok(NULL, "*"); cs.length = atoi(p); p = strtok(NULL, "*"); for (i = 0; i < cs.length; i++) cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE+1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; ctcopy += 6; /* skip over "$sxc$*" */ strtok(ctcopy, "*"); strtok(NULL, "*"); strtok(NULL, "*"); strtok(NULL, "*"); p = strtok(NULL, "*"); for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } MEM_FREE(keeptr); return out; } static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; } static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; } static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; } static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; } static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; } static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; } static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; } static void 
set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { unsigned char key[MAX_KEYS_PER_CRYPT][32]; unsigned char hash[MAX_KEYS_PER_CRYPT][32]; BF_KEY bf_key; int bf_ivec_pos; unsigned char ivec[8]; unsigned char output[1024]; int i; SHA_CTX ctx; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { SHA1_Init(&ctx); SHA1_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i])); SHA1_Final((unsigned char *)hash[i], &ctx); } #ifdef MMX_COEF { int lens[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = 20; pin[i] = (unsigned char*)hash[i]; pout[i] = key[i]; } pbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, pout, cur_salt->key_size, 0); } #else pbkdf2_sha1(hash[0], 20, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, key[0], cur_salt->key_size, 0); #if !ARCH_LITTLE_ENDIAN for (i = 0; i < cur_salt->key_size/sizeof(ARCH_WORD_32); ++i) { ((ARCH_WORD_32*)key[0])[i] = JOHNSWAP(((ARCH_WORD_32*)key[0])[i]); } #endif #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { bf_ivec_pos = 0; memcpy(ivec, cur_salt->iv, 8); BF_set_key(&bf_key, cur_salt->key_size, key[i]); BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0); SHA1_Init(&ctx); SHA1_Update(&ctx, output, cur_salt->original_length); SHA1_Final((unsigned char*)crypt_out[index+i], &ctx); } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void sxc_set_key(char *key, int index) { int saved_key_length = strlen(key); if (saved_key_length > PLAINTEXT_LENGTH) saved_key_length = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_key_length); saved_key[index][saved_key_length] = 0; } static char *get_key(int index) { return saved_key[index]; } #if FMT_MAIN_VERSION > 11 static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->iterations; } #endif struct fmt_main fmt_sxc = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { "iteration count", }, #endif sxc_tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, #if FMT_MAIN_VERSION > 11 { iteration_count, }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, set_salt, sxc_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
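The valid()/get_salt() pair in the sxc plugin above walks a '*'-separated ciphertext with strtok(). For reference, the accepted field order, restated from that parsing code (a summary of what the code already checks, not new behavior):

/*
 * $sxc$*<cipher_type>*<checksum_type>*<iterations>*<key_size>
 *      *<checksum>*<iv_length>*<iv>*<salt_length>*<salt>
 *      *<original_length>*<length>*<content>
 *
 *   cipher_type        0 or 1 (stored but currently ignored, per the FIXME)
 *   checksum_type      0 or 1
 *   iterations         > 0, PBKDF2-HMAC-SHA1 round count
 *   key_size           16 or 32 (bytes)
 *   checksum           40 hex digits: SHA-1 of the decrypted content
 *   iv_length / iv     1..16 bytes, hex-encoded
 *   salt_length / salt 1..32 bytes, hex-encoded
 *   original_length    1..1024, plaintext byte count fed to the final SHA-1
 *   length / content   1..1024 bytes, hex-encoded ciphertext
 */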
#if FMT_EXTERNS_H extern struct fmt_main fmt_sxc; #elif FMT_REGISTERS_H john_register_one(&fmt_sxc); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "sha.h" #include <openssl/blowfish.h> #include "pbkdf2_hmac_sha1.h" #include "memdbg.h" #define FORMAT_LABEL "sxc" #define FORMAT_NAME "StarOffice .sxc" #ifdef MMX_COEF #define ALGORITHM_NAME "SHA1 Blowfish " SHA1_N_STR MMX_TYPE #else #define ALGORITHM_NAME "SHA1 Blowfish 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 20 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_ALIGN sizeof(int) #ifdef MMX_COEF #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests sxc_tests[] = { {"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a7937b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdccb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e0248fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32(*crypt_out)[32 / sizeof(ARCH_WORD_32)]; static struct custom_salt { int cipher_type; //FIXME: cipher_type seems to be ignored int checksum_type; int iterations; int key_size; int iv_length; int salt_length; int original_length; int length; unsigned char iv[16]; unsigned char salt[32]; unsigned char content[1024]; } *cur_salt; static void init(struct fmt_main *self) { saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static int ishex(char *q) { while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return !*q; } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p; int res; if (strncmp(ciphertext, "$sxc$", 5)) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += 6; if ((p = strtok(ctcopy, "*")) == NULL) /* cipher type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtok(NULL, "*")) == NULL) /* checksum type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtok(NULL, "*")) == NULL) /* iterations */ goto err; res = atoi(p); if (res <= 0) goto err; if ((p = strtok(NULL, "*")) == NULL) /* key size */ goto err; res = atoi(p); if 
(res != 16 && res != 32) goto err; if ((p = strtok(NULL, "*")) == NULL) /* checksum field (skipped) */ goto err; if (strlen(p) != BINARY_SIZE * 2) goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "*")) == NULL) /* iv length */ goto err; res = atoi(p); if (res <= 0 || res > 16) goto err; if ((p = strtok(NULL, "*")) == NULL) /* iv */ goto err; if (strlen(p) != res * 2) goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "*")) == NULL) /* salt length */ goto err; res = atoi(p); if (res <= 0 || res > 32) goto err; if ((p = strtok(NULL, "*")) == NULL) /* salt */ goto err; if (strlen(p) != res * 2) goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "*")) == NULL) /* original length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) /* 1024 because of "unsigned char * output[1024];" in crypt_all */ goto err; if ((p = strtok(NULL, "*")) == NULL) /* length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) goto err; if ((p = strtok(NULL, "*")) == NULL) /* content */ goto err; if (strlen(p) != res * 2) goto err; if (!ishex(p)) goto err; if (strtok(NULL, "*") != NULL) /* the end */ goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void * get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += 6; /* skip over "$sxc$*" */ p = strtok(ctcopy, "*"); cs.cipher_type = atoi(p); p = strtok(NULL, "*"); cs.checksum_type = atoi(p); p = strtok(NULL, "*"); cs.iterations = atoi(p); p = strtok(NULL, "*"); cs.key_size = atoi(p); strtok(NULL, "*"); /* skip checksum field */ p = strtok(NULL, "*"); cs.iv_length = atoi(p); p = strtok(NULL, "*"); for (i = 0; i < cs.iv_length; i++) cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "*"); cs.salt_length = atoi(p); p = strtok(NULL, "*"); for (i = 0; i < cs.salt_length; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "*"); cs.original_length = atoi(p); p = strtok(NULL, "*"); cs.length = atoi(p); p = strtok(NULL, "*"); for (i = 0; i < cs.length; i++) cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void * get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE + 1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; ctcopy += 6; /* skip over "$sxc$*" */ strtok(ctcopy, "*"); strtok(NULL, "*"); strtok(NULL, "*"); strtok(NULL, "*"); p = strtok(NULL, "*"); for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } MEM_FREE(keeptr); return out; } static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; } static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; } static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; } static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; } static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; } static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; } static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; { unsigned char key[MAX_KEYS_PER_CRYPT][32]; unsigned char 
hash[MAX_KEYS_PER_CRYPT][32]; BF_KEY bf_key; int bf_ivec_pos; unsigned char ivec[8]; unsigned char output[1024]; int i; SHA_CTX ctx; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { SHA1_Init(&ctx); SHA1_Update(&ctx, (unsigned char *)saved_key[index + i], strlen(saved_key[index + i])); SHA1_Final((unsigned char *)hash[i], &ctx); } #ifdef MMX_COEF { int lens[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = 20; pin[i] = (unsigned char *)hash[i]; pout[i] = key[i]; } pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, pout, cur_salt->key_size, 0); } #else pbkdf2_sha1(hash[0], 20, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, key[0], cur_salt->key_size, 0); #if !ARCH_LITTLE_ENDIAN for (i = 0; i < cur_salt->key_size / sizeof(ARCH_WORD_32); ++i) { ((ARCH_WORD_32 *) key[0])[i] = JOHNSWAP(((ARCH_WORD_32 *) key[0])[i]); } #endif #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { bf_ivec_pos = 0; memcpy(ivec, cur_salt->iv, 8); BF_set_key(&bf_key, cur_salt->key_size, key[i]); BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0); SHA1_Init(&ctx); SHA1_Update(&ctx, output, cur_salt->original_length); SHA1_Final((unsigned char *)crypt_out[index + i], &ctx); } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void sxc_set_key(char *key, int index) { int saved_key_length = strlen(key); if (saved_key_length > PLAINTEXT_LENGTH) saved_key_length = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_key_length); saved_key[index][saved_key_length] = 0; } static char * get_key(int index) { return saved_key[index]; } #if FMT_MAIN_VERSION > 11 static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int)my_salt->iterations; } #endif struct fmt_main fmt_sxc = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { "iteration count", }, #endif sxc_tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, #if FMT_MAIN_VERSION > 11 { iteration_count, }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, set_salt, sxc_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
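The no-OpenMP variant above and the OpenMP variant that follows differ only in init()'s key-count scaling and in the scaffolding around crypt_all()'s outer loop. That scaffolding is easy to misread because the #ifdef guards the for statement itself; a minimal sketch of the idiom, with work() as a placeholder for the per-batch body:

#include <stdio.h>

#define MAX_KEYS_PER_CRYPT 1	/* 1 in the scalar build, SSE_GROUP_SZ_SHA1 with SIMD */

static void work(int index)	/* placeholder for the per-batch crypt body */
{
	printf("processing batch starting at key %d\n", index);
}

static int crypt_all_skeleton(int count)
{
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		/* With _OPENMP this brace block is the loop body, and each
		 * iteration handles one batch of MAX_KEYS_PER_CRYPT keys in
		 * parallel (the loop variable is implicitly private).
		 * Without _OPENMP there is no loop at all: the block runs
		 * exactly once with index == 0, which is correct because
		 * init() did not scale max_keys_per_crypt in that build. */
		work(index);
	}
	return count;
}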
#if FMT_EXTERNS_H extern struct fmt_main fmt_sxc; #elif FMT_REGISTERS_H john_register_one(&fmt_sxc); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "sha.h" #include <openssl/blowfish.h> #include "pbkdf2_hmac_sha1.h" #ifdef _OPENMP #include <omp.h> #define OMP_SCALE 2 // tuned on core i7 #endif #include "memdbg.h" #define FORMAT_LABEL "sxc" #define FORMAT_NAME "StarOffice .sxc" #ifdef MMX_COEF #define ALGORITHM_NAME "SHA1 Blowfish " SHA1_N_STR MMX_TYPE #else #define ALGORITHM_NAME "SHA1 Blowfish 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 20 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_ALIGN sizeof(int) #ifdef MMX_COEF #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests sxc_tests[] = { {"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a7937b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdccb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e0248fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"}, {NULL} }; #if defined (_OPENMP) static int omp_t = 1; #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32(*crypt_out)[32 / sizeof(ARCH_WORD_32)]; static struct custom_salt { int cipher_type; //FIXME: cipher_type seems to be ignored int checksum_type; int iterations; int key_size; int iv_length; int salt_length; int original_length; int length; unsigned char iv[16]; unsigned char salt[32]; unsigned char content[1024]; } *cur_salt; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static int ishex(char *q) { while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return !*q; } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p; int res; if (strncmp(ciphertext, "$sxc$", 5)) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += 6; if ((p = strtok(ctcopy, "*")) == NULL) /* cipher type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtok(NULL, "*")) == NULL) /* checksum type */ goto err; res = atoi(p); if 
(res != 0 && res != 1) goto err; if ((p = strtok(NULL, "*")) == NULL) /* iterations */ goto err; res = atoi(p); if (res <= 0) goto err; if ((p = strtok(NULL, "*")) == NULL) /* key size */ goto err; res = atoi(p); if (res != 16 && res != 32) goto err; if ((p = strtok(NULL, "*")) == NULL) /* checksum field (skipped) */ goto err; if (strlen(p) != BINARY_SIZE * 2) goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "*")) == NULL) /* iv length */ goto err; res = atoi(p); if (res <= 0 || res > 16) goto err; if ((p = strtok(NULL, "*")) == NULL) /* iv */ goto err; if (strlen(p) != res * 2) goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "*")) == NULL) /* salt length */ goto err; res = atoi(p); if (res <= 0 || res > 32) goto err; if ((p = strtok(NULL, "*")) == NULL) /* salt */ goto err; if (strlen(p) != res * 2) goto err; if (!ishex(p)) goto err; if ((p = strtok(NULL, "*")) == NULL) /* original length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) /* 1024 because of "unsigned char * output[1024];" in crypt_all */ goto err; if ((p = strtok(NULL, "*")) == NULL) /* length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) goto err; if ((p = strtok(NULL, "*")) == NULL) /* content */ goto err; if (strlen(p) != res * 2) goto err; if (!ishex(p)) goto err; if (strtok(NULL, "*") != NULL) /* the end */ goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void * get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += 6; /* skip over "$sxc$*" */ p = strtok(ctcopy, "*"); cs.cipher_type = atoi(p); p = strtok(NULL, "*"); cs.checksum_type = atoi(p); p = strtok(NULL, "*"); cs.iterations = atoi(p); p = strtok(NULL, "*"); cs.key_size = atoi(p); strtok(NULL, "*"); /* skip checksum field */ p = strtok(NULL, "*"); cs.iv_length = atoi(p); p = strtok(NULL, "*"); for (i = 0; i < cs.iv_length; i++) cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "*"); cs.salt_length = atoi(p); p = strtok(NULL, "*"); for (i = 0; i < cs.salt_length; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtok(NULL, "*"); cs.original_length = atoi(p); p = strtok(NULL, "*"); cs.length = atoi(p); p = strtok(NULL, "*"); for (i = 0; i < cs.length; i++) cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void * get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE + 1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; ctcopy += 6; /* skip over "$sxc$*" */ strtok(ctcopy, "*"); strtok(NULL, "*"); strtok(NULL, "*"); strtok(NULL, "*"); p = strtok(NULL, "*"); for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } MEM_FREE(keeptr); return out; } static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; } static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; } static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; } static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; } static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; } static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; } static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; } static void 
set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { unsigned char key[MAX_KEYS_PER_CRYPT][32]; unsigned char hash[MAX_KEYS_PER_CRYPT][32]; BF_KEY bf_key; int bf_ivec_pos; unsigned char ivec[8]; unsigned char output[1024]; int i; SHA_CTX ctx; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { SHA1_Init(&ctx); SHA1_Update(&ctx, (unsigned char *)saved_key[index + i], strlen(saved_key[index + i])); SHA1_Final((unsigned char *)hash[i], &ctx); } #ifdef MMX_COEF { int lens[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = 20; pin[i] = (unsigned char *)hash[i]; pout[i] = key[i]; } pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, pout, cur_salt->key_size, 0); } #else pbkdf2_sha1(hash[0], 20, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, key[0], cur_salt->key_size, 0); #if !ARCH_LITTLE_ENDIAN for (i = 0; i < cur_salt->key_size / sizeof(ARCH_WORD_32); ++i) { ((ARCH_WORD_32 *) key[0])[i] = JOHNSWAP(((ARCH_WORD_32 *) key[0])[i]); } #endif #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { bf_ivec_pos = 0; memcpy(ivec, cur_salt->iv, 8); BF_set_key(&bf_key, cur_salt->key_size, key[i]); BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0); SHA1_Init(&ctx); SHA1_Update(&ctx, output, cur_salt->original_length); SHA1_Final((unsigned char *)crypt_out[index + i], &ctx); } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void sxc_set_key(char *key, int index) { int saved_key_length = strlen(key); if (saved_key_length > PLAINTEXT_LENGTH) saved_key_length = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_key_length); saved_key[index][saved_key_length] = 0; } static char * get_key(int index) { return saved_key[index]; } #if FMT_MAIN_VERSION > 11 static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int)my_salt->iterations; } #endif struct fmt_main fmt_sxc = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { "iteration count", }, #endif sxc_tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, #if FMT_MAIN_VERSION > 11 { iteration_count, }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, set_salt, sxc_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
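Stripped of John's batching, SIMD, and endianness handling, the check these sxc crypt_all() implementations perform is a four-stage pipeline: SHA-1 the password, stretch with PBKDF2-HMAC-SHA1, Blowfish-CFB64 decrypt the stored content, then SHA-1 the plaintext and compare against the stored checksum. A self-contained sketch against OpenSSL; the function name sxc_check() and its fixed-size buffers are illustrative, and the real plugin uses its bundled pbkdf2_sha1()/pbkdf2_sha1_sse() helpers rather than PKCS5_PBKDF2_HMAC_SHA1:

#include <string.h>
#include <openssl/sha.h>
#include <openssl/evp.h>
#include <openssl/blowfish.h>

/* Returns 1 when `password` decrypts `content` to data whose SHA-1
 * matches `expected` (the 20-byte checksum field of the hash). */
static int sxc_check(const char *password,
                     const unsigned char *salt, int salt_len, int iterations,
                     int key_size, const unsigned char iv[8],
                     const unsigned char *content, int len, int original_len,
                     const unsigned char expected[20])
{
	unsigned char pwhash[20], key[32], out[1024], digest[20], ivec[8];
	BF_KEY bf_key;
	int ivec_pos = 0;

	/* 1. SHA-1 of the password is the "password" input to PBKDF2. */
	SHA1((const unsigned char *)password, strlen(password), pwhash);

	/* 2. PBKDF2-HMAC-SHA1 stretches it into the Blowfish key.
	 *    (Stand-in for the plugin's own pbkdf2_sha1().) */
	PKCS5_PBKDF2_HMAC_SHA1((const char *)pwhash, sizeof(pwhash),
	                       salt, salt_len, iterations, key_size, key);

	/* 3. Blowfish CFB64 decrypt; enc flag 0 matches the plugin, and
	 *    only the first 8 IV bytes are used, as in its memcpy. */
	memcpy(ivec, iv, 8);
	BF_set_key(&bf_key, key_size, key);
	BF_cfb64_encrypt(content, out, len, &bf_key, ivec, &ivec_pos, 0);

	/* 4. SHA-1 over original_length plaintext bytes vs. checksum. */
	SHA1(out, original_len, digest);
	return memcmp(digest, expected, 20) == 0;
}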
9857.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0], double B[120 + 0][120 + 0][120 + 0]) { int t14; int t12; int t10; int t8; int t6; int t4; int t2; for (t2 = 1; t2 <= 500; t2 += 1) { #pragma omp parallel for private(t4,t6,t8,t10,t12,t14) for (t4 = 1; t4 <= n - 2; t4 += 8) for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 8) for (t10 = t8; t10 <= (n - 2 < t8 + 7 ? n - 2 : t8 + 7); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 16) for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1) B[t6][t10][t14] = 0.125 * (A[t6 + 1][t10][t14] - 2 * A[t6][t10][t14] + A[t6 - 1][t10][t14]) + 0.125 * (A[t6][t10 + 1][t14] - 2 * A[t6][t10][t14] + A[t6][t10 - 1][t14]) + 0.125 * (A[t6][t10][t14 + 1] - 2 * A[t6][t10][t14] + A[t6][t10][t14 - 1]) + A[t6][t10][t14]; #pragma omp parallel for private(t4,t6,t8,t10,t12,t14) for (t4 = 1; t4 <= n - 2; t4 += 8) for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 8) for (t10 = t8; t10 <= (n - 2 < t8 + 7 ? n - 2 : t8 + 7); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 16) for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1) A[t6][t10][t14] = 0.125 * (B[t6 + 1][t10][t14] - 2 * B[t6][t10][t14] + B[t6 - 1][t10][t14]) + 0.125 * (B[t6][t10 + 1][t14] - 2 * B[t6][t10][t14] + B[t6][t10 - 1][t14]) + 0.125 * (B[t6][t10][t14 + 1] - 2 * B[t6][t10][t14] + B[t6][t10][t14 - 1]) + B[t6][t10][t14]; } }
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0], double B[120 + 0][120 + 0][120 + 0]) { int t14; int t12; int t10; int t8; int t6; int t4; int t2; for (t2 = 1; t2 <= 500; t2 += 1) { for (t4 = 1; t4 <= n - 2; t4 += 8) for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 8) for (t10 = t8; t10 <= (n - 2 < t8 + 7 ? n - 2 : t8 + 7); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 16) for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1) B[t6][t10][t14] = 0.125 * (A[t6 + 1][t10][t14] - 2 * A[t6][t10][t14] + A[t6 - 1][t10][t14]) + 0.125 * (A[t6][t10 + 1][t14] - 2 * A[t6][t10][t14] + A[t6][t10 - 1][t14]) + 0.125 * (A[t6][t10][t14 + 1] - 2 * A[t6][t10][t14] + A[t6][t10][t14 - 1]) + A[t6][t10][t14]; for (t4 = 1; t4 <= n - 2; t4 += 8) for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 8) for (t10 = t8; t10 <= (n - 2 < t8 + 7 ? n - 2 : t8 + 7); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 16) for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1) A[t6][t10][t14] = 0.125 * (B[t6 + 1][t10][t14] - 2 * B[t6][t10][t14] + B[t6 - 1][t10][t14]) + 0.125 * (B[t6][t10 + 1][t14] - 2 * B[t6][t10][t14] + B[t6][t10 - 1][t14]) + 0.125 * (B[t6][t10][t14 + 1] - 2 * B[t6][t10][t14] + B[t6][t10][t14 - 1]) + B[t6][t10][t14]; } }
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0], double B[120 + 0][120 + 0][120 + 0]) { int t14; int t12; int t10; int t8; int t6; int t4; int t2; for (t2 = 1; t2 <= 500; t2 += 1) { #pragma omp parallel for private(t4,t6,t8,t10,t12,t14) for (t4 = 1; t4 <= n - 2; t4 += 8) for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 8) for (t10 = t8; t10 <= (n - 2 < t8 + 7 ? n - 2 : t8 + 7); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 16) for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1) B[t6][t10][t14] = 0.125 * (A[t6 + 1][t10][t14] - 2 * A[t6][t10][t14] + A[t6 - 1][t10][t14]) + 0.125 * (A[t6][t10 + 1][t14] - 2 * A[t6][t10][t14] + A[t6][t10 - 1][t14]) + 0.125 * (A[t6][t10][t14 + 1] - 2 * A[t6][t10][t14] + A[t6][t10][t14 - 1]) + A[t6][t10][t14]; #pragma omp parallel for private(t4,t6,t8,t10,t12,t14) for (t4 = 1; t4 <= n - 2; t4 += 8) for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 8) for (t10 = t8; t10 <= (n - 2 < t8 + 7 ? n - 2 : t8 + 7); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 16) for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1) A[t6][t10][t14] = 0.125 * (B[t6 + 1][t10][t14] - 2 * B[t6][t10][t14] + B[t6 - 1][t10][t14]) + 0.125 * (B[t6][t10 + 1][t14] - 2 * B[t6][t10][t14] + B[t6][t10 - 1][t14]) + 0.125 * (B[t6][t10][t14 + 1] - 2 * B[t6][t10][t14] + B[t6][t10][t14 - 1]) + B[t6][t10][t14]; } }
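The three kernel_heat_3d variants above differ only in whether the two sweeps carry #pragma omp parallel for; all share the CHILL-generated 8 x 8 x 16 tiling expressed through the ternary min() upper bounds, and all hardcode the time loop at 500 iterations even though tsteps is a parameter. For comparison, a minimal untiled reference of the same two-sweep Jacobi update, parallelized the same way (assumes n <= 120 per the array signatures; loop names i/j/k are illustrative):

void kernel_heat_3d_ref(int tsteps, int n,
                        double A[120][120][120], double B[120][120][120])
{
	int t, i, j, k;

	for (t = 1; t <= tsteps; t++) {
		/* Sweep 1: read A, write B. */
#pragma omp parallel for private(j, k)
		for (i = 1; i < n - 1; i++)
			for (j = 1; j < n - 1; j++)
				for (k = 1; k < n - 1; k++)
					B[i][j][k] = 0.125 * (A[i+1][j][k] - 2.0 * A[i][j][k] + A[i-1][j][k])
					           + 0.125 * (A[i][j+1][k] - 2.0 * A[i][j][k] + A[i][j-1][k])
					           + 0.125 * (A[i][j][k+1] - 2.0 * A[i][j][k] + A[i][j][k-1])
					           + A[i][j][k];
		/* Sweep 2: read B, write A. */
#pragma omp parallel for private(j, k)
		for (i = 1; i < n - 1; i++)
			for (j = 1; j < n - 1; j++)
				for (k = 1; k < n - 1; k++)
					A[i][j][k] = 0.125 * (B[i+1][j][k] - 2.0 * B[i][j][k] + B[i-1][j][k])
					           + 0.125 * (B[i][j+1][k] - 2.0 * B[i][j][k] + B[i][j-1][k])
					           + 0.125 * (B[i][j][k+1] - 2.0 * B[i][j][k] + B[i][j][k-1])
					           + B[i][j][k];
	}
}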
rawSHA1_ng_fmt_plug.c
// // Alternative SSE2 optimised raw SHA-1 implementation for John The Ripper. // // This plugin requires -msse4 in CFLAGS. // // Copyright (C) 2012 Tavis Ormandy <taviso@cmpxchg8b.com> // Copyright (c) 2015 magnum (AVX2/AVX512 support) // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Library General Public // License as published by the Free Software Foundation; either // version 2 of the License, or (at your option) any later version. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Library General Public License for more details. // // You should have received a copy of the GNU Library General Public // License along with this library; if not, write to the // Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, // Boston, MA 02110-1301, USA. // #include "arch.h" #if defined(SIMD_COEF_32) && (SIMD_COEF_32 < 16 || ARCH_BITS >= 64) && !_MSC_VER && !__ARM_NEON #if FMT_EXTERNS_H extern struct fmt_main fmt_sha1_ng; #elif FMT_REGISTERS_H john_register_one(&fmt_sha1_ng); #else #include "misc.h" #if !defined(DEBUG) && !defined(WITH_ASAN) // These compilers claim to be __GNUC__ but warn on gcc pragmas. #if __GNUC__ && !__INTEL_COMPILER && !__clang__ && !__llvm__ && !_MSC_VER #pragma GCC optimize 3 #pragma GCC optimize "-fprefetch-loop-arrays" #endif #endif #ifndef _GNU_SOURCE #define _GNU_SOURCE 1 #endif #include <string.h> #include <stdint.h> #if !FAST_FORMATS_OMP #undef _OPENMP #elif _OPENMP #include <omp.h> #endif #include "stdbool.h" #if SIMD_COEF_32 > 8 #include "int128.h" #endif #include "pseudo_intrinsics.h" #include "params.h" #include "formats.h" #include "memory.h" #include "sha.h" #include "johnswap.h" #include "aligned.h" #include "rawSHA1_common.h" #include "memdbg.h" #define VWIDTH SIMD_COEF_32 #define SHA1_BLOCK_WORDS 16 #define SHA1_DIGEST_WORDS 5 #define SHA1_PARALLEL_HASH 512 // This must be a multiple of max VWIDTH. #ifdef __MIC__ #ifndef OMP_SCALE #define OMP_SCALE 128 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 2048 // Multiplier to hide OMP overhead #endif #endif #define X(X0, X2, X8, X13) do { \ X0 = vxor(X0, X8); \ X0 = vxor(X0, X13); \ X0 = vxor(X0, X2); \ X0 = vroti_epi32(X0, 1); \ } while (false) #define R1(W, A, B, C, D, E) do { \ E = vadd_epi32(E, K); \ E = vadd_epi32(E, vcmov(C, D, B)); \ E = vadd_epi32(E, W); \ B = vroti_epi32(B, 30); \ E = vadd_epi32(E, vroti_epi32(A, 5)); \ } while (false) #define R2(W, A, B, C, D, E) do { \ E = vadd_epi32(E, K); \ E = vadd_epi32(E, vxor(vxor(B, C), D)); \ E = vadd_epi32(E, W); \ B = vroti_epi32(B, 30); \ E = vadd_epi32(E, vroti_epi32(A, 5)); \ } while (false) #define R4(W, A, B, C, D, E) do { \ E = vadd_epi32(E, K); \ E = vadd_epi32(E, vxor(vxor(B, C), D)); \ E = vadd_epi32(E, W); \ E = vadd_epi32(E, vroti_epi32(A, 5)); \ } while (false) #if !VCMOV_EMULATED #define R3(W, A, B, C, D, E) do { \ E = vadd_epi32(E, K); \ E = vadd_epi32(E, vcmov(D, B, vxor(C, B))); \ E = vadd_epi32(E, W); \ B = vroti_epi32(B, 30); \ E = vadd_epi32(E, vroti_epi32(A, 5)); \ } while (false) #else #define R3(W, A, B, C, D, E) do { \ E = vadd_epi32(E, K); \ E = vadd_epi32(E, vor(vand(D, B), vand(vor(D, B), C))); \ E = vadd_epi32(E, W); \ B = vroti_epi32(B, 30); \ E = vadd_epi32(E, vroti_epi32(A, 5)); \ } while (false) #endif #if SIMD_COEF_32 == 4 // Not used for AVX2 and better, which has gather instructions. 
#define _MM_TRANSPOSE4_EPI32(R0, R1, R2, R3) do {\ vtype T0, T1, T2, T3; \ T0 = vunpacklo_epi32(R0, R1); \ T1 = vunpacklo_epi32(R2, R3); \ T2 = vunpackhi_epi32(R0, R1); \ T3 = vunpackhi_epi32(R2, R3); \ R0 = vunpacklo_epi64(T0, T1); \ R1 = vunpackhi_epi64(T0, T1); \ R2 = vunpacklo_epi64(T2, T3); \ R3 = vunpackhi_epi64(T2, T3); \ } while (false) #endif // M and N contain the first and last 128bits of a 512bit SHA-1 message block // respectively. The remaining 256bits are always zero, and so are not stored // here to avoid the load overhead. // For AVX2, we have half a block and for AVX512/MIC we actually have a full // block. static uint32_t (*M)[VWIDTH]; static uint32_t *N; // MD contains the state of the SHA-1 A register at R75 for each of the input // messages. static uint32_t *MD; /* unused inline static uint32_t __attribute__((const)) rotateright(uint32_t value, uint8_t count) { register uint32_t result; asm("ror %%cl, %0" : "=r" (result) : "0" (value), "c" (count)); return result; } */ inline static uint32_t __attribute__((const)) rotateleft(uint32_t value, uint8_t count) { register uint32_t result; #if (__MINGW32__ || __MINGW64__) && __STRICT_ANSI__ result = _rotl(value, count); //((value<<count)|((uint32_t)value>>(32-count))); #elif __i386__ || __x86_64__ asm("rol %%cl, %0" : "=r" (result) : "0" (value), "c" (count)); #else // assume count <= 32 result = (value << count) | (value >> (32 - count)); #endif return result; } // GCC < 4.3 does not have __builtin_bswap32(), provide an alternative. #if !__INTEL_COMPILER && GCC_VERSION < 40300 #define __builtin_bswap32 bswap32 inline static uint32_t __attribute__((const)) bswap32(uint32_t value) { register uint32_t result; #if (__MINGW32__ || __MINGW64__) && __STRICT_ANSI__ result = _byteswap_ulong(value); #elif __i386 || __x86_64__ asm("bswap %0" : "=r" (result) : "0" (value)); #else result = (value << 24) | ((value << 8) & 0xFF0000) | (value >> 24) | ((value >> 8) & 0xFF00); #endif return result; } #endif static void sha1_fmt_init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif M = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*M), MEM_ALIGN_CACHE); N = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*N), MEM_ALIGN_CACHE); MD = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*MD), MEM_ALIGN_CACHE); } static void done(void) { MEM_FREE(MD); MEM_FREE(N); MEM_FREE(M); } static void *sha1_fmt_binary(char *ciphertext) { // Static buffer storing the binary representation of ciphertext. static union { uint32_t w[SHA1_DIGEST_WORDS]; vtype v; } result; uint32_t a75; // Convert ascii representation into binary. memcpy(result.w, rawsha1_common_get_binary(ciphertext), 20); // One preprocessing step, if we calculate E80 rol 2 here, we // can compare it against A75 and save 5 rounds in crypt_all(). a75 = rotateleft(__builtin_bswap32(result.w[4]) - 0xC3D2E1F0, 2); // Fill the vector with it, so we can do a vectorized compare result.v = vset1_epi32(a75); return result.w; } // This function is called when John wants us to buffer a crypt() operation // on the specified key. We also preprocess it for SHA-1 as we load it. // // This implementation is hardcoded to only accept passwords under 15 // characters. This is because we can create a new message block in just two // MOVDQA instructions (we need 15 instead of 16 because we must append a bit // to the message). 
For AVX2 it's 31 characters and for AVX-512+ it's 125. // // This routine assumes that key is not on an unmapped page boundary, but // doesn't require it to be 16 byte aligned (although that would be nice). static void sha1_fmt_set_key(char *key, int index) { vtype Z = vsetzero(); vtype X = vloadu(key); vtype B; // First, find the length of the key by scanning for a zero byte. #if (__AVX512F__ && !__AVX512BW__) || __MIC__ || __ALTIVEC__ || __ARM_NEON uint32_t len = strlen(key); #else // FIXME: even uint64_t won't be long enough for AVX-1024 uint64_t mask = vcmpeq_epi8_mask(X, Z); uint32_t len = __builtin_ctzl(mask); #endif // Create a lookup tables to find correct masks for each supported input // length. It would be nice if we could use bit shifts to produce these // dynamically, but they require an immediate operand. #if VWIDTH > 8 // FIXME: a problem with using int128 here is it won't work at // all for 32-bit builds - but that may be academic. #define XX ((((uint128_t)0xFFFFFFFFFFFFFFFFULL)<<64) + 0xFFFFFFFFFFFFFFFFULL) #define YY ((uint128_t)0x80) #define ZZ ((uint128_t)0x0) static const JTR_ALIGN(MEM_ALIGN_SIMD) uint128_t kTrailingBitTable[][4] = { {YY<< 0, ZZ, ZZ, ZZ}, {YY<< 8, ZZ, ZZ, ZZ}, {YY<< 16, ZZ, ZZ, ZZ}, {YY<< 24, ZZ, ZZ, ZZ}, {YY<< 32, ZZ, ZZ, ZZ}, {YY<< 40, ZZ, ZZ, ZZ}, {YY<< 48, ZZ, ZZ, ZZ}, {YY<< 56, ZZ, ZZ, ZZ}, {YY<< 64, ZZ, ZZ, ZZ}, {YY<< 72, ZZ, ZZ, ZZ}, {YY<< 80, ZZ, ZZ, ZZ}, {YY<< 88, ZZ, ZZ, ZZ}, {YY<< 96, ZZ, ZZ, ZZ}, {YY<<104, ZZ, ZZ, ZZ}, {YY<<112, ZZ, ZZ, ZZ}, {YY<<120, ZZ, ZZ, ZZ}, {ZZ, YY<< 0, ZZ, ZZ}, {ZZ, YY<< 8, ZZ, ZZ}, {ZZ, YY<< 16, ZZ, ZZ}, {ZZ, YY<< 24, ZZ, ZZ}, {ZZ, YY<< 32, ZZ, ZZ}, {ZZ, YY<< 40, ZZ, ZZ}, {ZZ, YY<< 48, ZZ, ZZ}, {ZZ, YY<< 56, ZZ, ZZ}, {ZZ, YY<< 64, ZZ, ZZ}, {ZZ, YY<< 72, ZZ, ZZ}, {ZZ, YY<< 80, ZZ, ZZ}, {ZZ, YY<< 88, ZZ, ZZ}, {ZZ, YY<< 96, ZZ, ZZ}, {ZZ, YY<<104, ZZ, ZZ}, {ZZ, YY<<112, ZZ, ZZ}, {ZZ, YY<<120, ZZ, ZZ}, {ZZ, ZZ, YY<< 0, ZZ}, {ZZ, ZZ, YY<< 8, ZZ}, {ZZ, ZZ, YY<< 16, ZZ}, {ZZ, ZZ, YY<< 24, ZZ}, {ZZ, ZZ, YY<< 32, ZZ}, {ZZ, ZZ, YY<< 40, ZZ}, {ZZ, ZZ, YY<< 48, ZZ}, {ZZ, ZZ, YY<< 56, ZZ}, {ZZ, ZZ, YY<< 64, ZZ}, {ZZ, ZZ, YY<< 72, ZZ}, {ZZ, ZZ, YY<< 80, ZZ}, {ZZ, ZZ, YY<< 88, ZZ}, {ZZ, ZZ, YY<< 96, ZZ}, {ZZ, ZZ, YY<<104, ZZ}, {ZZ, ZZ, YY<<112, ZZ}, {ZZ, ZZ, YY<<120, ZZ}, {ZZ, ZZ, ZZ, YY<< 0}, {ZZ, ZZ, ZZ, YY<< 8}, {ZZ, ZZ, ZZ, YY<< 16}, {ZZ, ZZ, ZZ, YY<< 24}, {ZZ, ZZ, ZZ, YY<< 32}, {ZZ, ZZ, ZZ, YY<< 40}, {ZZ, ZZ, ZZ, YY<< 48}, {ZZ, ZZ, ZZ, YY<< 56}, {ZZ, ZZ, ZZ, YY<< 64}, {ZZ, ZZ, ZZ, YY<< 72}, {ZZ, ZZ, ZZ, YY<< 80}, {ZZ, ZZ, ZZ, YY<< 88}, {ZZ, ZZ, ZZ, YY<< 96}, {ZZ, ZZ, ZZ, YY<<104}, {ZZ, ZZ, ZZ, YY<<112}, {ZZ, ZZ, ZZ, YY<<120} }; static const JTR_ALIGN(MEM_ALIGN_SIMD) uint128_t kUsedBytesTable[][4] = { {XX<< 0, XX, XX, XX}, {XX<< 8, XX, XX, XX}, {XX<< 16, XX, XX, XX}, {XX<< 24, XX, XX, XX}, {XX<< 32, XX, XX, XX}, {XX<< 40, XX, XX, XX}, {XX<< 48, XX, XX, XX}, {XX<< 56, XX, XX, XX}, {XX<< 64, XX, XX, XX}, {XX<< 72, XX, XX, XX}, {XX<< 80, XX, XX, XX}, {XX<< 88, XX, XX, XX}, {XX<< 96, XX, XX, XX}, {XX<<104, XX, XX, XX}, {XX<<112, XX, XX, XX}, {XX<<120, XX, XX, XX}, {ZZ, XX<< 0, XX, XX}, {ZZ, XX<< 8, XX, XX}, {ZZ, XX<< 16, XX, XX}, {ZZ, XX<< 24, XX, XX}, {ZZ, XX<< 32, XX, XX}, {ZZ, XX<< 40, XX, XX}, {ZZ, XX<< 48, XX, XX}, {ZZ, XX<< 56, XX, XX}, {ZZ, XX<< 64, XX, XX}, {ZZ, XX<< 72, XX, XX}, {ZZ, XX<< 80, XX, XX}, {ZZ, XX<< 88, XX, XX}, {ZZ, XX<< 96, XX, XX}, {ZZ, XX<<104, XX, XX}, {ZZ, XX<<112, XX, XX}, {ZZ, XX<<120, XX, XX}, {ZZ, ZZ, XX<< 0, XX}, {ZZ, ZZ, XX<< 8, XX}, {ZZ, ZZ, XX<< 16, XX}, {ZZ, ZZ, XX<< 24, XX}, {ZZ, ZZ, XX<< 32, 
XX}, {ZZ, ZZ, XX<< 40, XX}, {ZZ, ZZ, XX<< 48, XX}, {ZZ, ZZ, XX<< 56, XX}, {ZZ, ZZ, XX<< 64, XX}, {ZZ, ZZ, XX<< 72, XX}, {ZZ, ZZ, XX<< 80, XX}, {ZZ, ZZ, XX<< 88, XX}, {ZZ, ZZ, XX<< 96, XX}, {ZZ, ZZ, XX<<104, XX}, {ZZ, ZZ, XX<<112, XX}, {ZZ, ZZ, XX<<120, XX}, {ZZ, ZZ, ZZ, XX<< 0}, {ZZ, ZZ, ZZ, XX<< 8}, {ZZ, ZZ, ZZ, XX<< 16}, {ZZ, ZZ, ZZ, XX<< 24}, {ZZ, ZZ, ZZ, XX<< 32}, {ZZ, ZZ, ZZ, XX<< 40}, {ZZ, ZZ, ZZ, XX<< 48}, {ZZ, ZZ, ZZ, XX<< 56}, {ZZ, ZZ, ZZ, XX<< 64}, {ZZ, ZZ, ZZ, XX<< 72}, {ZZ, ZZ, ZZ, XX<< 80}, {ZZ, ZZ, ZZ, XX<< 88}, {ZZ, ZZ, ZZ, XX<< 96}, {ZZ, ZZ, ZZ, XX<<104}, {ZZ, ZZ, ZZ, XX<<112}, {ZZ, ZZ, ZZ, XX<<120} }; #elif VWIDTH > 4 static const JTR_ALIGN(MEM_ALIGN_SIMD) uint32_t kTrailingBitTable[][8] = { { 0x00000080, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000080, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000080, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000 }, { 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000080 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00008000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000 }, }; static const JTR_ALIGN(MEM_ALIGN_SIMD) uint32_t kUsedBytesTable[][8] = { { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000 }, { 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFF000000 }, }; #else static const JTR_ALIGN(MEM_ALIGN_SIMD) uint32_t kTrailingBitTable[][4] = { { 0x00000080, 0x00000000, 0x00000000, 0x00000000 }, { 0x00008000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00800000, 0x00000000, 0x00000000, 0x00000000 }, { 0x80000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000080, 0x00000000, 0x00000000 }, { 0x00000000, 0x00008000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00800000, 0x00000000, 0x00000000 }, { 0x00000000, 0x80000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000080, 0x00000000 }, { 0x00000000, 0x00000000, 0x00008000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00800000, 0x00000000 }, { 0x00000000, 0x00000000, 0x80000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000080 }, { 0x00000000, 0x00000000, 0x00000000, 0x00008000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00800000 }, { 0x00000000, 0x00000000, 0x00000000, 0x80000000 }, }; static const JTR_ALIGN(MEM_ALIGN_SIMD) uint32_t kUsedBytesTable[][4] = { { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00 }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000 }, { 0x00000000, 0x00000000, 0x00000000, 0xFF000000 }, }; #endif N[index] = len; // Zero out the rest of the DQWORD in X by making a suitable mask. Z = vload(kUsedBytesTable[len]); // Find the correct position for the trailing bit required by SHA-1. B = vload(kTrailingBitTable[len]); // Now we have this: // B = 00 00 00 00 00 80 00 00 00 00 00 00 00 00 00 // Z = 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff // X = 41 41 41 41 41 00 12 34 56 78 12 34 56 78 9A // <---------------> <------------------------> // key bytes w/nul junk from stack. // Use PANDN to apply the mask, then POR to append the trailing bit // required by SHA-1, which leaves us with this: // X = 41 41 41 41 41 80 00 00 00 00 00 00 00 00 00 X = vor(vandnot(Z, X), B); // SHA-1 requires us to byte swap all the 32bit words in the message, which // we do here. // X = 40 41 42 44 45 80 00 00 00 00 00 00 00 00 00 // What we have. // X = 44 42 41 40 00 00 80 45 00 00 00 00 00 00 00 // What we want. vswap32(X); // Store the result into the message buffer. vstore(&M[index], X); return; } static char *sha1_fmt_get_key(int index) { static uint32_t key[VWIDTH + 1]; int i; // This function is not hot, we can do this slowly. First, restore // endianness. for (i = 0; i < SIMD_COEF_32; i++) key[i] = __builtin_bswap32(M[index][i]); // Skip backwards until we hit the trailing bit, then remove it. memset(strrchr((char*)(key), 0x80), 0x00, 1); return (char*) key; } static int sha1_fmt_crypt_all(int *pcount, struct db_salt *salt) { uint32_t i; // Fetch crypt count from john. 
const int32_t count = *pcount; // To reduce the overhead of multiple function calls, we buffer lots of // passwords, and then hash them in multiples of VWIDTH all at once. #ifdef _OPENMP #pragma omp parallel for #endif for (i = 0; i < count; i += VWIDTH) { vtype W[SHA1_BLOCK_WORDS]; vtype A, B, C, D, E; vtype K; #if __AVX512F__ || __MIC__ const vtype indices = vset_epi32(15<<4,14<<4,13<<4,12<<4, 11<<4,10<<4, 9<<4, 8<<4, 7<<4, 6<<4, 5<<4, 4<<4, 3<<4, 2<<4, 1<<4, 0<<4); #elif __AVX2__ const vtype indices = vset_epi32( 7<<3, 6<<3, 5<<3, 4<<3, 3<<3, 2<<3, 1<<3, 0<<3); #endif #if __AVX2__ || __MIC__ // Gather the message right into place. uint32_t j; for (j = 0; j < VWIDTH; ++j) W[j] = vgather_epi32(&M[i][j], indices, sizeof(uint32_t)); #else // AVX has no gather instructions, so load and transpose. W[0] = vload(&M[i + 0]); W[1] = vload(&M[i + 1]); W[2] = vload(&M[i + 2]); W[3] = vload(&M[i + 3]); _MM_TRANSPOSE4_EPI32(W[0], W[1], W[2], W[3]); #endif A = vset1_epi32(0x67452301); B = vset1_epi32(0xEFCDAB89); C = vset1_epi32(0x98BADCFE); D = vset1_epi32(0x10325476); E = vset1_epi32(0xC3D2E1F0); K = vset1_epi32(0x5A827999); R1(W[0], A, B, C, D, E); R1(W[1], E, A, B, C, D); R1(W[2], D, E, A, B, C); #if VWIDTH > 4 R1(W[3], C, D, E, A, B); R1(W[4], B, C, D, E, A); R1(W[5], A, B, C, D, E); // 5 R1(W[6], E, A, B, C, D); #else R1(W[3], C, D, E, A, B); W[4] = vsetzero(); R1(W[4], B, C, D, E, A); W[5] = vsetzero(); R1(W[5], A, B, C, D, E); W[6] = vsetzero(); // 5 R1(W[6], E, A, B, C, D); W[7] = vsetzero(); #endif #if VWIDTH > 8 R1(W[7], D, E, A, B, C); R1(W[8], C, D, E, A, B); R1(W[9], B, C, D, E, A); R1(W[10], A, B, C, D, E); // 10 R1(W[11], E, A, B, C, D); R1(W[12], D, E, A, B, C); R1(W[13], C, D, E, A, B); R1(W[14], B, C, D, E, A); #else R1(W[7], D, E, A, B, C); W[8] = vsetzero(); R1(W[8], C, D, E, A, B); W[9] = vsetzero(); R1(W[9], B, C, D, E, A); W[10] = vsetzero(); R1(W[10], A, B, C, D, E); W[11] = vsetzero(); // 10 R1(W[11], E, A, B, C, D); W[12] = vsetzero(); R1(W[12], D, E, A, B, C); W[13] = vsetzero(); R1(W[13], C, D, E, A, B); W[14] = vsetzero(); R1(W[14], B, C, D, E, A); #endif // Fetch the message lengths, multiply 8 (to get the length in bits). 
W[15] = vslli_epi32(vload(&N[i]), 3); R1(W[15], A, B, C, D, E); // 15 X(W[0], W[2], W[8], W[13]); R1(W[0], E, A, B, C, D); X(W[1], W[3], W[9], W[14]); R1(W[1], D, E, A, B, C); X(W[2], W[4], W[10], W[15]); R1(W[2], C, D, E, A, B); X(W[3], W[5], W[11], W[0]); R1(W[3], B, C, D, E, A); K = vset1_epi32(0x6ED9EBA1); X(W[4], W[6], W[12], W[1]); R2(W[4], A, B, C, D, E); // 20 X(W[5], W[7], W[13], W[2]); R2(W[5], E, A, B, C, D); X(W[6], W[8], W[14], W[3]); R2(W[6], D, E, A, B, C); X(W[7], W[9], W[15], W[4]); R2(W[7], C, D, E, A, B); X(W[8], W[10], W[0], W[5]); R2(W[8], B, C, D, E, A); X(W[9], W[11], W[1], W[6]); R2(W[9], A, B, C, D, E); // 25 X(W[10], W[12], W[2], W[7]); R2(W[10], E, A, B, C, D); X(W[11], W[13], W[3], W[8]); R2(W[11], D, E, A, B, C); X(W[12], W[14], W[4], W[9]); R2(W[12], C, D, E, A, B); X(W[13], W[15], W[5], W[10]); R2(W[13], B, C, D, E, A); X(W[14], W[0], W[6], W[11]); R2(W[14], A, B, C, D, E); // 30 X(W[15], W[1], W[7], W[12]); R2(W[15], E, A, B, C, D); X(W[0], W[2], W[8], W[13]); R2(W[0], D, E, A, B, C); X(W[1], W[3], W[9], W[14]); R2(W[1], C, D, E, A, B); X(W[2], W[4], W[10], W[15]); R2(W[2], B, C, D, E, A); X(W[3], W[5], W[11], W[0]); R2(W[3], A, B, C, D, E); // 35 X(W[4], W[6], W[12], W[1]); R2(W[4], E, A, B, C, D); X(W[5], W[7], W[13], W[2]); R2(W[5], D, E, A, B, C); X(W[6], W[8], W[14], W[3]); R2(W[6], C, D, E, A, B); X(W[7], W[9], W[15], W[4]); R2(W[7], B, C, D, E, A); K = vset1_epi32(0x8F1BBCDC); X(W[8], W[10], W[0], W[5]); R3(W[8], A, B, C, D, E); // 40 X(W[9], W[11], W[1], W[6]); R3(W[9], E, A, B, C, D); X(W[10], W[12], W[2], W[7]); R3(W[10], D, E, A, B, C); X(W[11], W[13], W[3], W[8]); R3(W[11], C, D, E, A, B); X(W[12], W[14], W[4], W[9]); R3(W[12], B, C, D, E, A); X(W[13], W[15], W[5], W[10]); R3(W[13], A, B, C, D, E); // 45 X(W[14], W[0], W[6], W[11]); R3(W[14], E, A, B, C, D); X(W[15], W[1], W[7], W[12]); R3(W[15], D, E, A, B, C); X(W[0], W[2], W[8], W[13]); R3(W[0], C, D, E, A, B); X(W[1], W[3], W[9], W[14]); R3(W[1], B, C, D, E, A); X(W[2], W[4], W[10], W[15]); R3(W[2], A, B, C, D, E); // 50 X(W[3], W[5], W[11], W[0]); R3(W[3], E, A, B, C, D); X(W[4], W[6], W[12], W[1]); R3(W[4], D, E, A, B, C); X(W[5], W[7], W[13], W[2]); R3(W[5], C, D, E, A, B); X(W[6], W[8], W[14], W[3]); R3(W[6], B, C, D, E, A); X(W[7], W[9], W[15], W[4]); R3(W[7], A, B, C, D, E); // 55 X(W[8], W[10], W[0], W[5]); R3(W[8], E, A, B, C, D); X(W[9], W[11], W[1], W[6]); R3(W[9], D, E, A, B, C); X(W[10], W[12], W[2], W[7]); R3(W[10], C, D, E, A, B); X(W[11], W[13], W[3], W[8]); R3(W[11], B, C, D, E, A); K = vset1_epi32(0xCA62C1D6); X(W[12], W[14], W[4], W[9]); R2(W[12], A, B, C, D, E); // 60 X(W[13], W[15], W[5], W[10]); R2(W[13], E, A, B, C, D); X(W[14], W[0], W[6], W[11]); R2(W[14], D, E, A, B, C); X(W[15], W[1], W[7], W[12]); R2(W[15], C, D, E, A, B); X(W[0], W[2], W[8], W[13]); R2(W[0], B, C, D, E, A); X(W[1], W[3], W[9], W[14]); R2(W[1], A, B, C, D, E); // 65 X(W[2], W[4], W[10], W[15]); R2(W[2], E, A, B, C, D); X(W[3], W[5], W[11], W[0]); R2(W[3], D, E, A, B, C); X(W[4], W[6], W[12], W[1]); R2(W[4], C, D, E, A, B); X(W[5], W[7], W[13], W[2]); R2(W[5], B, C, D, E, A); X(W[6], W[8], W[14], W[3]); R2(W[6], A, B, C, D, E); // 70 X(W[7], W[9], W[15], W[4]); R2(W[7], E, A, B, C, D); X(W[8], W[10], W[0], W[5]); R2(W[8], D, E, A, B, C); X(W[9], W[11], W[1], W[6]); R2(W[9], C, D, E, A, B); X(W[10], W[12], W[2], W[7]); R2(W[10], B, C, D, E, A); X(W[11], W[13], W[3], W[8]); R4(W[11], A, B, C, D, E); // 75 // A75 has an interesting property, it is the first word that's (almost) // part of the final MD 
(E79 ror 2). The common case will be that this // doesn't match, so we stop here and save 5 rounds. // // Note that I'm using E due to displacement caused by vectorization, // this is A in standard SHA-1. vstore(&MD[i], E); } return count; } static int sha1_fmt_cmp_all(void *binary, int count) { uint32_t M; uint32_t i; vtype B; // This function is hot, we need to do this quickly. We use PCMP to find // out if any of the dwords in A75 matched E in the input hash. // First, Load the target hash into an XMM register B = vloadu(binary); M = 0; #ifdef _OPENMP #pragma omp parallel for reduction(|:M) #endif // We can test for matches 4/8 at a time. As the common case will be that // there is no match, we can avoid testing it after every compare, reducing // the number of branches. // // It's hard to convince GCC that it's safe to unroll this loop, so I've // manually unrolled it a little bit. for (i = 0; i < count; i += 64) { uint32_t R = 0; #if __AVX512F__ || __MIC__ R |= vanyeq_epi32(B, vload(&MD[i + 0])); R |= vanyeq_epi32(B, vload(&MD[i + 16])); R |= vanyeq_epi32(B, vload(&MD[i + 32])); R |= vanyeq_epi32(B, vload(&MD[i + 48])); #elif __AVX2__ R |= vanyeq_epi32(B, vload(&MD[i + 0])); R |= vanyeq_epi32(B, vload(&MD[i + 8])); R |= vanyeq_epi32(B, vload(&MD[i + 16])); R |= vanyeq_epi32(B, vload(&MD[i + 24])); R |= vanyeq_epi32(B, vload(&MD[i + 32])); R |= vanyeq_epi32(B, vload(&MD[i + 40])); R |= vanyeq_epi32(B, vload(&MD[i + 48])); R |= vanyeq_epi32(B, vload(&MD[i + 56])); #else R |= vanyeq_epi32(B, vload(&MD[i + 0])); R |= vanyeq_epi32(B, vload(&MD[i + 4])); R |= vanyeq_epi32(B, vload(&MD[i + 8])); R |= vanyeq_epi32(B, vload(&MD[i + 12])); R |= vanyeq_epi32(B, vload(&MD[i + 16])); R |= vanyeq_epi32(B, vload(&MD[i + 20])); R |= vanyeq_epi32(B, vload(&MD[i + 24])); R |= vanyeq_epi32(B, vload(&MD[i + 28])); R |= vanyeq_epi32(B, vload(&MD[i + 32])); R |= vanyeq_epi32(B, vload(&MD[i + 36])); R |= vanyeq_epi32(B, vload(&MD[i + 40])); R |= vanyeq_epi32(B, vload(&MD[i + 44])); R |= vanyeq_epi32(B, vload(&MD[i + 48])); R |= vanyeq_epi32(B, vload(&MD[i + 52])); R |= vanyeq_epi32(B, vload(&MD[i + 56])); R |= vanyeq_epi32(B, vload(&MD[i + 60])); #endif M |= R; } return M; } inline static int sha1_fmt_get_hash(int index) { return MD[index]; } static int sha1_fmt_get_hash0(int index) { return sha1_fmt_get_hash(index) & PH_MASK_0; } static int sha1_fmt_get_hash1(int index) { return sha1_fmt_get_hash(index) & PH_MASK_1; } static int sha1_fmt_get_hash2(int index) { return sha1_fmt_get_hash(index) & PH_MASK_2; } static int sha1_fmt_get_hash3(int index) { return sha1_fmt_get_hash(index) & PH_MASK_3; } static int sha1_fmt_get_hash4(int index) { return sha1_fmt_get_hash(index) & PH_MASK_4; } static int sha1_fmt_get_hash5(int index) { return sha1_fmt_get_hash(index) & PH_MASK_5; } static int sha1_fmt_get_hash6(int index) { return sha1_fmt_get_hash(index) & PH_MASK_6; } inline static int sha1_fmt_get_binary(void *binary) { return *(uint32_t*)(binary); } static int sha1_fmt_binary0(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_0; } static int sha1_fmt_binary1(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_1; } static int sha1_fmt_binary2(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_2; } static int sha1_fmt_binary3(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_3; } static int sha1_fmt_binary4(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_4; } static int sha1_fmt_binary5(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_5; } static int 
sha1_fmt_binary6(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_6; } static int sha1_fmt_cmp_one(void *binary, int index) { // We can quickly check if it will be worth doing a full comparison here, // this lets us turn up SHA1_PARALLEL_HASH without too much overhead when a // partial match occurs. return sha1_fmt_get_binary(binary) == sha1_fmt_get_hash(index); } // This function is not hot, and will only be called for around 1:2^32 random // crypts. Use a real SHA-1 implementation to verify the result exactly. This // routine is only called by John when cmp_one succeeds. static int sha1_fmt_cmp_exact(char *source, int index) { uint32_t full_sha1_digest[SHA1_DIGEST_WORDS]; SHA_CTX ctx; char *key; // Fetch the original input to hash. key = sha1_fmt_get_key(index); SHA1_Init(&ctx); SHA1_Update(&ctx, key, strlen(key)); SHA1_Final((unsigned char*)(full_sha1_digest), &ctx); // Compare result. return !memcmp(rawsha1_common_get_binary(source), full_sha1_digest, sizeof(full_sha1_digest)); } struct fmt_main fmt_sha1_ng = { .params = { .label = "Raw-SHA1-ng", #if VWIDTH == 16 .format_name = "(pwlen <= 55)", #if __MIC__ .algorithm_name = "SHA1 512/512 MIC 16x", #else .algorithm_name = "SHA1 512/512 AVX512 16x", #endif #elif VWIDTH == 8 .format_name = "(pwlen <= 31)", .algorithm_name = "SHA1 256/256 AVX2 8x", #else .format_name = "(pwlen <= 15)", .algorithm_name = "SHA1 128/128 " #if __ALTIVEC__ "AltiVec" #elif __ARM_NEON "NEON" #elif __XOP__ "XOP" #elif __AVX__ "AVX" #elif __SSE4_1__ "SSE4.1" #else "SSE2" #endif " 4x", #endif .benchmark_comment = "", .benchmark_length = -1, #if VWIDTH * 4 - 1 > 55 .plaintext_length = 55, #else .plaintext_length = sizeof(vtype) - 1, #endif .binary_size = sizeof(vtype), .binary_align = VWIDTH * 4, .salt_size = 0, .salt_align = 1, .min_keys_per_crypt = VWIDTH, .max_keys_per_crypt = SHA1_PARALLEL_HASH, .flags = #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE, .tunable_cost_name = { NULL }, .signature = { FORMAT_TAG, FORMAT_TAG_OLD }, .tests = rawsha1_common_tests, }, .methods = { .init = sha1_fmt_init, .done = done, .reset = fmt_default_reset, .prepare = rawsha1_common_prepare, .valid = rawsha1_common_valid, .split = rawsha1_common_split, .binary = sha1_fmt_binary, .salt = fmt_default_salt, .tunable_cost_value = { NULL }, .source = fmt_default_source, .salt_hash = fmt_default_salt_hash, .set_salt = fmt_default_set_salt, .set_key = sha1_fmt_set_key, .get_key = sha1_fmt_get_key, .clear_keys = fmt_default_clear_keys, .crypt_all = sha1_fmt_crypt_all, .get_hash = { [0] = sha1_fmt_get_hash0, [1] = sha1_fmt_get_hash1, [2] = sha1_fmt_get_hash2, [3] = sha1_fmt_get_hash3, [4] = sha1_fmt_get_hash4, [5] = sha1_fmt_get_hash5, [6] = sha1_fmt_get_hash6, }, .binary_hash = { [0] = sha1_fmt_binary0, [1] = sha1_fmt_binary1, [2] = sha1_fmt_binary2, [3] = sha1_fmt_binary3, [4] = sha1_fmt_binary4, [5] = sha1_fmt_binary5, [6] = sha1_fmt_binary6, }, .cmp_all = sha1_fmt_cmp_all, .cmp_one = sha1_fmt_cmp_one, .cmp_exact = sha1_fmt_cmp_exact }, }; #endif /* plugin stanza */ #endif /* defined(SIMD_COEF_32) && (SIMD_COEF_32 < 16 || ARCH_BITS >= 64) && !_MSC_VER */
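/*
 * Editor's note: a minimal scalar sketch, not part of the original plugin,
 * illustrating the round-75 shortcut that sha1_fmt_binary() and
 * sha1_fmt_crypt_all() above rely on. In standard SHA-1 the A value produced
 * at round 75 reaches the E slot by the end of round 79 after exactly one
 * 30-bit left rotation, so the final digest word satisfies
 * h4 == 0xC3D2E1F0 + rotl32(A75, 30). Inverting that relation gives the
 * precomputed comparison target A75 == rotl32(h4 - 0xC3D2E1F0, 2), which is
 * what the plugin computes from the byte-swapped last digest word. The
 * helper names below (rotl32, sha1_a75_target, sha1_a75_roundtrip_ok) are
 * illustrative, not part of the John the Ripper API.
 */
#include <stdint.h>

static uint32_t rotl32(uint32_t v, unsigned c)
{
    /* c is always 2 or 30 here, so the shift amounts stay well defined */
    return (v << c) | (v >> (32 - c));
}

/* Given the last digest word h4, already byte swapped to host order,
 * recover the A75 value a matching crypt would have produced. */
static uint32_t sha1_a75_target(uint32_t h4)
{
    return rotl32(h4 - 0xC3D2E1F0, 2);
}

/* Sanity check of the inverse relation: pushing the target back through
 * the single 30-bit rotation and the final addition must reproduce h4,
 * which is why stopping after round 75 loses no information. */
static int sha1_a75_roundtrip_ok(uint32_t h4)
{
    return h4 == 0xC3D2E1F0 + rotl32(sha1_a75_target(h4), 30);
}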
// // Alternative SSE2 optimised raw SHA-1 implementation for John The Ripper. // // This plugin requires -msse4 in CFLAGS. // // Copyright (C) 2012 Tavis Ormandy <taviso@cmpxchg8b.com> // Copyright (c) 2015 magnum (AVX2/AVX512 support) // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Library General Public // License as published by the Free Software Foundation; either // version 2 of the License, or (at your option) any later version. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Library General Public License for more details. // // You should have received a copy of the GNU Library General Public // License along with this library; if not, write to the // Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, // Boston, MA 02110-1301, USA. // #include "arch.h" #if defined(SIMD_COEF_32) && (SIMD_COEF_32 < 16 || ARCH_BITS >= 64) && !_MSC_VER && !__ARM_NEON #if FMT_EXTERNS_H extern struct fmt_main fmt_sha1_ng; #elif FMT_REGISTERS_H john_register_one(&fmt_sha1_ng); #else #include "misc.h" #if !defined(DEBUG) && !defined(WITH_ASAN) // These compilers claim to be __GNUC__ but warn on gcc pragmas. #if __GNUC__ && !__INTEL_COMPILER && !__clang__ && !__llvm__ && !_MSC_VER #pragma GCC optimize 3 #pragma GCC optimize "-fprefetch-loop-arrays" #endif #endif #ifndef _GNU_SOURCE #define _GNU_SOURCE 1 #endif #include <string.h> #include <stdint.h> #if !FAST_FORMATS_OMP #undef _OPENMP #elif _OPENMP #include <omp.h> #endif #include "stdbool.h" #if SIMD_COEF_32 > 8 #include "int128.h" #endif #include "pseudo_intrinsics.h" #include "params.h" #include "formats.h" #include "memory.h" #include "sha.h" #include "johnswap.h" #include "aligned.h" #include "rawSHA1_common.h" #include "memdbg.h" #define VWIDTH SIMD_COEF_32 #define SHA1_BLOCK_WORDS 16 #define SHA1_DIGEST_WORDS 5 #define SHA1_PARALLEL_HASH 512 // This must be a multiple of max VWIDTH. #ifdef __MIC__ #ifndef OMP_SCALE #define OMP_SCALE 128 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 2048 // Multiplier to hide OMP overhead #endif #endif #define X(X0, X2, X8, X13) do { \ X0 = vxor(X0, X8); \ X0 = vxor(X0, X13); \ X0 = vxor(X0, X2); \ X0 = vroti_epi32(X0, 1); \ } while (false) #define R1(W, A, B, C, D, E) do { \ E = vadd_epi32(E, K); \ E = vadd_epi32(E, vcmov(C, D, B)); \ E = vadd_epi32(E, W); \ B = vroti_epi32(B, 30); \ E = vadd_epi32(E, vroti_epi32(A, 5)); \ } while (false) #define R2(W, A, B, C, D, E) do { \ E = vadd_epi32(E, K); \ E = vadd_epi32(E, vxor(vxor(B, C), D)); \ E = vadd_epi32(E, W); \ B = vroti_epi32(B, 30); \ E = vadd_epi32(E, vroti_epi32(A, 5)); \ } while (false) #define R4(W, A, B, C, D, E) do { \ E = vadd_epi32(E, K); \ E = vadd_epi32(E, vxor(vxor(B, C), D)); \ E = vadd_epi32(E, W); \ E = vadd_epi32(E, vroti_epi32(A, 5)); \ } while (false) #if !VCMOV_EMULATED #define R3(W, A, B, C, D, E) do { \ E = vadd_epi32(E, K); \ E = vadd_epi32(E, vcmov(D, B, vxor(C, B))); \ E = vadd_epi32(E, W); \ B = vroti_epi32(B, 30); \ E = vadd_epi32(E, vroti_epi32(A, 5)); \ } while (false) #else #define R3(W, A, B, C, D, E) do { \ E = vadd_epi32(E, K); \ E = vadd_epi32(E, vor(vand(D, B), vand(vor(D, B), C))); \ E = vadd_epi32(E, W); \ B = vroti_epi32(B, 30); \ E = vadd_epi32(E, vroti_epi32(A, 5)); \ } while (false) #endif #if SIMD_COEF_32 == 4 // Not used for AVX2 and better, which has gather instructions. 
#define _MM_TRANSPOSE4_EPI32(R0, R1, R2, R3) do {\ vtype T0, T1, T2, T3; \ T0 = vunpacklo_epi32(R0, R1); \ T1 = vunpacklo_epi32(R2, R3); \ T2 = vunpackhi_epi32(R0, R1); \ T3 = vunpackhi_epi32(R2, R3); \ R0 = vunpacklo_epi64(T0, T1); \ R1 = vunpackhi_epi64(T0, T1); \ R2 = vunpacklo_epi64(T2, T3); \ R3 = vunpackhi_epi64(T2, T3); \ } while (false) #endif // M and N contain the first and last 128bits of a 512bit SHA-1 message block // respectively. The remaining 256bits are always zero, and so are not stored // here to avoid the load overhead. // For AVX2, we have half a block and for AVX512/MIC we actually have a full // block. static uint32_t (*M)[VWIDTH]; static uint32_t *N; // MD contains the state of the SHA-1 A register at R75 for each of the input // messages. static uint32_t *MD; /* unused inline static uint32_t __attribute__((const)) rotateright(uint32_t value, uint8_t count) { register uint32_t result; asm("ror %%cl, %0" : "=r" (result) : "0" (value), "c" (count)); return result; } */ inline static uint32_t __attribute__((const)) rotateleft(uint32_t value, uint8_t count) { register uint32_t result; #if (__MINGW32__ || __MINGW64__) && __STRICT_ANSI__ result = _rotl(value, count); //((value<<count)|((uint32_t)value>>(32-count))); #elif __i386__ || __x86_64__ asm("rol %%cl, %0" : "=r" (result) : "0" (value), "c" (count)); #else // assume count <= 32 result = (value << count) | (value >> (32 - count)); #endif return result; } // GCC < 4.3 does not have __builtin_bswap32(), provide an alternative. #if !__INTEL_COMPILER && GCC_VERSION < 40300 #define __builtin_bswap32 bswap32 inline static uint32_t __attribute__((const)) bswap32(uint32_t value) { register uint32_t result; #if (__MINGW32__ || __MINGW64__) && __STRICT_ANSI__ result = _byteswap_ulong(value); #elif __i386 || __x86_64__ asm("bswap %0" : "=r" (result) : "0" (value)); #else result = (value << 24) | ((value << 8) & 0xFF0000) | (value >> 24) | ((value >> 8) & 0xFF00); #endif return result; } #endif static void sha1_fmt_init(struct fmt_main *self) { M = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*M), MEM_ALIGN_CACHE); N = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*N), MEM_ALIGN_CACHE); MD = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*MD), MEM_ALIGN_CACHE); } static void done(void) { MEM_FREE(MD); MEM_FREE(N); MEM_FREE(M); } static void *sha1_fmt_binary(char *ciphertext) { // Static buffer storing the binary representation of ciphertext. static union { uint32_t w[SHA1_DIGEST_WORDS]; vtype v; } result; uint32_t a75; // Convert ascii representation into binary. memcpy(result.w, rawsha1_common_get_binary(ciphertext), 20); // One preprocessing step, if we calculate E80 rol 2 here, we // can compare it against A75 and save 5 rounds in crypt_all(). a75 = rotateleft(__builtin_bswap32(result.w[4]) - 0xC3D2E1F0, 2); // Fill the vector with it, so we can do a vectorized compare result.v = vset1_epi32(a75); return result.w; } // This function is called when John wants us to buffer a crypt() operation // on the specified key. We also preprocess it for SHA-1 as we load it. // // This implementation is hardcoded to only accept passwords under 15 // characters. This is because we can create a new message block in just two // MOVDQA instructions (we need 15 instead of 16 because we must append a bit // to the message). For AVX2 it's 31 characters and for AVX-512+ it's 125. 
// // This routine assumes that key is not on an unmapped page boundary, but // doesn't require it to be 16 byte aligned (although that would be nice). static void sha1_fmt_set_key(char *key, int index) { vtype Z = vsetzero(); vtype X = vloadu(key); vtype B; // First, find the length of the key by scanning for a zero byte. #if (__AVX512F__ && !__AVX512BW__) || __MIC__ || __ALTIVEC__ || __ARM_NEON uint32_t len = strlen(key); #else // FIXME: even uint64_t won't be long enough for AVX-1024 uint64_t mask = vcmpeq_epi8_mask(X, Z); uint32_t len = __builtin_ctzl(mask); #endif // Create a lookup tables to find correct masks for each supported input // length. It would be nice if we could use bit shifts to produce these // dynamically, but they require an immediate operand. #if VWIDTH > 8 // FIXME: a problem with using int128 here is it won't work at // all for 32-bit builds - but that may be academic. #define XX ((((uint128_t)0xFFFFFFFFFFFFFFFFULL)<<64) + 0xFFFFFFFFFFFFFFFFULL) #define YY ((uint128_t)0x80) #define ZZ ((uint128_t)0x0) static const JTR_ALIGN(MEM_ALIGN_SIMD) uint128_t kTrailingBitTable[][4] = { {YY<< 0, ZZ, ZZ, ZZ}, {YY<< 8, ZZ, ZZ, ZZ}, {YY<< 16, ZZ, ZZ, ZZ}, {YY<< 24, ZZ, ZZ, ZZ}, {YY<< 32, ZZ, ZZ, ZZ}, {YY<< 40, ZZ, ZZ, ZZ}, {YY<< 48, ZZ, ZZ, ZZ}, {YY<< 56, ZZ, ZZ, ZZ}, {YY<< 64, ZZ, ZZ, ZZ}, {YY<< 72, ZZ, ZZ, ZZ}, {YY<< 80, ZZ, ZZ, ZZ}, {YY<< 88, ZZ, ZZ, ZZ}, {YY<< 96, ZZ, ZZ, ZZ}, {YY<<104, ZZ, ZZ, ZZ}, {YY<<112, ZZ, ZZ, ZZ}, {YY<<120, ZZ, ZZ, ZZ}, {ZZ, YY<< 0, ZZ, ZZ}, {ZZ, YY<< 8, ZZ, ZZ}, {ZZ, YY<< 16, ZZ, ZZ}, {ZZ, YY<< 24, ZZ, ZZ}, {ZZ, YY<< 32, ZZ, ZZ}, {ZZ, YY<< 40, ZZ, ZZ}, {ZZ, YY<< 48, ZZ, ZZ}, {ZZ, YY<< 56, ZZ, ZZ}, {ZZ, YY<< 64, ZZ, ZZ}, {ZZ, YY<< 72, ZZ, ZZ}, {ZZ, YY<< 80, ZZ, ZZ}, {ZZ, YY<< 88, ZZ, ZZ}, {ZZ, YY<< 96, ZZ, ZZ}, {ZZ, YY<<104, ZZ, ZZ}, {ZZ, YY<<112, ZZ, ZZ}, {ZZ, YY<<120, ZZ, ZZ}, {ZZ, ZZ, YY<< 0, ZZ}, {ZZ, ZZ, YY<< 8, ZZ}, {ZZ, ZZ, YY<< 16, ZZ}, {ZZ, ZZ, YY<< 24, ZZ}, {ZZ, ZZ, YY<< 32, ZZ}, {ZZ, ZZ, YY<< 40, ZZ}, {ZZ, ZZ, YY<< 48, ZZ}, {ZZ, ZZ, YY<< 56, ZZ}, {ZZ, ZZ, YY<< 64, ZZ}, {ZZ, ZZ, YY<< 72, ZZ}, {ZZ, ZZ, YY<< 80, ZZ}, {ZZ, ZZ, YY<< 88, ZZ}, {ZZ, ZZ, YY<< 96, ZZ}, {ZZ, ZZ, YY<<104, ZZ}, {ZZ, ZZ, YY<<112, ZZ}, {ZZ, ZZ, YY<<120, ZZ}, {ZZ, ZZ, ZZ, YY<< 0}, {ZZ, ZZ, ZZ, YY<< 8}, {ZZ, ZZ, ZZ, YY<< 16}, {ZZ, ZZ, ZZ, YY<< 24}, {ZZ, ZZ, ZZ, YY<< 32}, {ZZ, ZZ, ZZ, YY<< 40}, {ZZ, ZZ, ZZ, YY<< 48}, {ZZ, ZZ, ZZ, YY<< 56}, {ZZ, ZZ, ZZ, YY<< 64}, {ZZ, ZZ, ZZ, YY<< 72}, {ZZ, ZZ, ZZ, YY<< 80}, {ZZ, ZZ, ZZ, YY<< 88}, {ZZ, ZZ, ZZ, YY<< 96}, {ZZ, ZZ, ZZ, YY<<104}, {ZZ, ZZ, ZZ, YY<<112}, {ZZ, ZZ, ZZ, YY<<120} }; static const JTR_ALIGN(MEM_ALIGN_SIMD) uint128_t kUsedBytesTable[][4] = { {XX<< 0, XX, XX, XX}, {XX<< 8, XX, XX, XX}, {XX<< 16, XX, XX, XX}, {XX<< 24, XX, XX, XX}, {XX<< 32, XX, XX, XX}, {XX<< 40, XX, XX, XX}, {XX<< 48, XX, XX, XX}, {XX<< 56, XX, XX, XX}, {XX<< 64, XX, XX, XX}, {XX<< 72, XX, XX, XX}, {XX<< 80, XX, XX, XX}, {XX<< 88, XX, XX, XX}, {XX<< 96, XX, XX, XX}, {XX<<104, XX, XX, XX}, {XX<<112, XX, XX, XX}, {XX<<120, XX, XX, XX}, {ZZ, XX<< 0, XX, XX}, {ZZ, XX<< 8, XX, XX}, {ZZ, XX<< 16, XX, XX}, {ZZ, XX<< 24, XX, XX}, {ZZ, XX<< 32, XX, XX}, {ZZ, XX<< 40, XX, XX}, {ZZ, XX<< 48, XX, XX}, {ZZ, XX<< 56, XX, XX}, {ZZ, XX<< 64, XX, XX}, {ZZ, XX<< 72, XX, XX}, {ZZ, XX<< 80, XX, XX}, {ZZ, XX<< 88, XX, XX}, {ZZ, XX<< 96, XX, XX}, {ZZ, XX<<104, XX, XX}, {ZZ, XX<<112, XX, XX}, {ZZ, XX<<120, XX, XX}, {ZZ, ZZ, XX<< 0, XX}, {ZZ, ZZ, XX<< 8, XX}, {ZZ, ZZ, XX<< 16, XX}, {ZZ, ZZ, XX<< 24, XX}, {ZZ, ZZ, XX<< 32, XX}, {ZZ, ZZ, XX<< 40, XX}, {ZZ, ZZ, XX<< 48, XX}, {ZZ, 
ZZ, XX<< 56, XX}, {ZZ, ZZ, XX<< 64, XX}, {ZZ, ZZ, XX<< 72, XX}, {ZZ, ZZ, XX<< 80, XX}, {ZZ, ZZ, XX<< 88, XX}, {ZZ, ZZ, XX<< 96, XX}, {ZZ, ZZ, XX<<104, XX}, {ZZ, ZZ, XX<<112, XX}, {ZZ, ZZ, XX<<120, XX}, {ZZ, ZZ, ZZ, XX<< 0}, {ZZ, ZZ, ZZ, XX<< 8}, {ZZ, ZZ, ZZ, XX<< 16}, {ZZ, ZZ, ZZ, XX<< 24}, {ZZ, ZZ, ZZ, XX<< 32}, {ZZ, ZZ, ZZ, XX<< 40}, {ZZ, ZZ, ZZ, XX<< 48}, {ZZ, ZZ, ZZ, XX<< 56}, {ZZ, ZZ, ZZ, XX<< 64}, {ZZ, ZZ, ZZ, XX<< 72}, {ZZ, ZZ, ZZ, XX<< 80}, {ZZ, ZZ, ZZ, XX<< 88}, {ZZ, ZZ, ZZ, XX<< 96}, {ZZ, ZZ, ZZ, XX<<104}, {ZZ, ZZ, ZZ, XX<<112}, {ZZ, ZZ, ZZ, XX<<120} }; #elif VWIDTH > 4 static const JTR_ALIGN(MEM_ALIGN_SIMD) uint32_t kTrailingBitTable[][8] = { { 0x00000080, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000080, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000080, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000080 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00008000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000 }, }; static const JTR_ALIGN(MEM_ALIGN_SIMD) uint32_t kUsedBytesTable[][8] = { { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0xFF000000 }, }; #else static const JTR_ALIGN(MEM_ALIGN_SIMD) uint32_t kTrailingBitTable[][4] = { { 0x00000080, 0x00000000, 0x00000000, 0x00000000 }, { 0x00008000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00800000, 0x00000000, 0x00000000, 0x00000000 }, { 0x80000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000080, 0x00000000, 0x00000000 }, { 0x00000000, 0x00008000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00800000, 0x00000000, 0x00000000 }, { 0x00000000, 0x80000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000080, 0x00000000 }, { 0x00000000, 0x00000000, 0x00008000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00800000, 0x00000000 }, { 0x00000000, 0x00000000, 0x80000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000080 }, { 0x00000000, 0x00000000, 0x00000000, 0x00008000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00800000 }, { 0x00000000, 0x00000000, 0x00000000, 0x80000000 }, }; static const JTR_ALIGN(MEM_ALIGN_SIMD) uint32_t kUsedBytesTable[][4] = { { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00 }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000 }, { 0x00000000, 0x00000000, 0x00000000, 0xFF000000 }, }; #endif N[index] = len; // Zero out the rest of the DQWORD in X by making a suitable mask. Z = vload(kUsedBytesTable[len]); // Find the correct position for the trailing bit required by SHA-1. B = vload(kTrailingBitTable[len]); // Now we have this: // B = 00 00 00 00 00 80 00 00 00 00 00 00 00 00 00 // Z = 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff // X = 41 41 41 41 41 00 12 34 56 78 12 34 56 78 9A // <---------------> <------------------------> // key bytes w/nul junk from stack. // Use PANDN to apply the mask, then POR to append the trailing bit // required by SHA-1, which leaves us with this: // X = 41 41 41 41 41 80 00 00 00 00 00 00 00 00 00 X = vor(vandnot(Z, X), B); // SHA-1 requires us to byte swap all the 32bit words in the message, which // we do here. // X = 40 41 42 44 45 80 00 00 00 00 00 00 00 00 00 // What we have. // X = 44 42 41 40 00 00 80 45 00 00 00 00 00 00 00 // What we want. vswap32(X); // Store the result into the message buffer. vstore(&M[index], X); return; } static char *sha1_fmt_get_key(int index) { static uint32_t key[VWIDTH + 1]; int i; // This function is not hot, we can do this slowly. First, restore // endianness. for (i = 0; i < SIMD_COEF_32; i++) key[i] = __builtin_bswap32(M[index][i]); // Skip backwards until we hit the trailing bit, then remove it. memset(strrchr((char*)(key), 0x80), 0x00, 1); return (char*) key; } static int sha1_fmt_crypt_all(int *pcount, struct db_salt *salt) { uint32_t i; // Fetch crypt count from john. const int32_t count = *pcount; // To reduce the overhead of multiple function calls, we buffer lots of // passwords, and then hash them in multiples of VWIDTH all at once. 
for (i = 0; i < count; i += VWIDTH) { vtype W[SHA1_BLOCK_WORDS]; vtype A, B, C, D, E; vtype K; #if __AVX512F__ || __MIC__ const vtype indices = vset_epi32(15<<4,14<<4,13<<4,12<<4, 11<<4,10<<4, 9<<4, 8<<4, 7<<4, 6<<4, 5<<4, 4<<4, 3<<4, 2<<4, 1<<4, 0<<4); #elif __AVX2__ const vtype indices = vset_epi32( 7<<3, 6<<3, 5<<3, 4<<3, 3<<3, 2<<3, 1<<3, 0<<3); #endif #if __AVX2__ || __MIC__ // Gather the message right into place. uint32_t j; for (j = 0; j < VWIDTH; ++j) W[j] = vgather_epi32(&M[i][j], indices, sizeof(uint32_t)); #else // AVX has no gather instructions, so load and transpose. W[0] = vload(&M[i + 0]); W[1] = vload(&M[i + 1]); W[2] = vload(&M[i + 2]); W[3] = vload(&M[i + 3]); _MM_TRANSPOSE4_EPI32(W[0], W[1], W[2], W[3]); #endif A = vset1_epi32(0x67452301); B = vset1_epi32(0xEFCDAB89); C = vset1_epi32(0x98BADCFE); D = vset1_epi32(0x10325476); E = vset1_epi32(0xC3D2E1F0); K = vset1_epi32(0x5A827999); R1(W[0], A, B, C, D, E); R1(W[1], E, A, B, C, D); R1(W[2], D, E, A, B, C); #if VWIDTH > 4 R1(W[3], C, D, E, A, B); R1(W[4], B, C, D, E, A); R1(W[5], A, B, C, D, E); // 5 R1(W[6], E, A, B, C, D); #else R1(W[3], C, D, E, A, B); W[4] = vsetzero(); R1(W[4], B, C, D, E, A); W[5] = vsetzero(); R1(W[5], A, B, C, D, E); W[6] = vsetzero(); // 5 R1(W[6], E, A, B, C, D); W[7] = vsetzero(); #endif #if VWIDTH > 8 R1(W[7], D, E, A, B, C); R1(W[8], C, D, E, A, B); R1(W[9], B, C, D, E, A); R1(W[10], A, B, C, D, E); // 10 R1(W[11], E, A, B, C, D); R1(W[12], D, E, A, B, C); R1(W[13], C, D, E, A, B); R1(W[14], B, C, D, E, A); #else R1(W[7], D, E, A, B, C); W[8] = vsetzero(); R1(W[8], C, D, E, A, B); W[9] = vsetzero(); R1(W[9], B, C, D, E, A); W[10] = vsetzero(); R1(W[10], A, B, C, D, E); W[11] = vsetzero(); // 10 R1(W[11], E, A, B, C, D); W[12] = vsetzero(); R1(W[12], D, E, A, B, C); W[13] = vsetzero(); R1(W[13], C, D, E, A, B); W[14] = vsetzero(); R1(W[14], B, C, D, E, A); #endif // Fetch the message lengths, multiply 8 (to get the length in bits). 
W[15] = vslli_epi32(vload(&N[i]), 3); R1(W[15], A, B, C, D, E); // 15 X(W[0], W[2], W[8], W[13]); R1(W[0], E, A, B, C, D); X(W[1], W[3], W[9], W[14]); R1(W[1], D, E, A, B, C); X(W[2], W[4], W[10], W[15]); R1(W[2], C, D, E, A, B); X(W[3], W[5], W[11], W[0]); R1(W[3], B, C, D, E, A); K = vset1_epi32(0x6ED9EBA1); X(W[4], W[6], W[12], W[1]); R2(W[4], A, B, C, D, E); // 20 X(W[5], W[7], W[13], W[2]); R2(W[5], E, A, B, C, D); X(W[6], W[8], W[14], W[3]); R2(W[6], D, E, A, B, C); X(W[7], W[9], W[15], W[4]); R2(W[7], C, D, E, A, B); X(W[8], W[10], W[0], W[5]); R2(W[8], B, C, D, E, A); X(W[9], W[11], W[1], W[6]); R2(W[9], A, B, C, D, E); // 25 X(W[10], W[12], W[2], W[7]); R2(W[10], E, A, B, C, D); X(W[11], W[13], W[3], W[8]); R2(W[11], D, E, A, B, C); X(W[12], W[14], W[4], W[9]); R2(W[12], C, D, E, A, B); X(W[13], W[15], W[5], W[10]); R2(W[13], B, C, D, E, A); X(W[14], W[0], W[6], W[11]); R2(W[14], A, B, C, D, E); // 30 X(W[15], W[1], W[7], W[12]); R2(W[15], E, A, B, C, D); X(W[0], W[2], W[8], W[13]); R2(W[0], D, E, A, B, C); X(W[1], W[3], W[9], W[14]); R2(W[1], C, D, E, A, B); X(W[2], W[4], W[10], W[15]); R2(W[2], B, C, D, E, A); X(W[3], W[5], W[11], W[0]); R2(W[3], A, B, C, D, E); // 35 X(W[4], W[6], W[12], W[1]); R2(W[4], E, A, B, C, D); X(W[5], W[7], W[13], W[2]); R2(W[5], D, E, A, B, C); X(W[6], W[8], W[14], W[3]); R2(W[6], C, D, E, A, B); X(W[7], W[9], W[15], W[4]); R2(W[7], B, C, D, E, A); K = vset1_epi32(0x8F1BBCDC); X(W[8], W[10], W[0], W[5]); R3(W[8], A, B, C, D, E); // 40 X(W[9], W[11], W[1], W[6]); R3(W[9], E, A, B, C, D); X(W[10], W[12], W[2], W[7]); R3(W[10], D, E, A, B, C); X(W[11], W[13], W[3], W[8]); R3(W[11], C, D, E, A, B); X(W[12], W[14], W[4], W[9]); R3(W[12], B, C, D, E, A); X(W[13], W[15], W[5], W[10]); R3(W[13], A, B, C, D, E); // 45 X(W[14], W[0], W[6], W[11]); R3(W[14], E, A, B, C, D); X(W[15], W[1], W[7], W[12]); R3(W[15], D, E, A, B, C); X(W[0], W[2], W[8], W[13]); R3(W[0], C, D, E, A, B); X(W[1], W[3], W[9], W[14]); R3(W[1], B, C, D, E, A); X(W[2], W[4], W[10], W[15]); R3(W[2], A, B, C, D, E); // 50 X(W[3], W[5], W[11], W[0]); R3(W[3], E, A, B, C, D); X(W[4], W[6], W[12], W[1]); R3(W[4], D, E, A, B, C); X(W[5], W[7], W[13], W[2]); R3(W[5], C, D, E, A, B); X(W[6], W[8], W[14], W[3]); R3(W[6], B, C, D, E, A); X(W[7], W[9], W[15], W[4]); R3(W[7], A, B, C, D, E); // 55 X(W[8], W[10], W[0], W[5]); R3(W[8], E, A, B, C, D); X(W[9], W[11], W[1], W[6]); R3(W[9], D, E, A, B, C); X(W[10], W[12], W[2], W[7]); R3(W[10], C, D, E, A, B); X(W[11], W[13], W[3], W[8]); R3(W[11], B, C, D, E, A); K = vset1_epi32(0xCA62C1D6); X(W[12], W[14], W[4], W[9]); R2(W[12], A, B, C, D, E); // 60 X(W[13], W[15], W[5], W[10]); R2(W[13], E, A, B, C, D); X(W[14], W[0], W[6], W[11]); R2(W[14], D, E, A, B, C); X(W[15], W[1], W[7], W[12]); R2(W[15], C, D, E, A, B); X(W[0], W[2], W[8], W[13]); R2(W[0], B, C, D, E, A); X(W[1], W[3], W[9], W[14]); R2(W[1], A, B, C, D, E); // 65 X(W[2], W[4], W[10], W[15]); R2(W[2], E, A, B, C, D); X(W[3], W[5], W[11], W[0]); R2(W[3], D, E, A, B, C); X(W[4], W[6], W[12], W[1]); R2(W[4], C, D, E, A, B); X(W[5], W[7], W[13], W[2]); R2(W[5], B, C, D, E, A); X(W[6], W[8], W[14], W[3]); R2(W[6], A, B, C, D, E); // 70 X(W[7], W[9], W[15], W[4]); R2(W[7], E, A, B, C, D); X(W[8], W[10], W[0], W[5]); R2(W[8], D, E, A, B, C); X(W[9], W[11], W[1], W[6]); R2(W[9], C, D, E, A, B); X(W[10], W[12], W[2], W[7]); R2(W[10], B, C, D, E, A); X(W[11], W[13], W[3], W[8]); R4(W[11], A, B, C, D, E); // 75 // A75 has an interesting property, it is the first word that's (almost) // part of the final MD 
(E79 ror 2). The common case will be that this // doesn't match, so we stop here and save 5 rounds. // // Note that I'm using E due to displacement caused by vectorization, // this is A in standard SHA-1. vstore(&MD[i], E); } return count; } static int sha1_fmt_cmp_all(void *binary, int count) { uint32_t M; uint32_t i; vtype B; // This function is hot, we need to do this quickly. We use PCMP to find // out if any of the dwords in A75 matched E in the input hash. // First, Load the target hash into an XMM register B = vloadu(binary); M = 0; // We can test for matches 4/8 at a time. As the common case will be that // there is no match, we can avoid testing it after every compare, reducing // the number of branches. // // It's hard to convince GCC that it's safe to unroll this loop, so I've // manually unrolled it a little bit. for (i = 0; i < count; i += 64) { uint32_t R = 0; #if __AVX512F__ || __MIC__ R |= vanyeq_epi32(B, vload(&MD[i + 0])); R |= vanyeq_epi32(B, vload(&MD[i + 16])); R |= vanyeq_epi32(B, vload(&MD[i + 32])); R |= vanyeq_epi32(B, vload(&MD[i + 48])); #elif __AVX2__ R |= vanyeq_epi32(B, vload(&MD[i + 0])); R |= vanyeq_epi32(B, vload(&MD[i + 8])); R |= vanyeq_epi32(B, vload(&MD[i + 16])); R |= vanyeq_epi32(B, vload(&MD[i + 24])); R |= vanyeq_epi32(B, vload(&MD[i + 32])); R |= vanyeq_epi32(B, vload(&MD[i + 40])); R |= vanyeq_epi32(B, vload(&MD[i + 48])); R |= vanyeq_epi32(B, vload(&MD[i + 56])); #else R |= vanyeq_epi32(B, vload(&MD[i + 0])); R |= vanyeq_epi32(B, vload(&MD[i + 4])); R |= vanyeq_epi32(B, vload(&MD[i + 8])); R |= vanyeq_epi32(B, vload(&MD[i + 12])); R |= vanyeq_epi32(B, vload(&MD[i + 16])); R |= vanyeq_epi32(B, vload(&MD[i + 20])); R |= vanyeq_epi32(B, vload(&MD[i + 24])); R |= vanyeq_epi32(B, vload(&MD[i + 28])); R |= vanyeq_epi32(B, vload(&MD[i + 32])); R |= vanyeq_epi32(B, vload(&MD[i + 36])); R |= vanyeq_epi32(B, vload(&MD[i + 40])); R |= vanyeq_epi32(B, vload(&MD[i + 44])); R |= vanyeq_epi32(B, vload(&MD[i + 48])); R |= vanyeq_epi32(B, vload(&MD[i + 52])); R |= vanyeq_epi32(B, vload(&MD[i + 56])); R |= vanyeq_epi32(B, vload(&MD[i + 60])); #endif M |= R; } return M; } inline static int sha1_fmt_get_hash(int index) { return MD[index]; } static int sha1_fmt_get_hash0(int index) { return sha1_fmt_get_hash(index) & PH_MASK_0; } static int sha1_fmt_get_hash1(int index) { return sha1_fmt_get_hash(index) & PH_MASK_1; } static int sha1_fmt_get_hash2(int index) { return sha1_fmt_get_hash(index) & PH_MASK_2; } static int sha1_fmt_get_hash3(int index) { return sha1_fmt_get_hash(index) & PH_MASK_3; } static int sha1_fmt_get_hash4(int index) { return sha1_fmt_get_hash(index) & PH_MASK_4; } static int sha1_fmt_get_hash5(int index) { return sha1_fmt_get_hash(index) & PH_MASK_5; } static int sha1_fmt_get_hash6(int index) { return sha1_fmt_get_hash(index) & PH_MASK_6; } inline static int sha1_fmt_get_binary(void *binary) { return *(uint32_t*)(binary); } static int sha1_fmt_binary0(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_0; } static int sha1_fmt_binary1(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_1; } static int sha1_fmt_binary2(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_2; } static int sha1_fmt_binary3(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_3; } static int sha1_fmt_binary4(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_4; } static int sha1_fmt_binary5(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_5; } static int sha1_fmt_binary6(void *binary) { return 
sha1_fmt_get_binary(binary) & PH_MASK_6; } static int sha1_fmt_cmp_one(void *binary, int index) { // We can quickly check if it will be worth doing a full comparison here, // this lets us turn up SHA1_PARALLEL_HASH without too much overhead when a // partial match occurs. return sha1_fmt_get_binary(binary) == sha1_fmt_get_hash(index); } // This function is not hot, and will only be called for around 1:2^32 random // crypts. Use a real SHA-1 implementation to verify the result exactly. This // routine is only called by John when cmp_one succeeds. static int sha1_fmt_cmp_exact(char *source, int index) { uint32_t full_sha1_digest[SHA1_DIGEST_WORDS]; SHA_CTX ctx; char *key; // Fetch the original input to hash. key = sha1_fmt_get_key(index); SHA1_Init(&ctx); SHA1_Update(&ctx, key, strlen(key)); SHA1_Final((unsigned char*)(full_sha1_digest), &ctx); // Compare result. return !memcmp(rawsha1_common_get_binary(source), full_sha1_digest, sizeof(full_sha1_digest)); } struct fmt_main fmt_sha1_ng = { .params = { .label = "Raw-SHA1-ng", #if VWIDTH == 16 .format_name = "(pwlen <= 55)", #if __MIC__ .algorithm_name = "SHA1 512/512 MIC 16x", #else .algorithm_name = "SHA1 512/512 AVX512 16x", #endif #elif VWIDTH == 8 .format_name = "(pwlen <= 31)", .algorithm_name = "SHA1 256/256 AVX2 8x", #else .format_name = "(pwlen <= 15)", .algorithm_name = "SHA1 128/128 " #if __ALTIVEC__ "AltiVec" #elif __ARM_NEON "NEON" #elif __XOP__ "XOP" #elif __AVX__ "AVX" #elif __SSE4_1__ "SSE4.1" #else "SSE2" #endif " 4x", #endif .benchmark_comment = "", .benchmark_length = -1, #if VWIDTH * 4 - 1 > 55 .plaintext_length = 55, #else .plaintext_length = sizeof(vtype) - 1, #endif .binary_size = sizeof(vtype), .binary_align = VWIDTH * 4, .salt_size = 0, .salt_align = 1, .min_keys_per_crypt = VWIDTH, .max_keys_per_crypt = SHA1_PARALLEL_HASH, .flags = FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE, .tunable_cost_name = { NULL }, .signature = { FORMAT_TAG, FORMAT_TAG_OLD }, .tests = rawsha1_common_tests, }, .methods = { .init = sha1_fmt_init, .done = done, .reset = fmt_default_reset, .prepare = rawsha1_common_prepare, .valid = rawsha1_common_valid, .split = rawsha1_common_split, .binary = sha1_fmt_binary, .salt = fmt_default_salt, .tunable_cost_value = { NULL }, .source = fmt_default_source, .salt_hash = fmt_default_salt_hash, .set_salt = fmt_default_set_salt, .set_key = sha1_fmt_set_key, .get_key = sha1_fmt_get_key, .clear_keys = fmt_default_clear_keys, .crypt_all = sha1_fmt_crypt_all, .get_hash = { [0] = sha1_fmt_get_hash0, [1] = sha1_fmt_get_hash1, [2] = sha1_fmt_get_hash2, [3] = sha1_fmt_get_hash3, [4] = sha1_fmt_get_hash4, [5] = sha1_fmt_get_hash5, [6] = sha1_fmt_get_hash6, }, .binary_hash = { [0] = sha1_fmt_binary0, [1] = sha1_fmt_binary1, [2] = sha1_fmt_binary2, [3] = sha1_fmt_binary3, [4] = sha1_fmt_binary4, [5] = sha1_fmt_binary5, [6] = sha1_fmt_binary6, }, .cmp_all = sha1_fmt_cmp_all, .cmp_one = sha1_fmt_cmp_one, .cmp_exact = sha1_fmt_cmp_exact }, }; #endif /* plugin stanza */ #endif /* defined(SIMD_COEF_32) && (SIMD_COEF_32 < 16 || ARCH_BITS >= 64) && !_MSC_VER */
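/*
 * Editor's note: a hedged scalar sketch, not part of the original plugin,
 * of what sha1_fmt_set_key() above does inside one SIMD register for the
 * SSE2 case (key length at most 15 bytes): copy the NUL-terminated key into
 * the first 16 bytes of the block, append the mandatory 0x80 trailing bit,
 * zero the remainder, and byte swap each 32-bit word into SHA-1's
 * big-endian order. The kUsedBytesTable/kTrailingBitTable PANDN/POR path is
 * a branch-free vector equivalent of this masked form. The function and
 * parameter names here are illustrative only.
 */
#include <stdint.h>
#include <string.h>

static void scalar_set_key(const char *key, uint32_t block[4],
                           uint32_t *len_out)
{
    uint8_t bytes[16] = { 0 };
    size_t len = strlen(key);   /* the SIMD path derives this with a
                                   PCMPEQB mask plus count-trailing-zeros
                                   instead of strlen; assumes len <= 15 */
    size_t i;

    memcpy(bytes, key, len);    /* key bytes; the rest stays zero, which is
                                   what masking with kUsedBytesTable does */
    bytes[len] = 0x80;          /* trailing bit required by SHA-1 padding,
                                   the job of kTrailingBitTable */

    for (i = 0; i < 4; i++)     /* assemble big-endian 32-bit words, the
                                   scalar counterpart of vswap32() */
        block[i] = (uint32_t)bytes[4 * i + 0] << 24 |
                   (uint32_t)bytes[4 * i + 1] << 16 |
                   (uint32_t)bytes[4 * i + 2] <<  8 |
                   (uint32_t)bytes[4 * i + 3];

    *len_out = (uint32_t)len;   /* stored in N[]; crypt_all later shifts it
                                   left by 3 to get the bit length in W[15] */
}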
// // Alternative SSE2 optimised raw SHA-1 implementation for John The Ripper. // // This plugin requires -msse4 in CFLAGS. // // Copyright (C) 2012 Tavis Ormandy <taviso@cmpxchg8b.com> // Copyright (c) 2015 magnum (AVX2/AVX512 support) // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Library General Public // License as published by the Free Software Foundation; either // version 2 of the License, or (at your option) any later version. // // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Library General Public License for more details. // // You should have received a copy of the GNU Library General Public // License along with this library; if not, write to the // Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, // Boston, MA 02110-1301, USA. // #include "arch.h" #if defined(SIMD_COEF_32) && (SIMD_COEF_32 < 16 || ARCH_BITS >= 64) && !_MSC_VER && !__ARM_NEON #if FMT_EXTERNS_H extern struct fmt_main fmt_sha1_ng; #elif FMT_REGISTERS_H john_register_one(&fmt_sha1_ng); #else #include "misc.h" #if !defined(DEBUG) && !defined(WITH_ASAN) // These compilers claim to be __GNUC__ but warn on gcc pragmas. #if __GNUC__ && !__INTEL_COMPILER && !__clang__ && !__llvm__ && !_MSC_VER #pragma GCC optimize 3 #pragma GCC optimize "-fprefetch-loop-arrays" #endif #endif #ifndef _GNU_SOURCE #define _GNU_SOURCE 1 #endif #include <string.h> #include <stdint.h> #if !FAST_FORMATS_OMP #undef _OPENMP #elif _OPENMP #include <omp.h> #endif #include "stdbool.h" #if SIMD_COEF_32 > 8 #include "int128.h" #endif #include "pseudo_intrinsics.h" #include "params.h" #include "formats.h" #include "memory.h" #include "sha.h" #include "johnswap.h" #include "aligned.h" #include "rawSHA1_common.h" #include "memdbg.h" #define VWIDTH SIMD_COEF_32 #define SHA1_BLOCK_WORDS 16 #define SHA1_DIGEST_WORDS 5 #define SHA1_PARALLEL_HASH 512 // This must be a multiple of max VWIDTH. #ifdef __MIC__ #ifndef OMP_SCALE #define OMP_SCALE 128 #endif #else #ifndef OMP_SCALE #define OMP_SCALE 2048 // Multiplier to hide OMP overhead #endif #endif #define X(X0, X2, X8, X13) do { \ X0 = vxor(X0, X8); \ X0 = vxor(X0, X13); \ X0 = vxor(X0, X2); \ X0 = vroti_epi32(X0, 1); \ } while (false) #define R1(W, A, B, C, D, E) do { \ E = vadd_epi32(E, K); \ E = vadd_epi32(E, vcmov(C, D, B)); \ E = vadd_epi32(E, W); \ B = vroti_epi32(B, 30); \ E = vadd_epi32(E, vroti_epi32(A, 5)); \ } while (false) #define R2(W, A, B, C, D, E) do { \ E = vadd_epi32(E, K); \ E = vadd_epi32(E, vxor(vxor(B, C), D)); \ E = vadd_epi32(E, W); \ B = vroti_epi32(B, 30); \ E = vadd_epi32(E, vroti_epi32(A, 5)); \ } while (false) #define R4(W, A, B, C, D, E) do { \ E = vadd_epi32(E, K); \ E = vadd_epi32(E, vxor(vxor(B, C), D)); \ E = vadd_epi32(E, W); \ E = vadd_epi32(E, vroti_epi32(A, 5)); \ } while (false) #if !VCMOV_EMULATED #define R3(W, A, B, C, D, E) do { \ E = vadd_epi32(E, K); \ E = vadd_epi32(E, vcmov(D, B, vxor(C, B))); \ E = vadd_epi32(E, W); \ B = vroti_epi32(B, 30); \ E = vadd_epi32(E, vroti_epi32(A, 5)); \ } while (false) #else #define R3(W, A, B, C, D, E) do { \ E = vadd_epi32(E, K); \ E = vadd_epi32(E, vor(vand(D, B), vand(vor(D, B), C))); \ E = vadd_epi32(E, W); \ B = vroti_epi32(B, 30); \ E = vadd_epi32(E, vroti_epi32(A, 5)); \ } while (false) #endif #if SIMD_COEF_32 == 4 // Not used for AVX2 and better, which has gather instructions. 
#define _MM_TRANSPOSE4_EPI32(R0, R1, R2, R3) do {\ vtype T0, T1, T2, T3; \ T0 = vunpacklo_epi32(R0, R1); \ T1 = vunpacklo_epi32(R2, R3); \ T2 = vunpackhi_epi32(R0, R1); \ T3 = vunpackhi_epi32(R2, R3); \ R0 = vunpacklo_epi64(T0, T1); \ R1 = vunpackhi_epi64(T0, T1); \ R2 = vunpacklo_epi64(T2, T3); \ R3 = vunpackhi_epi64(T2, T3); \ } while (false) #endif // M and N contain the first and last 128bits of a 512bit SHA-1 message block // respectively. The remaining 256bits are always zero, and so are not stored // here to avoid the load overhead. // For AVX2, we have half a block and for AVX512/MIC we actually have a full // block. static uint32_t (*M)[VWIDTH]; static uint32_t *N; // MD contains the state of the SHA-1 A register at R75 for each of the input // messages. static uint32_t *MD; /* unused inline static uint32_t __attribute__((const)) rotateright(uint32_t value, uint8_t count) { register uint32_t result; asm("ror %%cl, %0" : "=r" (result) : "0" (value), "c" (count)); return result; } */ inline static uint32_t __attribute__((const)) rotateleft(uint32_t value, uint8_t count) { register uint32_t result; #if (__MINGW32__ || __MINGW64__) && __STRICT_ANSI__ result = _rotl(value, count); //((value<<count)|((uint32_t)value>>(32-count))); #elif __i386__ || __x86_64__ asm("rol %%cl, %0" : "=r" (result) : "0" (value), "c" (count)); #else // assume count <= 32 result = (value << count) | (value >> (32 - count)); #endif return result; } // GCC < 4.3 does not have __builtin_bswap32(), provide an alternative. #if !__INTEL_COMPILER && GCC_VERSION < 40300 #define __builtin_bswap32 bswap32 inline static uint32_t __attribute__((const)) bswap32(uint32_t value) { register uint32_t result; #if (__MINGW32__ || __MINGW64__) && __STRICT_ANSI__ result = _byteswap_ulong(value); #elif __i386 || __x86_64__ asm("bswap %0" : "=r" (result) : "0" (value)); #else result = (value << 24) | ((value << 8) & 0xFF0000) | (value >> 24) | ((value >> 8) & 0xFF00); #endif return result; } #endif static void sha1_fmt_init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif M = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*M), MEM_ALIGN_CACHE); N = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*N), MEM_ALIGN_CACHE); MD = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*MD), MEM_ALIGN_CACHE); } static void done(void) { MEM_FREE(MD); MEM_FREE(N); MEM_FREE(M); } static void *sha1_fmt_binary(char *ciphertext) { // Static buffer storing the binary representation of ciphertext. static union { uint32_t w[SHA1_DIGEST_WORDS]; vtype v; } result; uint32_t a75; // Convert ascii representation into binary. memcpy(result.w, rawsha1_common_get_binary(ciphertext), 20); // One preprocessing step, if we calculate E80 rol 2 here, we // can compare it against A75 and save 5 rounds in crypt_all(). a75 = rotateleft(__builtin_bswap32(result.w[4]) - 0xC3D2E1F0, 2); // Fill the vector with it, so we can do a vectorized compare result.v = vset1_epi32(a75); return result.w; } // This function is called when John wants us to buffer a crypt() operation // on the specified key. We also preprocess it for SHA-1 as we load it. // // This implementation is hardcoded to only accept passwords under 15 // characters. This is because we can create a new message block in just two // MOVDQA instructions (we need 15 instead of 16 because we must append a bit // to the message). 
// For AVX2 the limit is 31 characters, and for AVX-512+ the format caps it
// at 55 (the single-block SHA-1 limit; see plaintext_length below).
//
// This routine assumes that key is not on an unmapped page boundary, but
// doesn't require it to be 16 byte aligned (although that would be nice).
static void sha1_fmt_set_key(char *key, int index)
{
    vtype Z = vsetzero();
    vtype X = vloadu(key);
    vtype B;

    // First, find the length of the key by scanning for a zero byte.
#if (__AVX512F__ && !__AVX512BW__) || __MIC__ || __ALTIVEC__ || __ARM_NEON
    uint32_t len = strlen(key);
#else
    // FIXME: even uint64_t won't be long enough for AVX-1024
    uint64_t mask = vcmpeq_epi8_mask(X, Z);
    uint32_t len = __builtin_ctzl(mask);
#endif

    // Create lookup tables to find the correct masks for each supported
    // input length. It would be nice if we could use bit shifts to produce
    // these dynamically, but they require an immediate operand.
#if VWIDTH > 8
    // FIXME: a problem with using int128 here is it won't work at
    // all for 32-bit builds - but that may be academic.
#define XX ((((uint128_t)0xFFFFFFFFFFFFFFFFULL)<<64) + 0xFFFFFFFFFFFFFFFFULL)
#define YY ((uint128_t)0x80)
#define ZZ ((uint128_t)0x0)
    static const JTR_ALIGN(MEM_ALIGN_SIMD) uint128_t kTrailingBitTable[][4] = {
        {YY<<  0, ZZ, ZZ, ZZ}, {YY<<  8, ZZ, ZZ, ZZ}, {YY<< 16, ZZ, ZZ, ZZ}, {YY<< 24, ZZ, ZZ, ZZ},
        {YY<< 32, ZZ, ZZ, ZZ}, {YY<< 40, ZZ, ZZ, ZZ}, {YY<< 48, ZZ, ZZ, ZZ}, {YY<< 56, ZZ, ZZ, ZZ},
        {YY<< 64, ZZ, ZZ, ZZ}, {YY<< 72, ZZ, ZZ, ZZ}, {YY<< 80, ZZ, ZZ, ZZ}, {YY<< 88, ZZ, ZZ, ZZ},
        {YY<< 96, ZZ, ZZ, ZZ}, {YY<<104, ZZ, ZZ, ZZ}, {YY<<112, ZZ, ZZ, ZZ}, {YY<<120, ZZ, ZZ, ZZ},
        {ZZ, YY<<  0, ZZ, ZZ}, {ZZ, YY<<  8, ZZ, ZZ}, {ZZ, YY<< 16, ZZ, ZZ}, {ZZ, YY<< 24, ZZ, ZZ},
        {ZZ, YY<< 32, ZZ, ZZ}, {ZZ, YY<< 40, ZZ, ZZ}, {ZZ, YY<< 48, ZZ, ZZ}, {ZZ, YY<< 56, ZZ, ZZ},
        {ZZ, YY<< 64, ZZ, ZZ}, {ZZ, YY<< 72, ZZ, ZZ}, {ZZ, YY<< 80, ZZ, ZZ}, {ZZ, YY<< 88, ZZ, ZZ},
        {ZZ, YY<< 96, ZZ, ZZ}, {ZZ, YY<<104, ZZ, ZZ}, {ZZ, YY<<112, ZZ, ZZ}, {ZZ, YY<<120, ZZ, ZZ},
        {ZZ, ZZ, YY<<  0, ZZ}, {ZZ, ZZ, YY<<  8, ZZ}, {ZZ, ZZ, YY<< 16, ZZ}, {ZZ, ZZ, YY<< 24, ZZ},
        {ZZ, ZZ, YY<< 32, ZZ}, {ZZ, ZZ, YY<< 40, ZZ}, {ZZ, ZZ, YY<< 48, ZZ}, {ZZ, ZZ, YY<< 56, ZZ},
        {ZZ, ZZ, YY<< 64, ZZ}, {ZZ, ZZ, YY<< 72, ZZ}, {ZZ, ZZ, YY<< 80, ZZ}, {ZZ, ZZ, YY<< 88, ZZ},
        {ZZ, ZZ, YY<< 96, ZZ}, {ZZ, ZZ, YY<<104, ZZ}, {ZZ, ZZ, YY<<112, ZZ}, {ZZ, ZZ, YY<<120, ZZ},
        {ZZ, ZZ, ZZ, YY<<  0}, {ZZ, ZZ, ZZ, YY<<  8}, {ZZ, ZZ, ZZ, YY<< 16}, {ZZ, ZZ, ZZ, YY<< 24},
        {ZZ, ZZ, ZZ, YY<< 32}, {ZZ, ZZ, ZZ, YY<< 40}, {ZZ, ZZ, ZZ, YY<< 48}, {ZZ, ZZ, ZZ, YY<< 56},
        {ZZ, ZZ, ZZ, YY<< 64}, {ZZ, ZZ, ZZ, YY<< 72}, {ZZ, ZZ, ZZ, YY<< 80}, {ZZ, ZZ, ZZ, YY<< 88},
        {ZZ, ZZ, ZZ, YY<< 96}, {ZZ, ZZ, ZZ, YY<<104}, {ZZ, ZZ, ZZ, YY<<112}, {ZZ, ZZ, ZZ, YY<<120}
    };

    static const JTR_ALIGN(MEM_ALIGN_SIMD) uint128_t kUsedBytesTable[][4] = {
        {XX<<  0, XX, XX, XX}, {XX<<  8, XX, XX, XX}, {XX<< 16, XX, XX, XX}, {XX<< 24, XX, XX, XX},
        {XX<< 32, XX, XX, XX}, {XX<< 40, XX, XX, XX}, {XX<< 48, XX, XX, XX}, {XX<< 56, XX, XX, XX},
        {XX<< 64, XX, XX, XX}, {XX<< 72, XX, XX, XX}, {XX<< 80, XX, XX, XX}, {XX<< 88, XX, XX, XX},
        {XX<< 96, XX, XX, XX}, {XX<<104, XX, XX, XX}, {XX<<112, XX, XX, XX}, {XX<<120, XX, XX, XX},
        {ZZ, XX<<  0, XX, XX}, {ZZ, XX<<  8, XX, XX}, {ZZ, XX<< 16, XX, XX}, {ZZ, XX<< 24, XX, XX},
        {ZZ, XX<< 32, XX, XX}, {ZZ, XX<< 40, XX, XX}, {ZZ, XX<< 48, XX, XX}, {ZZ, XX<< 56, XX, XX},
        {ZZ, XX<< 64, XX, XX}, {ZZ, XX<< 72, XX, XX}, {ZZ, XX<< 80, XX, XX}, {ZZ, XX<< 88, XX, XX},
        {ZZ, XX<< 96, XX, XX}, {ZZ, XX<<104, XX, XX}, {ZZ, XX<<112, XX, XX}, {ZZ, XX<<120, XX, XX},
        {ZZ, ZZ, XX<<  0, XX}, {ZZ, ZZ, XX<<  8, XX}, {ZZ, ZZ, XX<< 16, XX}, {ZZ, ZZ, XX<< 24, XX},
        {ZZ, ZZ, XX<< 32,
XX}, {ZZ, ZZ, XX<< 40, XX}, {ZZ, ZZ, XX<< 48, XX}, {ZZ, ZZ, XX<< 56, XX}, {ZZ, ZZ, XX<< 64, XX}, {ZZ, ZZ, XX<< 72, XX}, {ZZ, ZZ, XX<< 80, XX}, {ZZ, ZZ, XX<< 88, XX}, {ZZ, ZZ, XX<< 96, XX}, {ZZ, ZZ, XX<<104, XX}, {ZZ, ZZ, XX<<112, XX}, {ZZ, ZZ, XX<<120, XX}, {ZZ, ZZ, ZZ, XX<< 0}, {ZZ, ZZ, ZZ, XX<< 8}, {ZZ, ZZ, ZZ, XX<< 16}, {ZZ, ZZ, ZZ, XX<< 24}, {ZZ, ZZ, ZZ, XX<< 32}, {ZZ, ZZ, ZZ, XX<< 40}, {ZZ, ZZ, ZZ, XX<< 48}, {ZZ, ZZ, ZZ, XX<< 56}, {ZZ, ZZ, ZZ, XX<< 64}, {ZZ, ZZ, ZZ, XX<< 72}, {ZZ, ZZ, ZZ, XX<< 80}, {ZZ, ZZ, ZZ, XX<< 88}, {ZZ, ZZ, ZZ, XX<< 96}, {ZZ, ZZ, ZZ, XX<<104}, {ZZ, ZZ, ZZ, XX<<112}, {ZZ, ZZ, ZZ, XX<<120} }; #elif VWIDTH > 4 static const JTR_ALIGN(MEM_ALIGN_SIMD) uint32_t kTrailingBitTable[][8] = { { 0x00000080, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000080, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000080, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00008000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000, 0x00000000 }, { 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000080 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00008000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00800000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000 }, }; static const JTR_ALIGN(MEM_ALIGN_SIMD) uint32_t kUsedBytesTable[][8] = { { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000 }, { 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFF000000 }, }; #else static const JTR_ALIGN(MEM_ALIGN_SIMD) uint32_t kTrailingBitTable[][4] = { { 0x00000080, 0x00000000, 0x00000000, 0x00000000 }, { 0x00008000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00800000, 0x00000000, 0x00000000, 0x00000000 }, { 0x80000000, 0x00000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000080, 0x00000000, 0x00000000 }, { 0x00000000, 0x00008000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00800000, 0x00000000, 0x00000000 }, { 0x00000000, 0x80000000, 0x00000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000080, 0x00000000 }, { 0x00000000, 0x00000000, 0x00008000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00800000, 0x00000000 }, { 0x00000000, 0x00000000, 0x80000000, 0x00000000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00000080 }, { 0x00000000, 0x00000000, 0x00000000, 0x00008000 }, { 0x00000000, 0x00000000, 0x00000000, 0x00800000 }, { 0x00000000, 0x00000000, 0x00000000, 0x80000000 }, }; static const JTR_ALIGN(MEM_ALIGN_SIMD) uint32_t kUsedBytesTable[][4] = { { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFFFF00, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0xFF000000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFFFF00, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFFFF0000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0xFF000000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFF00 }, { 0x00000000, 0x00000000, 0x00000000, 0xFFFF0000 }, { 0x00000000, 0x00000000, 0x00000000, 0xFF000000 }, }; #endif N[index] = len; // Zero out the rest of the DQWORD in X by making a suitable mask. Z = vload(kUsedBytesTable[len]); // Find the correct position for the trailing bit required by SHA-1. B = vload(kTrailingBitTable[len]); // Now we have this: // B = 00 00 00 00 00 80 00 00 00 00 00 00 00 00 00 // Z = 00 00 00 00 00 ff ff ff ff ff ff ff ff ff ff // X = 41 41 41 41 41 00 12 34 56 78 12 34 56 78 9A // <---------------> <------------------------> // key bytes w/nul junk from stack. // Use PANDN to apply the mask, then POR to append the trailing bit // required by SHA-1, which leaves us with this: // X = 41 41 41 41 41 80 00 00 00 00 00 00 00 00 00 X = vor(vandnot(Z, X), B); // SHA-1 requires us to byte swap all the 32bit words in the message, which // we do here. // X = 40 41 42 44 45 80 00 00 00 00 00 00 00 00 00 // What we have. // X = 44 42 41 40 00 00 80 45 00 00 00 00 00 00 00 // What we want. vswap32(X); // Store the result into the message buffer. vstore(&M[index], X); return; } static char *sha1_fmt_get_key(int index) { static uint32_t key[VWIDTH + 1]; int i; // This function is not hot, we can do this slowly. First, restore // endianness. for (i = 0; i < SIMD_COEF_32; i++) key[i] = __builtin_bswap32(M[index][i]); // Skip backwards until we hit the trailing bit, then remove it. memset(strrchr((char*)(key), 0x80), 0x00, 1); return (char*) key; } static int sha1_fmt_crypt_all(int *pcount, struct db_salt *salt) { uint32_t i; // Fetch crypt count from john. 
const int32_t count = *pcount; // To reduce the overhead of multiple function calls, we buffer lots of // passwords, and then hash them in multiples of VWIDTH all at once. #ifdef _OPENMP #pragma omp parallel for #endif for (i = 0; i < count; i += VWIDTH) { vtype W[SHA1_BLOCK_WORDS]; vtype A, B, C, D, E; vtype K; #if __AVX512F__ || __MIC__ const vtype indices = vset_epi32(15<<4,14<<4,13<<4,12<<4, 11<<4,10<<4, 9<<4, 8<<4, 7<<4, 6<<4, 5<<4, 4<<4, 3<<4, 2<<4, 1<<4, 0<<4); #elif __AVX2__ const vtype indices = vset_epi32( 7<<3, 6<<3, 5<<3, 4<<3, 3<<3, 2<<3, 1<<3, 0<<3); #endif #if __AVX2__ || __MIC__ // Gather the message right into place. uint32_t j; for (j = 0; j < VWIDTH; ++j) W[j] = vgather_epi32(&M[i][j], indices, sizeof(uint32_t)); #else // AVX has no gather instructions, so load and transpose. W[0] = vload(&M[i + 0]); W[1] = vload(&M[i + 1]); W[2] = vload(&M[i + 2]); W[3] = vload(&M[i + 3]); _MM_TRANSPOSE4_EPI32(W[0], W[1], W[2], W[3]); #endif A = vset1_epi32(0x67452301); B = vset1_epi32(0xEFCDAB89); C = vset1_epi32(0x98BADCFE); D = vset1_epi32(0x10325476); E = vset1_epi32(0xC3D2E1F0); K = vset1_epi32(0x5A827999); R1(W[0], A, B, C, D, E); R1(W[1], E, A, B, C, D); R1(W[2], D, E, A, B, C); #if VWIDTH > 4 R1(W[3], C, D, E, A, B); R1(W[4], B, C, D, E, A); R1(W[5], A, B, C, D, E); // 5 R1(W[6], E, A, B, C, D); #else R1(W[3], C, D, E, A, B); W[4] = vsetzero(); R1(W[4], B, C, D, E, A); W[5] = vsetzero(); R1(W[5], A, B, C, D, E); W[6] = vsetzero(); // 5 R1(W[6], E, A, B, C, D); W[7] = vsetzero(); #endif #if VWIDTH > 8 R1(W[7], D, E, A, B, C); R1(W[8], C, D, E, A, B); R1(W[9], B, C, D, E, A); R1(W[10], A, B, C, D, E); // 10 R1(W[11], E, A, B, C, D); R1(W[12], D, E, A, B, C); R1(W[13], C, D, E, A, B); R1(W[14], B, C, D, E, A); #else R1(W[7], D, E, A, B, C); W[8] = vsetzero(); R1(W[8], C, D, E, A, B); W[9] = vsetzero(); R1(W[9], B, C, D, E, A); W[10] = vsetzero(); R1(W[10], A, B, C, D, E); W[11] = vsetzero(); // 10 R1(W[11], E, A, B, C, D); W[12] = vsetzero(); R1(W[12], D, E, A, B, C); W[13] = vsetzero(); R1(W[13], C, D, E, A, B); W[14] = vsetzero(); R1(W[14], B, C, D, E, A); #endif // Fetch the message lengths, multiply 8 (to get the length in bits). 
W[15] = vslli_epi32(vload(&N[i]), 3); R1(W[15], A, B, C, D, E); // 15 X(W[0], W[2], W[8], W[13]); R1(W[0], E, A, B, C, D); X(W[1], W[3], W[9], W[14]); R1(W[1], D, E, A, B, C); X(W[2], W[4], W[10], W[15]); R1(W[2], C, D, E, A, B); X(W[3], W[5], W[11], W[0]); R1(W[3], B, C, D, E, A); K = vset1_epi32(0x6ED9EBA1); X(W[4], W[6], W[12], W[1]); R2(W[4], A, B, C, D, E); // 20 X(W[5], W[7], W[13], W[2]); R2(W[5], E, A, B, C, D); X(W[6], W[8], W[14], W[3]); R2(W[6], D, E, A, B, C); X(W[7], W[9], W[15], W[4]); R2(W[7], C, D, E, A, B); X(W[8], W[10], W[0], W[5]); R2(W[8], B, C, D, E, A); X(W[9], W[11], W[1], W[6]); R2(W[9], A, B, C, D, E); // 25 X(W[10], W[12], W[2], W[7]); R2(W[10], E, A, B, C, D); X(W[11], W[13], W[3], W[8]); R2(W[11], D, E, A, B, C); X(W[12], W[14], W[4], W[9]); R2(W[12], C, D, E, A, B); X(W[13], W[15], W[5], W[10]); R2(W[13], B, C, D, E, A); X(W[14], W[0], W[6], W[11]); R2(W[14], A, B, C, D, E); // 30 X(W[15], W[1], W[7], W[12]); R2(W[15], E, A, B, C, D); X(W[0], W[2], W[8], W[13]); R2(W[0], D, E, A, B, C); X(W[1], W[3], W[9], W[14]); R2(W[1], C, D, E, A, B); X(W[2], W[4], W[10], W[15]); R2(W[2], B, C, D, E, A); X(W[3], W[5], W[11], W[0]); R2(W[3], A, B, C, D, E); // 35 X(W[4], W[6], W[12], W[1]); R2(W[4], E, A, B, C, D); X(W[5], W[7], W[13], W[2]); R2(W[5], D, E, A, B, C); X(W[6], W[8], W[14], W[3]); R2(W[6], C, D, E, A, B); X(W[7], W[9], W[15], W[4]); R2(W[7], B, C, D, E, A); K = vset1_epi32(0x8F1BBCDC); X(W[8], W[10], W[0], W[5]); R3(W[8], A, B, C, D, E); // 40 X(W[9], W[11], W[1], W[6]); R3(W[9], E, A, B, C, D); X(W[10], W[12], W[2], W[7]); R3(W[10], D, E, A, B, C); X(W[11], W[13], W[3], W[8]); R3(W[11], C, D, E, A, B); X(W[12], W[14], W[4], W[9]); R3(W[12], B, C, D, E, A); X(W[13], W[15], W[5], W[10]); R3(W[13], A, B, C, D, E); // 45 X(W[14], W[0], W[6], W[11]); R3(W[14], E, A, B, C, D); X(W[15], W[1], W[7], W[12]); R3(W[15], D, E, A, B, C); X(W[0], W[2], W[8], W[13]); R3(W[0], C, D, E, A, B); X(W[1], W[3], W[9], W[14]); R3(W[1], B, C, D, E, A); X(W[2], W[4], W[10], W[15]); R3(W[2], A, B, C, D, E); // 50 X(W[3], W[5], W[11], W[0]); R3(W[3], E, A, B, C, D); X(W[4], W[6], W[12], W[1]); R3(W[4], D, E, A, B, C); X(W[5], W[7], W[13], W[2]); R3(W[5], C, D, E, A, B); X(W[6], W[8], W[14], W[3]); R3(W[6], B, C, D, E, A); X(W[7], W[9], W[15], W[4]); R3(W[7], A, B, C, D, E); // 55 X(W[8], W[10], W[0], W[5]); R3(W[8], E, A, B, C, D); X(W[9], W[11], W[1], W[6]); R3(W[9], D, E, A, B, C); X(W[10], W[12], W[2], W[7]); R3(W[10], C, D, E, A, B); X(W[11], W[13], W[3], W[8]); R3(W[11], B, C, D, E, A); K = vset1_epi32(0xCA62C1D6); X(W[12], W[14], W[4], W[9]); R2(W[12], A, B, C, D, E); // 60 X(W[13], W[15], W[5], W[10]); R2(W[13], E, A, B, C, D); X(W[14], W[0], W[6], W[11]); R2(W[14], D, E, A, B, C); X(W[15], W[1], W[7], W[12]); R2(W[15], C, D, E, A, B); X(W[0], W[2], W[8], W[13]); R2(W[0], B, C, D, E, A); X(W[1], W[3], W[9], W[14]); R2(W[1], A, B, C, D, E); // 65 X(W[2], W[4], W[10], W[15]); R2(W[2], E, A, B, C, D); X(W[3], W[5], W[11], W[0]); R2(W[3], D, E, A, B, C); X(W[4], W[6], W[12], W[1]); R2(W[4], C, D, E, A, B); X(W[5], W[7], W[13], W[2]); R2(W[5], B, C, D, E, A); X(W[6], W[8], W[14], W[3]); R2(W[6], A, B, C, D, E); // 70 X(W[7], W[9], W[15], W[4]); R2(W[7], E, A, B, C, D); X(W[8], W[10], W[0], W[5]); R2(W[8], D, E, A, B, C); X(W[9], W[11], W[1], W[6]); R2(W[9], C, D, E, A, B); X(W[10], W[12], W[2], W[7]); R2(W[10], B, C, D, E, A); X(W[11], W[13], W[3], W[8]); R4(W[11], A, B, C, D, E); // 75 // A75 has an interesting property, it is the first word that's (almost) // part of the final MD 
(E79 ror 2). The common case will be that this // doesn't match, so we stop here and save 5 rounds. // // Note that I'm using E due to displacement caused by vectorization, // this is A in standard SHA-1. vstore(&MD[i], E); } return count; } static int sha1_fmt_cmp_all(void *binary, int count) { uint32_t M; uint32_t i; vtype B; // This function is hot, we need to do this quickly. We use PCMP to find // out if any of the dwords in A75 matched E in the input hash. // First, Load the target hash into an XMM register B = vloadu(binary); M = 0; #ifdef _OPENMP #pragma omp parallel for reduction(|:M) #endif // We can test for matches 4/8 at a time. As the common case will be that // there is no match, we can avoid testing it after every compare, reducing // the number of branches. // // It's hard to convince GCC that it's safe to unroll this loop, so I've // manually unrolled it a little bit. for (i = 0; i < count; i += 64) { uint32_t R = 0; #if __AVX512F__ || __MIC__ R |= vanyeq_epi32(B, vload(&MD[i + 0])); R |= vanyeq_epi32(B, vload(&MD[i + 16])); R |= vanyeq_epi32(B, vload(&MD[i + 32])); R |= vanyeq_epi32(B, vload(&MD[i + 48])); #elif __AVX2__ R |= vanyeq_epi32(B, vload(&MD[i + 0])); R |= vanyeq_epi32(B, vload(&MD[i + 8])); R |= vanyeq_epi32(B, vload(&MD[i + 16])); R |= vanyeq_epi32(B, vload(&MD[i + 24])); R |= vanyeq_epi32(B, vload(&MD[i + 32])); R |= vanyeq_epi32(B, vload(&MD[i + 40])); R |= vanyeq_epi32(B, vload(&MD[i + 48])); R |= vanyeq_epi32(B, vload(&MD[i + 56])); #else R |= vanyeq_epi32(B, vload(&MD[i + 0])); R |= vanyeq_epi32(B, vload(&MD[i + 4])); R |= vanyeq_epi32(B, vload(&MD[i + 8])); R |= vanyeq_epi32(B, vload(&MD[i + 12])); R |= vanyeq_epi32(B, vload(&MD[i + 16])); R |= vanyeq_epi32(B, vload(&MD[i + 20])); R |= vanyeq_epi32(B, vload(&MD[i + 24])); R |= vanyeq_epi32(B, vload(&MD[i + 28])); R |= vanyeq_epi32(B, vload(&MD[i + 32])); R |= vanyeq_epi32(B, vload(&MD[i + 36])); R |= vanyeq_epi32(B, vload(&MD[i + 40])); R |= vanyeq_epi32(B, vload(&MD[i + 44])); R |= vanyeq_epi32(B, vload(&MD[i + 48])); R |= vanyeq_epi32(B, vload(&MD[i + 52])); R |= vanyeq_epi32(B, vload(&MD[i + 56])); R |= vanyeq_epi32(B, vload(&MD[i + 60])); #endif M |= R; } return M; } inline static int sha1_fmt_get_hash(int index) { return MD[index]; } static int sha1_fmt_get_hash0(int index) { return sha1_fmt_get_hash(index) & PH_MASK_0; } static int sha1_fmt_get_hash1(int index) { return sha1_fmt_get_hash(index) & PH_MASK_1; } static int sha1_fmt_get_hash2(int index) { return sha1_fmt_get_hash(index) & PH_MASK_2; } static int sha1_fmt_get_hash3(int index) { return sha1_fmt_get_hash(index) & PH_MASK_3; } static int sha1_fmt_get_hash4(int index) { return sha1_fmt_get_hash(index) & PH_MASK_4; } static int sha1_fmt_get_hash5(int index) { return sha1_fmt_get_hash(index) & PH_MASK_5; } static int sha1_fmt_get_hash6(int index) { return sha1_fmt_get_hash(index) & PH_MASK_6; } inline static int sha1_fmt_get_binary(void *binary) { return *(uint32_t*)(binary); } static int sha1_fmt_binary0(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_0; } static int sha1_fmt_binary1(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_1; } static int sha1_fmt_binary2(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_2; } static int sha1_fmt_binary3(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_3; } static int sha1_fmt_binary4(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_4; } static int sha1_fmt_binary5(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_5; } static int 
sha1_fmt_binary6(void *binary) { return sha1_fmt_get_binary(binary) & PH_MASK_6; } static int sha1_fmt_cmp_one(void *binary, int index) { // We can quickly check if it will be worth doing a full comparison here, // this lets us turn up SHA1_PARALLEL_HASH without too much overhead when a // partial match occurs. return sha1_fmt_get_binary(binary) == sha1_fmt_get_hash(index); } // This function is not hot, and will only be called for around 1:2^32 random // crypts. Use a real SHA-1 implementation to verify the result exactly. This // routine is only called by John when cmp_one succeeds. static int sha1_fmt_cmp_exact(char *source, int index) { uint32_t full_sha1_digest[SHA1_DIGEST_WORDS]; SHA_CTX ctx; char *key; // Fetch the original input to hash. key = sha1_fmt_get_key(index); SHA1_Init(&ctx); SHA1_Update(&ctx, key, strlen(key)); SHA1_Final((unsigned char*)(full_sha1_digest), &ctx); // Compare result. return !memcmp(rawsha1_common_get_binary(source), full_sha1_digest, sizeof(full_sha1_digest)); } struct fmt_main fmt_sha1_ng = { .params = { .label = "Raw-SHA1-ng", #if VWIDTH == 16 .format_name = "(pwlen <= 55)", #if __MIC__ .algorithm_name = "SHA1 512/512 MIC 16x", #else .algorithm_name = "SHA1 512/512 AVX512 16x", #endif #elif VWIDTH == 8 .format_name = "(pwlen <= 31)", .algorithm_name = "SHA1 256/256 AVX2 8x", #else .format_name = "(pwlen <= 15)", .algorithm_name = "SHA1 128/128 " #if __ALTIVEC__ "AltiVec" #elif __ARM_NEON "NEON" #elif __XOP__ "XOP" #elif __AVX__ "AVX" #elif __SSE4_1__ "SSE4.1" #else "SSE2" #endif " 4x", #endif .benchmark_comment = "", .benchmark_length = -1, #if VWIDTH * 4 - 1 > 55 .plaintext_length = 55, #else .plaintext_length = sizeof(vtype) - 1, #endif .binary_size = sizeof(vtype), .binary_align = VWIDTH * 4, .salt_size = 0, .salt_align = 1, .min_keys_per_crypt = VWIDTH, .max_keys_per_crypt = SHA1_PARALLEL_HASH, .flags = #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE, .tunable_cost_name = { NULL }, .signature = { FORMAT_TAG, FORMAT_TAG_OLD }, .tests = rawsha1_common_tests, }, .methods = { .init = sha1_fmt_init, .done = done, .reset = fmt_default_reset, .prepare = rawsha1_common_prepare, .valid = rawsha1_common_valid, .split = rawsha1_common_split, .binary = sha1_fmt_binary, .salt = fmt_default_salt, .tunable_cost_value = { NULL }, .source = fmt_default_source, .salt_hash = fmt_default_salt_hash, .set_salt = fmt_default_set_salt, .set_key = sha1_fmt_set_key, .get_key = sha1_fmt_get_key, .clear_keys = fmt_default_clear_keys, .crypt_all = sha1_fmt_crypt_all, .get_hash = { [0] = sha1_fmt_get_hash0, [1] = sha1_fmt_get_hash1, [2] = sha1_fmt_get_hash2, [3] = sha1_fmt_get_hash3, [4] = sha1_fmt_get_hash4, [5] = sha1_fmt_get_hash5, [6] = sha1_fmt_get_hash6, }, .binary_hash = { [0] = sha1_fmt_binary0, [1] = sha1_fmt_binary1, [2] = sha1_fmt_binary2, [3] = sha1_fmt_binary3, [4] = sha1_fmt_binary4, [5] = sha1_fmt_binary5, [6] = sha1_fmt_binary6, }, .cmp_all = sha1_fmt_cmp_all, .cmp_one = sha1_fmt_cmp_one, .cmp_exact = sha1_fmt_cmp_exact }, }; #endif /* plugin stanza */ #endif /* defined(SIMD_COEF_32) && (SIMD_COEF_32 < 16 || ARCH_BITS >= 64) && !_MSC_VER */
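A scalar model makes the vector tricks in sha1_fmt_set_key() above easier to follow. The sketch below is illustrative only (plain C99 with GCC builtins, a hypothetical fixed 16-byte buffer, no SIMD, and it assumes the key is shorter than the buffer): it performs the same three steps of finding the key length from a zero-byte compare mask, clearing the junk tail, and appending the 0x80 trailing bit that SHA-1 requires.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    char block[16];
    const char *key = "magnum";

    // Emulate vloadu(): the 16-byte load picks up the key, its NUL, and
    // whatever junk follows it in memory.
    memset(block, 0xAA, sizeof(block));      // simulate stack junk
    memcpy(block, key, strlen(key) + 1);

    // Emulate vcmpeq_epi8_mask() + __builtin_ctzl(): one bit per zero
    // byte, then a trailing-zero count gives the key length.
    uint32_t mask = 0;
    for (int i = 0; i < 16; i++)
        mask |= (uint32_t)(block[i] == 0) << i;
    uint32_t len = __builtin_ctz(mask);      // mask != 0 by assumption

    // Emulate kUsedBytesTable/kTrailingBitTable: drop the junk bytes and
    // place the mandatory 0x80 terminator.
    for (uint32_t i = len; i < 16; i++)
        block[i] = 0;
    block[len] = (char)0x80;

    for (int i = 0; i < 16; i++)
        printf("%02x ", (unsigned char)block[i]);
    printf("(len=%u)\n", len);
    return 0;
}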
GB_binop__bor_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bor_int8 // A.*B function (eWiseMult): GB_AemultB__bor_int8 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bor_int8 // C+=b function (dense accum): GB_Cdense_accumb__bor_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bor_int8 // C=scalar+B GB_bind1st__bor_int8 // C=scalar+B' GB_bind1st_tran__bor_int8 // C=A+scalar GB_bind2nd__bor_int8 // C=A'+scalar GB_bind2nd_tran__bor_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij) | (bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x) | (y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BOR || GxB_NO_INT8 || GxB_NO_BOR_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bor_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bor_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bor_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__bor_int8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const 
int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bor_int8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bor_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = (x) | (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bor_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij) | (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x) | (aij) ; \ } GrB_Info GB_bind1st_tran__bor_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT 
*Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij) | (y) ; \ } GrB_Info GB_bind2nd_tran__bor_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
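GB_bind1st__bor_int8 above is one instance of a general pattern: broadcast the scalar operand once, then stream the array operand through an embarrassingly parallel loop, skipping entries the bitmap marks as absent. Below is a minimal standalone sketch of that pattern (plain C with OpenMP; the names are hypothetical and nothing here is GraphBLAS API).

#include <stdint.h>
#include <stdio.h>

// bind1st for z = x | y: fix the scalar x, apply elementwise over bx [].
// bb is an optional bitmap (NULL means all entries present), mirroring the
// GBB (Bb,p) test in the generated kernel above.
static void bind1st_bor_int8 (int8_t *cx, int8_t x, const int8_t *bx,
    const int8_t *bb, int64_t n, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < n ; p++)
    {
        if (bb != NULL && !bb [p]) continue ;   // entry not present
        cx [p] = x | bx [p] ;
    }
}

int main (void)
{
    int8_t b [4] = { 1, 2, 4, 8 }, c [4] ;
    bind1st_bor_int8 (c, 0x10, b, NULL, 4, 2) ;
    for (int i = 0 ; i < 4 ; i++) printf ("%d ", c [i]) ;
    printf ("\n") ;     // prints: 17 18 20 24
    return 0 ;
}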
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bor_int8 // A.*B function (eWiseMult): GB_AemultB__bor_int8 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bor_int8 // C+=b function (dense accum): GB_Cdense_accumb__bor_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bor_int8 // C=scalar+B GB_bind1st__bor_int8 // C=scalar+B' GB_bind1st_tran__bor_int8 // C=A+scalar GB_bind2nd__bor_int8 // C=A'+scalar GB_bind2nd_tran__bor_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij) | (bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x) | (y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BOR || GxB_NO_INT8 || GxB_NO_BOR_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bor_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bor_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bor_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__bor_int8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const 
int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bor_int8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bor_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = (x) | (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bor_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij) | (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x) | (aij) ; \ } GrB_Info GB_bind1st_tran__bor_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A 
is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij) | (y) ; \ } GrB_Info GB_bind2nd_tran__bor_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
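The *_tran variants above fuse the binary operator into a transpose, so C = op (x, A') is produced in one pass with no separate transposed copy of A. Here is a toy dense version of the idea (hypothetical, plain C; the real GB_unop_transpose.c template works on sparse slices with per-thread workspaces).

#include <stdint.h>
#include <stdio.h>

// C = x | A' for a dense m-by-n A in row-major order: apply the operator
// while writing each element to its transposed position.
static void bind1st_tran_bor_int8 (int8_t *C, int8_t x, const int8_t *A,
    int m, int n)
{
    for (int i = 0 ; i < m ; i++)
        for (int j = 0 ; j < n ; j++)
            C [(int64_t) j * m + i] = x | A [(int64_t) i * n + j] ;
}

int main (void)
{
    int8_t A [2*3] = { 1, 2, 3, 4, 5, 6 } ;     // 2-by-3 input
    int8_t C [3*2] ;                            // 3-by-2 result
    bind1st_tran_bor_int8 (C, 8, A, 2, 3) ;
    for (int i = 0 ; i < 6 ; i++) printf ("%d ", C [i]) ;
    printf ("\n") ;     // prints: 9 12 10 13 11 14
    return 0 ;
}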
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bor_int8 // A.*B function (eWiseMult): GB_AemultB__bor_int8 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bor_int8 // C+=b function (dense accum): GB_Cdense_accumb__bor_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bor_int8 // C=scalar+B GB_bind1st__bor_int8 // C=scalar+B' GB_bind1st_tran__bor_int8 // C=A+scalar GB_bind2nd__bor_int8 // C=A'+scalar GB_bind2nd_tran__bor_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij) | (bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x) | (y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BOR || GxB_NO_INT8 || GxB_NO_BOR_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bor_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bor_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bor_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__bor_int8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const 
int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bor_int8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bor_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = (x) | (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bor_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij) | (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x) | (aij) ; \ } GrB_Info GB_bind1st_tran__bor_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT 
*Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij) | (y) ; \ } GrB_Info GB_bind2nd_tran__bor_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
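The generated kernels above all share one shape: a GB_DISABLE guard, casts from GB_void to the concrete type, and a statically scheduled OpenMP loop that skips entries absent from the optional bitmap. A minimal standalone sketch of the bind1st pattern for the bor_int8 case follows (the helper name and harness are ours, not SuiteSparse:GraphBLAS's; the real kernels pull the loop body in from GB_emult_template.c or GB_add_template.c instead):

#include <stdint.h>
#include <stddef.h>

/* Sketch of the bind1st pattern above: Cx [p] = x | Bx [p], skipping
   entries not present in the optional bitmap Bb. Illustrative only. */
static void bor_int8_bind1st_sketch (int8_t *Cx, int8_t x,
    const int8_t *Bx, const int8_t *Bb, int64_t bnz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (Bb != NULL && !Bb [p]) continue ;   /* entry not present */
        Cx [p] = x | Bx [p] ;
    }
}

One loop template, instantiated per operator and type via #include, is what keeps generated files like this one mechanical and uniform.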
row_wise_v2.c
/**** Author: Rayhan Shikder, email: shikderr@myumanitoba.ca MSc Student, Department of Computer Science, University of Manitoba, Winnipeg, MB, Canada ****/ #include<stdio.h> #include<string.h> #include <stdlib.h> #include<time.h> #include "omp.h" //macros #define ALPHABET_LENGTH 4 #define max(x,y) ((x)>(y)?(x):(y)) //global variables char *string_A; char *string_B; char *unique_chars_C; //unique alphabets int c_len; short **P_Matrix; short **DP_Results; //to store the DP values //function prototypes int get_index_of_character(char *str,char x, int len); void print_matrix(short **x, int row, int col); void calc_P_matrix_v2(short **P, char *b, int len_b, char *c, int len_c); short lcs_yang_v2(short **DP, short **P, char *A, char *B, char *C, int m, int n, int u); short lcs(short **DP, char *A, char *B, int m, int n); int get_index_of_character(char *str,char x, int len) { for(int i=0;i<len;i++) { if(str[i]== x) { return i; } } return -1;//not found the character x in str } void print_matrix(short **x, int row, int col) { for(int i=0;i<row;i++) { for(int j=0;j<col;j++) { printf("%d ",x[i][j]); } printf("\n"); } } void calc_P_matrix_v2(short **P, char *b, int len_b, char *c, int len_c) { #pragma omp parallel for for(int i=0;i<len_c;i++) { for(int j=1;j<len_b+1;j++) { if(b[j-1]==c[i]) { P[i][j] = j; } else { P[i][j] = P[i][j-1]; } } } } short lcs_yang_v2(short **DP, short **P, char *A, char *B, char *C, int m, int n, int u) { for(int i=1;i<m+1;i++) { int c_i = get_index_of_character(C,A[i-1],u); int t,s; #pragma omp parallel for private(t,s) schedule(static) for(int j=0;j<n+1;j++) { t= (0-P[c_i][j])<0; s= (0 - (DP[i-1][j] - (t*DP[i-1][P[c_i][j]-1]) )); DP[i][j] = ((t^1)||(s^0))*(DP[i-1][j]) + (!((t^1)||(s^0)))*(DP[i-1][P[c_i][j]-1] + 1); } } return DP[m][n]; } short lcs(short **DP, char *A, char *B, int m, int n) { for(int i=1;i<(m+1);i++) { for(int j=1;j<(n+1);j++) { if(A[i-1] == B[j-1]) { DP[i][j] = DP[i-1][j-1] + 1; } else { DP[i][j] = max(DP[i-1][j],DP[i][j-1]); } } } return DP[m][n]; } int main(int argc, char *argv[]) { if(argc <= 1){ printf("Error: No input file specified! 
Please specify the input file, and run again!\n"); return 0; } printf("\nYour input file: %s \n",argv[1]); FILE *fp; int len_a,len_b; double start_time, stop_time; fp = fopen(argv[1], "r"); fscanf(fp, "%d %d %d", &len_a, &len_b, &c_len); printf("1 : %d %d %d\n", len_a, len_b, c_len ); string_A = (char *)malloc((len_a+1) * sizeof(char *)); string_B = (char *)malloc((len_b+1) * sizeof(char *)); unique_chars_C = (char *)malloc((c_len+1) * sizeof(char *)); fscanf(fp, "%s %s %s", string_A,string_B,unique_chars_C); // printf("Strings : %s\n %s\n %s\n", string_A, string_B, unique_chars_C ); //allocate memory for DP Results DP_Results = (short **)malloc((len_a+1) * sizeof(short *)); for(int k=0;k<len_a+1;k++) { DP_Results[k] = (short *)calloc((len_b+1), sizeof(short)); } //allocate memory for P_Matrix array P_Matrix = (short **)malloc(c_len * sizeof(short *)); for(int k=0;k<c_len;k++) { P_Matrix[k] = (short *)calloc((len_b+1), sizeof(short)); } calc_P_matrix_v2(P_Matrix,string_B,len_b,unique_chars_C,c_len); //resetting DP to zero values for(int k=0;k<len_a+1;k++) { //memset(DP_Results[k],0,len_b+1); for(int l=0;l<len_b+1;l++) { DP_Results[k][l]=0; } } // printf("\n"); start_time = omp_get_wtime(); calc_P_matrix_v2(P_Matrix,string_B,len_b,unique_chars_C,c_len); int res = lcs_yang_v2(DP_Results,P_Matrix,string_A,string_B,unique_chars_C,len_a,len_b,c_len); //printf("lcs_yang_v2 is: %d\n",res); stop_time = omp_get_wtime(); printf("lcs_yang_v2 is: %d\n",res); printf("total time taken: %lf\n",stop_time-start_time); //deallocate pointers free(P_Matrix); free(DP_Results); return 0; }
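The inner loop of lcs_yang_v2 above is deliberately branch-free: t records whether P[c_i][j] is nonzero, s records whether DP[i-1][j] differs from t*DP[i-1][P[c_i][j]-1], and the last line arithmetically selects between DP[i-1][j] and DP[i-1][P[c_i][j]-1] + 1. A reduced sketch of that select (our helper, not part of the original file):

/* Branch-free two-way select as used in lcs_yang_v2: returns `keep`
   when P[c_i][j] was zero (t == 0) or the two candidates differ
   (s != 0), and `take` otherwise. Illustrative only. */
static short branchless_select (int t, int s, short keep, short take)
{
    int cond = ((t ^ 1) || (s ^ 0)) ;   /* note: s ^ 0 is just s */
    return (short) (cond * keep + (!cond) * take) ;
}

Because every iteration reads only row i-1 of DP, the j loop carries no dependency, which is what makes the #pragma omp parallel for safe; declaring t and s inside the loop body would have made the private(t,s) clause unnecessary.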
/**** Author: Rayhan Shikder, email: shikderr@myumanitoba.ca MSc Student, Department of Computer Science, University of Manitoba, Winnipeg, MB, Canada ****/ #include<stdio.h> #include<string.h> #include <stdlib.h> #include<time.h> #include "omp.h" // macros #define ALPHABET_LENGTH 4 #define max(x,y) ((x)>(y)?(x):(y)) // global variables char *string_A; char *string_B; char *unique_chars_C; //unique alphabets int c_len; short **P_Matrix; short **DP_Results; //to store the DP values // function prototypes int get_index_of_character(char *str, char x, int len); void print_matrix(short **x, int row, int col); void calc_P_matrix_v2(short **P, char *b, int len_b, char *c, int len_c); short lcs_yang_v2(short **DP, short **P, char *A, char *B, char *C, int m, int n, int u); short lcs(short **DP, char *A, char *B, int m, int n); int get_index_of_character(char *str, char x, int len) { for (int i = 0; i < len; i++) { if (str[i] == x) { return i; } } return -1; //not found the character x in str } void print_matrix(short **x, int row, int col) { for (int i = 0; i < row; i++) { for (int j = 0; j < col; j++) { printf("%d ", x[i][j]); } printf("\n"); } } void calc_P_matrix_v2(short **P, char *b, int len_b, char *c, int len_c) { for (int i = 0; i < len_c; i++) { for (int j = 1; j < len_b + 1; j++) { if (b[j - 1] == c[i]) { P[i][j] = j; } else { P[i][j] = P[i][j - 1]; } } } } short lcs_yang_v2(short **DP, short **P, char *A, char *B, char *C, int m, int n, int u) { for (int i = 1; i < m + 1; i++) { int c_i = get_index_of_character(C, A[i - 1], u); int t, s; for (int j = 0; j < n + 1; j++) { t = (0 - P[c_i][j]) < 0; s = (0 - (DP[i - 1][j] - (t * DP[i - 1][P[c_i][j] - 1]))); DP[i][j] = ((t ^ 1) || (s ^ 0)) * (DP[i - 1][j]) + (!((t ^ 1) || (s ^ 0))) * (DP[i - 1][P[c_i][j] - 1] + 1); } } return DP[m][n]; } short lcs(short **DP, char *A, char *B, int m, int n) { for (int i = 1; i < (m + 1); i++) { for (int j = 1; j < (n + 1); j++) { if (A[i - 1] == B[j - 1]) { DP[i][j] = DP[i - 1][j - 1] + 1; } else { DP[i][j] = max(DP[i - 1][j], DP[i][j - 1]); } } } return DP[m][n]; } int main(int argc, char *argv[]) { if (argc <= 1) { printf("Error: No input file specified! 
Please specify the input file, and run again!\n"); return 0; } printf("\nYour input file: %s \n", argv[1]); FILE *fp; int len_a, len_b; double start_time, stop_time; fp = fopen(argv[1], "r"); fscanf(fp, "%d %d %d", &len_a, &len_b, &c_len); printf("1 : %d %d %d\n", len_a, len_b, c_len); string_A = (char *)malloc((len_a + 1) * sizeof(char *)); string_B = (char *)malloc((len_b + 1) * sizeof(char *)); unique_chars_C = (char *)malloc((c_len + 1) * sizeof(char *)); fscanf(fp, "%s %s %s", string_A, string_B, unique_chars_C); //printf("Strings : %s\n %s\n %s\n", string_A, string_B, unique_chars_C); //allocate memory for DP Results DP_Results = (short **)malloc((len_a + 1) * sizeof(short *)); for (int k = 0; k < len_a + 1; k++) { DP_Results[k] = (short *)calloc((len_b + 1), sizeof(short)); } //allocate memory for P_Matrix array P_Matrix = (short **)malloc(c_len * sizeof(short *)); for (int k = 0; k < c_len; k++) { P_Matrix[k] = (short *)calloc((len_b + 1), sizeof(short)); } calc_P_matrix_v2(P_Matrix, string_B, len_b, unique_chars_C, c_len); //resetting DP to zero values for (int k = 0; k < len_a + 1; k++) { //memset(DP_Results[k], 0, len_b + 1); for (int l = 0; l < len_b + 1; l++) { DP_Results[k][l] = 0; } } //printf("\n"); start_time = omp_get_wtime(); calc_P_matrix_v2(P_Matrix, string_B, len_b, unique_chars_C, c_len); int res = lcs_yang_v2(DP_Results, P_Matrix, string_A, string_B, unique_chars_C, len_a, len_b, c_len); //printf("lcs_yang_v2 is: %d\n", res); stop_time = omp_get_wtime(); printf("lcs_yang_v2 is: %d\n", res); printf("total time taken: %lf\n", stop_time - start_time); //deallocate pointers free(P_Matrix); free(DP_Results); return 0; }
/**** Author: Rayhan Shikder, email: shikderr@myumanitoba.ca MSc Student, Department of Computer Science, University of Manitoba, Winnipeg, MB, Canada ****/ #include<stdio.h> #include<string.h> #include <stdlib.h> #include<time.h> #include "omp.h" // macros #define ALPHABET_LENGTH 4 #define max(x,y) ((x)>(y)?(x):(y)) // global variables char *string_A; char *string_B; char *unique_chars_C; //unique alphabets int c_len; short **P_Matrix; short **DP_Results; //to store the DP values // function prototypes int get_index_of_character(char *str, char x, int len); void print_matrix(short **x, int row, int col); void calc_P_matrix_v2(short **P, char *b, int len_b, char *c, int len_c); short lcs_yang_v2(short **DP, short **P, char *A, char *B, char *C, int m, int n, int u); short lcs(short **DP, char *A, char *B, int m, int n); int get_index_of_character(char *str, char x, int len) { for (int i = 0; i < len; i++) { if (str[i] == x) { return i; } } return -1; //not found the character x in str } void print_matrix(short **x, int row, int col) { for (int i = 0; i < row; i++) { for (int j = 0; j < col; j++) { printf("%d ", x[i][j]); } printf("\n"); } } void calc_P_matrix_v2(short **P, char *b, int len_b, char *c, int len_c) { #pragma omp parallel for for (int i = 0; i < len_c; i++) { for (int j = 1; j < len_b + 1; j++) { if (b[j - 1] == c[i]) { P[i][j] = j; } else { P[i][j] = P[i][j - 1]; } } } } short lcs_yang_v2(short **DP, short **P, char *A, char *B, char *C, int m, int n, int u) { for (int i = 1; i < m + 1; i++) { int c_i = get_index_of_character(C, A[i - 1], u); int t, s; #pragma omp parallel for private(t,s) schedule(static) for (int j = 0; j < n + 1; j++) { t = (0 - P[c_i][j]) < 0; s = (0 - (DP[i - 1][j] - (t * DP[i - 1][P[c_i][j] - 1]))); DP[i][j] = ((t ^ 1) || (s ^ 0)) * (DP[i - 1][j]) + (!((t ^ 1) || (s ^ 0))) * (DP[i - 1][P[c_i][j] - 1] + 1); } } return DP[m][n]; } short lcs(short **DP, char *A, char *B, int m, int n) { for (int i = 1; i < (m + 1); i++) { for (int j = 1; j < (n + 1); j++) { if (A[i - 1] == B[j - 1]) { DP[i][j] = DP[i - 1][j - 1] + 1; } else { DP[i][j] = max(DP[i - 1][j], DP[i][j - 1]); } } } return DP[m][n]; } int main(int argc, char *argv[]) { if (argc <= 1) { printf("Error: No input file specified! 
Please specify the input file, and run again!\n"); return 0; } printf("\nYour input file: %s \n", argv[1]); FILE *fp; int len_a, len_b; double start_time, stop_time; fp = fopen(argv[1], "r"); fscanf(fp, "%d %d %d", &len_a, &len_b, &c_len); printf("1 : %d %d %d\n", len_a, len_b, c_len); string_A = (char *)malloc((len_a + 1) * sizeof(char *)); string_B = (char *)malloc((len_b + 1) * sizeof(char *)); unique_chars_C = (char *)malloc((c_len + 1) * sizeof(char *)); fscanf(fp, "%s %s %s", string_A, string_B, unique_chars_C); //printf("Strings : %s\n %s\n %s\n", string_A, string_B, unique_chars_C); //allocate memory for DP Results DP_Results = (short **)malloc((len_a + 1) * sizeof(short *)); for (int k = 0; k < len_a + 1; k++) { DP_Results[k] = (short *)calloc((len_b + 1), sizeof(short)); } //allocate memory for P_Matrix array P_Matrix = (short **)malloc(c_len * sizeof(short *)); for (int k = 0; k < c_len; k++) { P_Matrix[k] = (short *)calloc((len_b + 1), sizeof(short)); } calc_P_matrix_v2(P_Matrix, string_B, len_b, unique_chars_C, c_len); //resetting DP to zero values for (int k = 0; k < len_a + 1; k++) { //memset(DP_Results[k], 0, len_b + 1); for (int l = 0; l < len_b + 1; l++) { DP_Results[k][l] = 0; } } //printf("\n"); start_time = omp_get_wtime(); calc_P_matrix_v2(P_Matrix, string_B, len_b, unique_chars_C, c_len); int res = lcs_yang_v2(DP_Results, P_Matrix, string_A, string_B, unique_chars_C, len_a, len_b, c_len); //printf("lcs_yang_v2 is: %d\n", res); stop_time = omp_get_wtime(); printf("lcs_yang_v2 is: %d\n", res); printf("total time taken: %lf\n", stop_time - start_time); //deallocate pointers free(P_Matrix); free(DP_Results); return 0; }
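All three variants hinge on the P matrix: P[i][j] is the largest index j' <= j with B[j'-1] == C[i], or 0 when C[i] does not occur in B[0..j-1]. Precomputing it is what removes the column-to-column dependency from each DP row. A standalone driver showing the recurrence on a tiny input (our example; same recurrence as calc_P_matrix_v2):

#include <stdio.h>

int main (void)
{
    const char *b = "abab";   /* string B          */
    const char *c = "ab";     /* unique alphabet C */
    int len_b = 4, len_c = 2;
    short P[2][5] = {{0}};    /* column 0 stays 0  */
    for (int i = 0; i < len_c; i++)
        for (int j = 1; j < len_b + 1; j++)
            P[i][j] = (b[j - 1] == c[i]) ? (short) j : P[i][j - 1];
    for (int i = 0; i < len_c; i++) {
        printf("P['%c'] =", c[i]);
        for (int j = 0; j < len_b + 1; j++)
            printf(" %d", P[i][j]);
        printf("\n");   /* prints P['a'] = 0 1 1 3 3 and P['b'] = 0 0 2 2 4 */
    }
    return 0;
}

Since each row of P depends only on the characters of B, the rows are mutually independent, which is why calc_P_matrix_v2 can put #pragma omp parallel for on the i loop.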
my_sgemm.c
#if defined(_WIN32) #else /* * -------------------------------------------------------------------------- * BLISLAB * -------------------------------------------------------------------------- * Copyright (C) 2016, The University of Texas at Austin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * - Neither the name of The University of Texas nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * * bl_sgemm.c * * * Purpose: * this is the main file of blislab sgemm. * * Todo: * * * Modification: * * * */ #include <stdio.h> #include <omp.h> #include "bl_config.h" #include "bl_sgemm_kernel.h" #include "bl_sgemm.h" #define min( i, j ) ( (i)<(j) ? (i): (j) ) inline void packA_mcxkc_d( int m, int k, float *XA, int ldXA, int offseta, float *packA ) { int i, p; float *a_pntr[ SGEMM_MR ]; for ( i = 0; i < m; i ++ ) { a_pntr[ i ] = XA + ( offseta + i ); } for ( i = m; i < SGEMM_MR; i ++ ) { a_pntr[ i ] = XA + ( offseta + 0 ); } for ( p = 0; p < k; p ++ ) { for ( i = 0; i < SGEMM_MR; i ++ ) { *packA = *a_pntr[ i ]; packA ++; a_pntr[ i ] = a_pntr[ i ] + ldXA; } } } /* * -------------------------------------------------------------------------- */ inline void packB_kcxnc_d( int n, int k, float *XB, int ldXB, // ldXB is the original k int offsetb, float *packB ) { int j, p; float *b_pntr[ SGEMM_NR ]; for ( j = 0; j < n; j ++ ) { b_pntr[ j ] = XB + ldXB * ( offsetb + j ); } for ( j = n; j < SGEMM_NR; j ++ ) { b_pntr[ j ] = XB + ldXB * ( offsetb + 0 ); } for ( p = 0; p < k; p ++ ) { for ( j = 0; j < SGEMM_NR; j ++ ) { *packB ++ = *b_pntr[ j ] ++; } } } /* * -------------------------------------------------------------------------- */ void bl_macro_kernel( int m, int n, int k, float *packA, float *packB, float *C, int ldc ) { int bl_ic_nt; int i, ii, j; aux_t aux; char *str; aux.b_next = packB; // We can also parallelize with OMP here. 
//// sequential is the default situation //bl_ic_nt = 1; //// check the environment variable //str = getenv( "BLISLAB_IC_NT" ); //if ( str != NULL ) { // bl_ic_nt = (int)strtol( str, NULL, 10 ); //} //#pragma omp parallel for num_threads( bl_ic_nt ) private( j, i, aux ) for ( j = 0; j < n; j += SGEMM_NR ) { // 2-th loop around micro-kernel aux.n = min( n - j, SGEMM_NR ); for ( i = 0; i < m; i += SGEMM_MR ) { // 1-th loop around micro-kernel aux.m = min( m - i, SGEMM_MR ); if ( i + SGEMM_MR >= m ) { aux.b_next += SGEMM_NR * k; } ( *bl_micro_kernel ) ( k, &packA[ i * k ], &packB[ j * k ], &C[ j * ldc + i ], (unsigned long long) ldc, &aux ); } // 1-th loop around micro-kernel } // 2-th loop around micro-kernel } // C must be aligned void bl_sgemm( int m, int n, int k, float *XA, int lda, float *XB, int ldb, float *C, // must be aligned int ldc // ldc must also be aligned ) { int i, j, p, bl_ic_nt; int ic, ib, jc, jb, pc, pb; int ir, jr; float *packA, *packB; char *str; // Early return if possible if ( m == 0 || n == 0 || k == 0 ) { printf( "bl_sgemm(): early return\n" ); return; } // sequential is the default situation bl_ic_nt = 1; // check the environment variable //str = getenv( "BLISLAB_IC_NT" ); //if ( str != NULL ) { // bl_ic_nt = (int)strtol( str, NULL, 10 ); //} // Allocate packing buffers packA = bl_malloc_aligned( SGEMM_KC, ( SGEMM_MC + 1 ) * bl_ic_nt, sizeof(float) ); packB = bl_malloc_aligned( SGEMM_KC, ( SGEMM_NC + 1 ) , sizeof(float) ); for ( jc = 0; jc < n; jc += SGEMM_NC ) { // 5-th loop around micro-kernel jb = min( n - jc, SGEMM_NC ); for ( pc = 0; pc < k; pc += SGEMM_KC ) { // 4-th loop around micro-kernel pb = min( k - pc, SGEMM_KC ); #pragma omp parallel for num_threads( bl_ic_nt ) private( jr ) for ( j = 0; j < jb; j += SGEMM_NR ) { packB_kcxnc_d( min( jb - j, SGEMM_NR ), pb, &XB[ pc ], k, // should be ldXB instead jc + j, &packB[ j * pb ] ); } #pragma omp parallel for num_threads( bl_ic_nt ) private( ic, ib, i, ir ) for ( ic = 0; ic < m; ic += SGEMM_MC ) { // 3-rd loop around micro-kernel int tid = omp_get_thread_num(); ib = min( m - ic, SGEMM_MC ); for ( i = 0; i < ib; i += SGEMM_MR ) { packA_mcxkc_d( min( ib - i, SGEMM_MR ), pb, &XA[ pc * lda ], m, ic + i, &packA[ tid * SGEMM_MC * pb + i * pb ] ); } bl_macro_kernel( ib, jb, pb, packA + tid * SGEMM_MC * pb, packB, &C[ jc * ldc + ic ], ldc ); } // End 3.rd loop around micro-kernel } // End 4.th loop around micro-kernel } // End 5.th loop around micro-kernel free( packA ); free( packB ); } #endif
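packA_mcxkc_d and packB_kcxnc_d above rearrange panels of A and B so the micro-kernel can stream both with unit stride; short edge tiles are padded by re-reading row (or column) 0, so the kernel may always assume a full SGEMM_MR x SGEMM_NR tile. A condensed sketch of the A-side layout (our helper; the offseta argument is folded into the A pointer here):

/* Pack an m x k tile of column-major A (leading dimension lda) as k
   columns of mr contiguous elements, replicating row 0 when m < mr,
   matching packA_mcxkc_d above. Illustrative only. */
static void pack_a_tile (int m, int k, int mr,
    const float *A, int lda, float *pack)
{
    for (int p = 0; p < k; p++)
        for (int i = 0; i < mr; i++)
            *pack++ = A [p * lda + (i < m ? i : 0)];
}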
#if defined(_WIN32) #else /* * -------------------------------------------------------------------------- * BLISLAB * -------------------------------------------------------------------------- * Copyright (C) 2016, The University of Texas at Austin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. - Neither the name * of The University of Texas nor the names of its contributors may be used * to endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * * bl_sgemm.c * * * Purpose: this is the main file of blislab sgemm. * * Todo: * * * Modification: * * */ #include <stdio.h> #include <omp.h> #include "bl_config.h" #include "bl_sgemm_kernel.h" #include "bl_sgemm.h" #define min( i, j ) ( (i)<(j) ? (i): (j) ) inline void packA_mcxkc_d( int m, int k, float *XA, int ldXA, int offseta, float *packA ) { int i, p; float *a_pntr[SGEMM_MR]; for (i = 0; i < m; i++) { a_pntr[i] = XA + (offseta + i); } for (i = m; i < SGEMM_MR; i++) { a_pntr[i] = XA + (offseta + 0); } for (p = 0; p < k; p++) { for (i = 0; i < SGEMM_MR; i++) { *packA = *a_pntr[i]; packA++; a_pntr[i] = a_pntr[i] + ldXA; } } } /* * -------------------------------------------------------------------------- */ inline void packB_kcxnc_d( int n, int k, float *XB, int ldXB, //ldXB is the original k int offsetb, float *packB ) { int j, p; float *b_pntr[SGEMM_NR]; for (j = 0; j < n; j++) { b_pntr[j] = XB + ldXB * (offsetb + j); } for (j = n; j < SGEMM_NR; j++) { b_pntr[j] = XB + ldXB * (offsetb + 0); } for (p = 0; p < k; p++) { for (j = 0; j < SGEMM_NR; j++) { *packB++ = *b_pntr[j]++; } } } /* * -------------------------------------------------------------------------- */ void bl_macro_kernel( int m, int n, int k, float *packA, float *packB, float *C, int ldc ) { int bl_ic_nt; int i, ii, j; aux_t aux; char *str; aux.b_next = packB; //We can also parallelize with OMP here. 
//// sequential is the default situation // bl_ic_nt = 1; ////check the environment variable // str = getenv("BLISLAB_IC_NT"); //if (str != NULL) { //bl_ic_nt = (int)strtol(str, NULL, 10); // } // for (j = 0; j < n; j += SGEMM_NR) { //2 - th loop around micro - kernel aux.n = min(n - j, SGEMM_NR); for (i = 0; i < m; i += SGEMM_MR) { //1 - th loop around micro - kernel aux.m = min(m - i, SGEMM_MR); if (i + SGEMM_MR >= m) { aux.b_next += SGEMM_NR * k; } (*bl_micro_kernel) ( k, &packA[i * k], &packB[j * k], &C[j * ldc + i], (unsigned long long)ldc, &aux ); } //1 - th loop around micro - kernel } //2 - th loop around micro - kernel } //C must be aligned void bl_sgemm( int m, int n, int k, float *XA, int lda, float *XB, int ldb, float *C, //must be aligned int ldc // ldc must also be aligned ) { int i, j, p, bl_ic_nt; int ic, ib, jc, jb, pc, pb; int ir, jr; float *packA, *packB; char *str; //Early return if possible if (m == 0 || n == 0 || k == 0) { printf("bl_sgemm(): early return\n"); return; } //sequential is the default situation bl_ic_nt = 1; //check the environment variable // str = getenv("BLISLAB_IC_NT"); //if (str != NULL) { //bl_ic_nt = (int)strtol(str, NULL, 10); // } //Allocate packing buffers packA = bl_malloc_aligned(SGEMM_KC, (SGEMM_MC + 1) * bl_ic_nt, sizeof(float)); packB = bl_malloc_aligned(SGEMM_KC, (SGEMM_NC + 1), sizeof(float)); for (jc = 0; jc < n; jc += SGEMM_NC) { //5 - th loop around micro - kernel jb = min(n - jc, SGEMM_NC); for (pc = 0; pc < k; pc += SGEMM_KC) { //4 - th loop around micro - kernel pb = min(k - pc, SGEMM_KC); for (j = 0; j < jb; j += SGEMM_NR) { packB_kcxnc_d( min(jb - j, SGEMM_NR), pb, &XB[pc], k, //should be ldXB instead jc + j, &packB[j * pb] ); } for (ic = 0; ic < m; ic += SGEMM_MC) { //3 - rd loop around micro - kernel int tid = omp_get_thread_num(); ib = min(m - ic, SGEMM_MC); for (i = 0; i < ib; i += SGEMM_MR) { packA_mcxkc_d( min(ib - i, SGEMM_MR), pb, &XA[pc * lda], m, ic + i, &packA[tid * SGEMM_MC * pb + i * pb] ); } bl_macro_kernel( ib, jb, pb, packA + tid * SGEMM_MC * pb, packB, &C[jc * ldc + ic], ldc ); } //End 3. rd loop around micro - kernel } //End 4. th loop around micro - kernel } //End 5. th loop around micro - kernel free(packA); free(packB); }
#endif /* !defined(_WIN32): closes the #if/#else guard opened at the top of this file; this variant was missing its closing #endif */
#if defined(_WIN32) #else /* * -------------------------------------------------------------------------- * BLISLAB * -------------------------------------------------------------------------- * Copyright (C) 2016, The University of Texas at Austin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. - Neither the name * of The University of Texas nor the names of its contributors may be used * to endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * * bl_sgemm.c * * * Purpose: this is the main file of blislab sgemm. * * Todo: * * * Modification: * * */ #include <stdio.h> #include <omp.h> #include "bl_config.h" #include "bl_sgemm_kernel.h" #include "bl_sgemm.h" #define min( i, j ) ( (i)<(j) ? (i): (j) ) inline void packA_mcxkc_d( int m, int k, float *XA, int ldXA, int offseta, float *packA ) { int i, p; float *a_pntr[SGEMM_MR]; for (i = 0; i < m; i++) { a_pntr[i] = XA + (offseta + i); } for (i = m; i < SGEMM_MR; i++) { a_pntr[i] = XA + (offseta + 0); } for (p = 0; p < k; p++) { for (i = 0; i < SGEMM_MR; i++) { *packA = *a_pntr[i]; packA++; a_pntr[i] = a_pntr[i] + ldXA; } } } /* * -------------------------------------------------------------------------- */ inline void packB_kcxnc_d( int n, int k, float *XB, int ldXB, //ldXB is the original k int offsetb, float *packB ) { int j, p; float *b_pntr[SGEMM_NR]; for (j = 0; j < n; j++) { b_pntr[j] = XB + ldXB * (offsetb + j); } for (j = n; j < SGEMM_NR; j++) { b_pntr[j] = XB + ldXB * (offsetb + 0); } for (p = 0; p < k; p++) { for (j = 0; j < SGEMM_NR; j++) { *packB++ = *b_pntr[j]++; } } } /* * -------------------------------------------------------------------------- */ void bl_macro_kernel( int m, int n, int k, float *packA, float *packB, float *C, int ldc ) { int bl_ic_nt; int i, ii, j; aux_t aux; char *str; aux.b_next = packB; //We can also parallelize with OMP here. 
//// sequential is the default situation // bl_ic_nt = 1; ////check the environment variable // str = getenv("BLISLAB_IC_NT"); //if (str != NULL) { //bl_ic_nt = (int)strtol(str, NULL, 10); // } // #pragma omp parallel for num_threads( bl_ic_nt ) private( j, i, aux ) for (j = 0; j < n; j += SGEMM_NR) { //2 - th loop around micro - kernel aux.n = min(n - j, SGEMM_NR); for (i = 0; i < m; i += SGEMM_MR) { //1 - th loop around micro - kernel aux.m = min(m - i, SGEMM_MR); if (i + SGEMM_MR >= m) { aux.b_next += SGEMM_NR * k; } (*bl_micro_kernel) ( k, &packA[i * k], &packB[j * k], &C[j * ldc + i], (unsigned long long)ldc, &aux ); } //1 - th loop around micro - kernel } //2 - th loop around micro - kernel } //C must be aligned void bl_sgemm( int m, int n, int k, float *XA, int lda, float *XB, int ldb, float *C, //must be aligned int ldc // ldc must also be aligned ) { int i, j, p, bl_ic_nt; int ic, ib, jc, jb, pc, pb; int ir, jr; float *packA, *packB; char *str; //Early return if possible if (m == 0 || n == 0 || k == 0) { printf("bl_sgemm(): early return\n"); return; } //sequential is the default situation bl_ic_nt = 1; //check the environment variable // str = getenv("BLISLAB_IC_NT"); //if (str != NULL) { //bl_ic_nt = (int)strtol(str, NULL, 10); // } //Allocate packing buffers packA = bl_malloc_aligned(SGEMM_KC, (SGEMM_MC + 1) * bl_ic_nt, sizeof(float)); packB = bl_malloc_aligned(SGEMM_KC, (SGEMM_NC + 1), sizeof(float)); for (jc = 0; jc < n; jc += SGEMM_NC) { //5 - th loop around micro - kernel jb = min(n - jc, SGEMM_NC); for (pc = 0; pc < k; pc += SGEMM_KC) { //4 - th loop around micro - kernel pb = min(k - pc, SGEMM_KC); #pragma omp parallel for num_threads( bl_ic_nt ) private( jr ) for (j = 0; j < jb; j += SGEMM_NR) { packB_kcxnc_d( min(jb - j, SGEMM_NR), pb, &XB[pc], k, //should be ldXB instead jc + j, &packB[j * pb] ); } #pragma omp parallel for num_threads( bl_ic_nt ) private( ic, ib, i, ir ) for (ic = 0; ic < m; ic += SGEMM_MC) { //3 - rd loop around micro - kernel int tid = omp_get_thread_num(); ib = min(m - ic, SGEMM_MC); for (i = 0; i < ib; i += SGEMM_MR) { packA_mcxkc_d( min(ib - i, SGEMM_MR), pb, &XA[pc * lda], m, ic + i, &packA[tid * SGEMM_MC * pb + i * pb] ); } bl_macro_kernel( ib, jb, pb, packA + tid * SGEMM_MC * pb, packB, &C[jc * ldc + ic], ldc ); } //End 3. rd loop around micro - kernel } //End 4. th loop around micro - kernel } //End 5. th loop around micro - kernel free(packA); free(packB); }
#endif /* !defined(_WIN32): closes the #if/#else guard opened at the top of this file; this variant was missing its closing #endif */
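The loop nest in bl_sgemm is the classic five-loop GotoBLAS/BLIS blocking: NC-wide column panels of C and B, KC-deep rank-k slabs, MC-tall row panels of A, and NR x MR micro-tiles inside the macro-kernel. A runnable, unoptimized rendering of that structure, with packing and the SIMD micro-kernel replaced by a scalar inner product (our sketch; the block sizes are placeholders, not BLISLAB's tuned SGEMM_* values):

#define MIN(a,b) ((a) < (b) ? (a) : (b))
enum { NC = 64, KC = 64, MC = 64 };   /* placeholder block sizes */

/* C (m x n) += A (m x k) * B (k x n), all column-major. */
static void sgemm_blocked (int m, int n, int k,
    const float *A, int lda, const float *B, int ldb,
    float *C, int ldc)
{
    for (int jc = 0; jc < n; jc += NC)                /* 5th loop */
      for (int pc = 0; pc < k; pc += KC)              /* 4th loop */
        for (int ic = 0; ic < m; ic += MC)            /* 3rd loop */
          for (int jr = jc; jr < MIN(jc + NC, n); jr++)      /* macro- */
            for (int ir = ic; ir < MIN(ic + MC, m); ir++)    /* kernel */
            {
                float acc = C [jr * ldc + ir];
                for (int p = pc; p < MIN(pc + KC, k); p++)   /* "micro-kernel" */
                    acc += A [p * lda + ir] * B [jr * ldb + p];
                C [jr * ldc + ir] = acc;
            }
}

The blocking exists so that, in the real code, the packed B panel fits in shared cache, each thread's packed A panel fits in its private cache, and a micro-tile of C stays in registers; that is also why the parallel variant above gives each OpenMP thread its own packA slice (packA + tid * SGEMM_MC * pb) while all threads share one packB.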
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/LoopHint.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class VersionTuple; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. /// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. 
Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. mutable IdentifierInfo *Ident_instancetype; /// \brief Identifier for "introduced". IdentifierInfo *Ident_introduced; /// \brief Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// \brief Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// \brief Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// \brief Identifier for "message". IdentifierInfo *Ident_message; /// \brief Identifier for "strict". IdentifierInfo *Ident_strict; /// \brief Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++0x contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// \brief When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. 
bool InMessageExpression; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// \brief RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } unsigned getDepth() const { return Depth; } }; /// Factory object for creating AttributeList objects. AttributeFactory AttrFactory; /// \brief Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; /// \brief Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion /// and balanced tokens must be handled using the specific consume methods. /// Returns the location of the consumed token. 
SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.getKind() == tok::l_paren || Tok.getKind() == tok::r_paren; } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.getKind() == tok::l_square || Tok.getKind() == tok::r_square; } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.getKind() == tok::l_brace || Tok.getKind() == tok::r_brace; } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion); } /// \brief Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '=' bool isTokenEqualOrEqualTypo(); /// \brief Return the current token to the token stream and make the given /// token the current token. void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed); PP.Lex(Tok); PP.EnterToken(Next); } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); return ConsumeToken(); } /// ConsumeParen - This consume method keeps the paren count up-to-date. /// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) --ParenCount; // Don't let unbalanced )'s drive the count negative. PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. 
/// SourceLocation ConsumeBracket() { assert(isTokenBracket() && "wrong consume method"); if (Tok.getKind() == tok::l_square) ++BracketCount; else if (BracketCount) --BracketCount; // Don't let unbalanced ]'s drive the count negative. PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBrace - This consume method keeps the brace count up-to-date. /// SourceLocation ConsumeBrace() { assert(isTokenBrace() && "wrong consume method"); if (Tok.getKind() == tok::l_brace) ++BraceCount; else if (BraceCount) --BraceCount; // Don't let unbalanced }'s drive the count negative. PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeStringToken - Consume the current 'peek token', lexing a new one /// and returning the token kind. This method is specific to strings, as it /// handles string literal concatenation, as per C99 5.1.1.2, translation /// phase #6. SourceLocation ConsumeStringToken() { assert(isTokenStringLiteral() && "Should only consume string literals with this method"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// \brief Consume the current code-completion token. /// /// This routine can be called to consume the code-completion token and /// continue processing in special cases where \c cutOffParsing() isn't /// desired, such as token caching or completion with lookahead. SourceLocation ConsumeCodeCompletionToken() { assert(Tok.is(tok::code_completion)); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } ///\ brief When we are consuming a code-completion token without having /// matched specific position in the grammar, provide code-completion results /// based on context. /// /// \returns the source location of the code-completion token. SourceLocation handleUnexpectedCodeCompletionToken(); /// \brief Abruptly cut off parsing; mainly used when we have reached the /// code-completion point. void cutOffParsing() { if (PP.isCodeCompletionEnabled()) PP.setCodeCompletionReached(); // Cut off parsing by acting as if we reached the end-of-file. Tok.setKind(tok::eof); } /// \brief Determine if we're at the end of the file or at a transition /// between modules. bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// \brief Initialize all pragma handlers. void initializePragmaHandlers(); /// \brief Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// \brief Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// \brief Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// \brief Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// \brief Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); /// \brief Handle the annotation token produced for /// #pragma comment... void HandlePragmaMSComment(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// \brief Handle the annotation token produced for /// #pragma align... 
void HandlePragmaAlign(); /// \brief Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// \brief Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// \brief Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// \brief Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// \brief Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// \brief Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// \brief Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// \brief Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. static ParsedType getTypeAnnotation(Token &Tok) { return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, ParsedType T) { Tok.setAnnotationValue(T.getAsOpaquePtr()); } /// \brief Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// \brief Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. 
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. bool isObjCInstancetype() { assert(getLangOpts().ObjC1); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// \brief Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... 
/// TPA.Revert(); /// class TentativeParsingAction { Parser &P; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. /// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// \brief The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// \brief The kind of extra semi diagnostic to emit. enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// \brief Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. 
bool expectIdentifier(); public: //===--------------------------------------------------------------------===// // Scope manipulation /// ParseScope - Introduces a new scope for parsing. The kind of /// scope is determined by ScopeFlags. Objects of this type should /// be created on the stack to coincide with the position where the /// parser enters the new scope, and this object's constructor will /// create that new scope. Similarly, once the object is destroyed /// the parser will exit the scope. class ParseScope { Parser *Self; ParseScope(const ParseScope &) = delete; void operator=(const ParseScope &) = delete; public: // ParseScope - Construct a new object to manage a scope in the // parser Self where the new Scope is created with the flags // ScopeFlags, but only when we aren't about to enter a compound statement. ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true, bool BeforeCompoundStmt = false) : Self(Self) { if (EnteredScope && !BeforeCompoundStmt) Self->EnterScope(ScopeFlags); else { if (BeforeCompoundStmt) Self->incrementMSManglingNumber(); this->Self = nullptr; } } // Exit - Exit the scope associated with this object now, rather // than waiting until the object is destroyed. void Exit() { if (Self) { Self->ExitScope(); Self = nullptr; } } ~ParseScope() { Exit(); } }; /// EnterScope - Start a new scope. void EnterScope(unsigned ScopeFlags); /// ExitScope - Pop a scope off the scope stack. void ExitScope(); private: /// \brief RAII object used to modify the scope flags for the current scope. class ParseScopeFlags { Scope *CurScope; unsigned OldFlags; ParseScopeFlags(const ParseScopeFlags &) = delete; void operator=(const ParseScopeFlags &) = delete; public: ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true); ~ParseScopeFlags(); }; //===--------------------------------------------------------------------===// // Diagnostic Emission and Error recovery. public: DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID); DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID); DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); } private: void SuggestParentheses(SourceLocation Loc, unsigned DK, SourceRange ParenRange); void CheckNestedObjCContexts(SourceLocation AtLoc); public: /// \brief Control flags for SkipUntil functions. enum SkipUntilFlags { StopAtSemi = 1 << 0, ///< Stop skipping at semicolon /// \brief Stop skipping at specified token, but don't skip the token itself StopBeforeMatch = 1 << 1, StopAtCodeCompletion = 1 << 2 ///< Stop at code completion }; friend constexpr SkipUntilFlags operator|(SkipUntilFlags L, SkipUntilFlags R) { return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) | static_cast<unsigned>(R)); } /// SkipUntil - Read tokens until we get to the specified token, then consume /// it (unless StopBeforeMatch is specified). Because we cannot guarantee /// that the token will ever occur, this skips to the next token, or to some /// likely good stopping point. If Flags has StopAtSemi flag, skipping will /// stop at a ';' character. /// /// If SkipUntil finds the specified token, it returns true, otherwise it /// returns false. 
  bool SkipUntil(tok::TokenKind T,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    return SkipUntil(llvm::makeArrayRef(T), Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2, T3};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

  /// SkipMalformedDecl - Read tokens until we get to some likely good stopping
  /// point for skipping past a simple-declaration.
  void SkipMalformedDecl();

private:
  //===--------------------------------------------------------------------===//
  // Lexing and parsing of C++ inline methods.

  struct ParsingClass;

  /// [class.mem]p1: "... the class is regarded as complete within
  /// - function bodies
  /// - default arguments
  /// - exception-specifications (TODO: C++0x)
  /// - and brace-or-equal-initializers for non-static data members
  /// (including such things in nested classes)."
  /// LateParsedDeclarations build the tree of those elements so they can
  /// be parsed after parsing the top-level class.
  class LateParsedDeclaration {
  public:
    virtual ~LateParsedDeclaration();

    virtual void ParseLexedMethodDeclarations();
    virtual void ParseLexedMemberInitializers();
    virtual void ParseLexedMethodDefs();
    virtual void ParseLexedAttributes();
  };

  /// Inner node of the LateParsedDeclaration tree that parses
  /// all its members recursively.
  class LateParsedClass : public LateParsedDeclaration {
  public:
    LateParsedClass(Parser *P, ParsingClass *C);
    ~LateParsedClass() override;

    void ParseLexedMethodDeclarations() override;
    void ParseLexedMemberInitializers() override;
    void ParseLexedMethodDefs() override;
    void ParseLexedAttributes() override;

  private:
    Parser *Self;
    ParsingClass *Class;
  };

  /// Contains the lexed tokens of an attribute with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  /// FIXME: Perhaps we should change the name of LateParsedDeclaration to
  /// LateParsedTokens.
  struct LateParsedAttribute : public LateParsedDeclaration {
    Parser *Self;
    CachedTokens Toks;
    IdentifierInfo &AttrName;
    SourceLocation AttrNameLoc;
    SmallVector<Decl*, 2> Decls;

    explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                                 SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

    void ParseLexedAttributes() override;

    void addDecl(Decl *D) { Decls.push_back(D); }
  };

  // A list of late-parsed attributes. Used by ParseGNUAttributes.
  class LateParsedAttrList : public SmallVector<LateParsedAttribute *, 2> {
  public:
    LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

    bool parseSoon() { return ParseSoon; }

  private:
    bool ParseSoon;  // Are we planning to parse these shortly after creation?
  };

  /// Contains the lexed tokens of a member function definition
  /// which needs to be parsed at the end of the class declaration
  /// after parsing all other member declarations.
  struct LexedMethod : public LateParsedDeclaration {
    Parser *Self;
    Decl *D;
    CachedTokens Toks;

    /// \brief Whether this member function had an associated template
    /// scope. When true, D is a template declaration; otherwise, it is a
    /// member function declaration.
    bool TemplateScope;

    explicit LexedMethod(Parser* P, Decl *MD)
      : Self(P), D(MD), TemplateScope(false) {}

    void ParseLexedMethodDefs() override;
  };

  /// LateParsedDefaultArgument - Keeps track of a parameter that may
  /// have a default argument that cannot be parsed yet because it
  /// occurs within a member function declaration inside the class
  /// (C++ [class.mem]p2).
  struct LateParsedDefaultArgument {
    explicit LateParsedDefaultArgument(
        Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

    /// Param - The parameter declaration for this parameter.
    Decl *Param;

    /// Toks - The sequence of tokens that comprises the default
    /// argument expression, not including the '=' or the terminating
    /// ')' or ','. This will be NULL for parameters that have no
    /// default argument.
    std::unique_ptr<CachedTokens> Toks;
  };

  /// LateParsedMethodDeclaration - A method declaration inside a class that
  /// contains at least one entity whose parsing needs to be delayed
  /// until the class itself is completely-defined, such as a default
  /// argument (C++ [class.mem]p2).
  struct LateParsedMethodDeclaration : public LateParsedDeclaration {
    explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), TemplateScope(false),
        ExceptionSpecTokens(nullptr) {}

    void ParseLexedMethodDeclarations() override;

    Parser* Self;

    /// Method - The method declaration.
    Decl *Method;

    /// \brief Whether this member function had an associated template
    /// scope. When true, D is a template declaration; otherwise, it is a
    /// member function declaration.
    bool TemplateScope;

    /// DefaultArgs - Contains the parameters of the function and
    /// their default arguments. At least one of the parameters will
    /// have a default argument, but all of the parameters of the
    /// method will be stored so that they can be reintroduced into
    /// scope at the appropriate times.
    SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

    /// \brief The set of tokens that make up an exception-specification that
    /// has not yet been parsed.
    CachedTokens *ExceptionSpecTokens;
  };

  /// LateParsedMemberInitializer - An initializer for a non-static class data
  /// member whose parsing must be delayed until the class is completely
  /// defined (C++11 [class.mem]p2).
  struct LateParsedMemberInitializer : public LateParsedDeclaration {
    LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

    void ParseLexedMemberInitializers() override;

    Parser *Self;

    /// Field - The field declaration.
    Decl *Field;

    /// CachedTokens - The sequence of tokens that comprises the initializer,
    /// including any leading '='.
    CachedTokens Toks;
  };

  /// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
  /// C++ class, its method declarations that contain parts that won't be
  /// parsed until after the definition is completed (C++ [class.mem]p2) are
  /// stored here, together with any attached inline definitions and the
  /// tokens that will be parsed to create those entities.
  typedef SmallVector<LateParsedDeclaration*, 2> LateParsedDeclarationsContainer;

  /// \brief Representation of a class that has been parsed, including
  /// any member function declarations or definitions that need to be
  /// parsed after the corresponding top-level class is complete.
  struct ParsingClass {
    ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), TemplateScope(false),
        IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }

    /// \brief Whether this is a "top-level" class, meaning that it is
    /// not nested within another class.
    bool TopLevelClass : 1;

    /// \brief Whether this class had an associated template
    /// scope. When true, TagOrTemplate is a template declaration;
    /// otherwise, it is a tag declaration.
    bool TemplateScope : 1;

    /// \brief Whether this class is an __interface.
    bool IsInterface : 1;

    /// \brief The class or class template whose definition we are parsing.
    Decl *TagOrTemplate;

    /// LateParsedDeclarations - Method declarations, inline definitions and
    /// nested classes that contain pieces whose parsing will be delayed until
    /// the top-level class is fully defined.
    LateParsedDeclarationsContainer LateParsedDeclarations;
  };

  /// \brief The stack of classes that is currently being
  /// parsed. Nested and local classes will be pushed onto this stack
  /// when they are parsed, and removed afterward.
  std::stack<ParsingClass *> ClassStack;

  ParsingClass &getCurrentClass() {
    assert(!ClassStack.empty() && "No lexed method stacks!");
    return *ClassStack.top();
  }

  /// \brief RAII object used to manage the parsing of a class definition.
  class ParsingClassDefinition {
    Parser &P;
    bool Popped;
    Sema::ParsingClassState State;

  public:
    ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                           bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
    }

    /// \brief Pop this class off the stack.
    void Pop() {
      assert(!Popped && "Nested class has already been popped");
      Popped = true;
      P.PopParsingClass(State);
    }

    ~ParsingClassDefinition() {
      if (!Popped)
        P.PopParsingClass(State);
    }
  };

  /// \brief Contains information about any template-specific
  /// information that has been parsed prior to parsing declaration
  /// specifiers.
  struct ParsedTemplateInfo {
    ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }

    ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                       bool isSpecialization,
                       bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

    explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                                SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false) { }

    /// \brief The kind of template we are parsing.
    enum {
      /// \brief We are not parsing a template at all.
      NonTemplate = 0,
      /// \brief We are parsing a template declaration.
      Template,
      /// \brief We are parsing an explicit specialization.
      ExplicitSpecialization,
      /// \brief We are parsing an explicit instantiation.
      ExplicitInstantiation
    } Kind;

    /// \brief The template parameter lists, for template declarations
    /// and explicit specializations.
    TemplateParameterLists *TemplateParams;

    /// \brief The location of the 'extern' keyword, if any, for an explicit
    /// instantiation.
    SourceLocation ExternLoc;

    /// \brief The location of the 'template' keyword, for an explicit
    /// instantiation.
    SourceLocation TemplateLoc;

    /// \brief Whether the last template parameter list was empty.
bool LastParameterListWasEmpty; SourceRange getSourceRange() const LLVM_READONLY; }; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); static void LateTemplateParserCleanupCallback(void *P); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, AttributeList *AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers& VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. struct ParsedAttributesWithRange : ParsedAttributes { ParsedAttributesWithRange(AttributeFactory &factory) : ParsedAttributes(factory) {} void clear() { ParsedAttributes::clear(); Range = SourceRange(); } SourceRange Range; }; DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc, if non-NULL, is filled with the location of the last token of // the simple-asm. 
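  // A minimal call sketch (illustrative): parse a simple-asm and record where
  // it ended:
  //   SourceLocation AsmEnd;
  //   ExprResult AsmString = ParseSimpleAsm(&AsmEnd);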
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr); ExprResult ParseAsmStringLiteral(); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. 
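  ///
  /// A call sketch (illustrative; \c Loc and \c BaseTy stand for the start
  /// location and a previously parsed base type, not names from this header):
  /// \code
  ///   SourceLocation TypeEnd;
  ///   TypeResult Updated = parseObjCTypeArgsAndProtocolQualifiers(
  ///       Loc, BaseTy, /*consumeLastToken=*/true, TypeEnd);
  /// \endcode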
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for Objective-c context sensitive keywords recognition. enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, Declarator::TheContext Ctx, ParsedAttributes *ParamAttrs); void ParseObjCMethodRequirement(); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition=true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstraintExpression(); // Expr that doesn't include commas. ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, void *Info, bool IsUnevaluated); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast); /// Returns true if the next token cannot start an expression. bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. 
bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<Expr*, 20> ExprListTy; typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, std::function<void()> Completer = nullptr); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr); //===--------------------------------------------------------------------===// // C++0x 5.1.2: Lambda expressions // [...] 
() -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro, bool *SkippedInits = nullptr); bool TryParseLambdaIntroducer(LambdaIntroducer &Intro); ExprResult ParseLambdaExpressionAfterIntroducer( LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while condition expression. Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. 
/// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); ExprResult ParseInitializerWithPotentialDesignator(); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used.) typedef SmallVector<Stmt*, 32> StmtVector; /// A SmallVector of expressions, with stack size 12 (the maximum used.) typedef SmallVector<Expr*, 12> ExprVector; /// A SmallVector of types. typedef SmallVector<ParsedType, 12> TypeVector; StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr, bool AllowOpenMPStandalone = false); enum AllowedContsructsKind { /// \brief Allow any declarations, statements, OpenMP directives. ACK_Any, /// \brief Allow only statements and non-standalone OpenMP directives. 
    ACK_StatementsOpenMPNonStandalone,
    /// \brief Allow statements and all executable OpenMP directives.
    ACK_StatementsOpenMPAnyExecutable
  };

  StmtResult
  ParseStatementOrDeclaration(StmtVector &Stmts,
                              AllowedContsructsKind Allowed,
                              SourceLocation *TrailingElseLoc = nullptr);
  StmtResult ParseStatementOrDeclarationAfterAttributes(
                                         StmtVector &Stmts,
                                         AllowedContsructsKind Allowed,
                                         SourceLocation *TrailingElseLoc,
                                         ParsedAttributesWithRange &Attrs);
  StmtResult ParseExprStatement();
  StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs);
  StmtResult ParseCaseStatement(bool MissingCase = false,
                                ExprResult Expr = ExprResult());
  StmtResult ParseDefaultStatement();
  StmtResult ParseCompoundStatement(bool isStmtExpr = false);
  StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags);
  void ParseCompoundStatementLeadingPragmas();
  StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
  bool ParseParenExprOrCondition(StmtResult *InitStmt,
                                 Sema::ConditionResult &CondResult,
                                 SourceLocation Loc,
                                 Sema::ConditionKind CK);
  StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseDoStatement();
  StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseGotoStatement();
  StmtResult ParseContinueStatement();
  StmtResult ParseBreakStatement();
  StmtResult ParseReturnStatement();
  StmtResult ParseAsmStatement(bool &msAsm);
  StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
  StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
                                 AllowedContsructsKind Allowed,
                                 SourceLocation *TrailingElseLoc,
                                 ParsedAttributesWithRange &Attrs);

  /// \brief Describes the behavior that should be taken for an __if_exists
  /// block.
  enum IfExistsBehavior {
    /// \brief Parse the block; this code is always used.
    IEB_Parse,
    /// \brief Skip the block entirely; this code is never used.
    IEB_Skip,
    /// \brief Parse the block as a dependent block, which may be used in
    /// some template instantiations but not others.
    IEB_Dependent
  };

  /// \brief Describes the condition of a Microsoft __if_exists or
  /// __if_not_exists block.
  struct IfExistsCondition {
    /// \brief The location of the initial keyword.
    SourceLocation KeywordLoc;
    /// \brief Whether this is an __if_exists block (rather than an
    /// __if_not_exists block).
    bool IsIfExists;

    /// \brief Nested-name-specifier preceding the name.
    CXXScopeSpec SS;

    /// \brief The name we're looking for.
    UnqualifiedId Name;

    /// \brief The behavior that this __if_exists or __if_not_exists block
    /// should follow.
IfExistsBehavior Behavior; }; bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result); void ParseMicrosoftIfExistsStatement(StmtVector &Stmts); void ParseMicrosoftIfExistsExternalDeclaration(); void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType, AccessSpecifier& CurAS); bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs, bool &InitExprsOk); bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names, SmallVectorImpl<Expr *> &Constraints, SmallVectorImpl<Expr *> &Exprs); //===--------------------------------------------------------------------===// // C++ 6: Statements and Blocks StmtResult ParseCXXTryBlock(); StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false); StmtResult ParseCXXCatchBlock(bool FnCatch = false); //===--------------------------------------------------------------------===// // MS: SEH Statements and Blocks StmtResult ParseSEHTryBlock(); StmtResult ParseSEHExceptBlock(SourceLocation Loc); StmtResult ParseSEHFinallyBlock(SourceLocation Loc); StmtResult ParseSEHLeaveStatement(); //===--------------------------------------------------------------------===// // Objective-C Statements StmtResult ParseObjCAtStatement(SourceLocation atLoc); StmtResult ParseObjCTryStmt(SourceLocation atLoc); StmtResult ParseObjCThrowStmt(SourceLocation atLoc); StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc); StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc); //===--------------------------------------------------------------------===// // C99 6.7: Declarations. /// A context for parsing declaration specifiers. TODO: flesh this /// out, there are other significant restrictions on specifiers than /// would be best implemented in the parser. enum DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DSC_normal: case DSC_class: case DSC_top_level: case DSC_objc_method_result: case DSC_condition: return false; case DSC_template_type_arg: case DSC_type_specifier: case DSC_trailing: case DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Information on a C++0x for-range-initializer found while parsing a /// declaration which turns out to be a for-range-declaration. 
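  ///
  /// For example (illustrative): while parsing 'for (auto x : range)', the
  /// declaration parser fills in ColonLoc and RangeExpr below, and
  /// ParsedForRangeDecl() then reports whether the ':' form was seen.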
  struct ForRangeInit {
    SourceLocation ColonLoc;
    ExprResult RangeExpr;

    bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
  };

  DeclGroupPtrTy ParseDeclaration(unsigned Context, SourceLocation &DeclEnd,
                                  ParsedAttributesWithRange &attrs);
  DeclGroupPtrTy ParseSimpleDeclaration(unsigned Context,
                                        SourceLocation &DeclEnd,
                                        ParsedAttributesWithRange &attrs,
                                        bool RequireSemi,
                                        ForRangeInit *FRI = nullptr);
  bool MightBeDeclarator(unsigned Context);
  DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, unsigned Context,
                                SourceLocation *DeclEnd = nullptr,
                                ForRangeInit *FRI = nullptr);
  Decl *ParseDeclarationAfterDeclarator(Declarator &D,
               const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
  bool ParseAsmAttributesAfterDeclarator(Declarator &D);
  Decl *ParseDeclarationAfterDeclaratorAndAttributes(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      ForRangeInit *FRI = nullptr);
  Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
  Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

  /// \brief When in code-completion, skip parsing of the function/method body
  /// unless the body contains the code-completion point.
  ///
  /// \returns true if the function body was skipped.
  bool trySkippingFunctionBody();

  bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC,
                        ParsedAttributesWithRange &Attrs);
  DeclSpecContext getDeclSpecContextFromDeclaratorContext(unsigned Context);
  void ParseDeclarationSpecifiers(DeclSpec &DS,
                const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
                                  AccessSpecifier AS = AS_none,
                                  DeclSpecContext DSC = DSC_normal,
                                  LateParsedAttrList *LateAttrs = nullptr);
  bool DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
                                             DeclSpecContext DSContext,
                                    LateParsedAttrList *LateAttrs = nullptr);

  void ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS = AS_none,
                                   DeclSpecContext DSC = DSC_normal);

  void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
                                  Declarator::TheContext Context);

  void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                          const ParsedTemplateInfo &TemplateInfo,
                          AccessSpecifier AS, DeclSpecContext DSC);
  void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
  void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
                            Decl *TagDecl);

  void ParseStructDeclaration(
      ParsingDeclSpec &DS,
      llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

  bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
  bool isTypeSpecifierQualifier();

  /// isKnownToBeTypeSpecifier - Return true if we know that the specified
  /// token is definitely a type-specifier. Return false if it isn't part of a
  /// type specifier or if we're not sure.
  bool isKnownToBeTypeSpecifier(const Token &Tok) const;

  /// \brief Return true if we know that we are definitely looking at a
  /// decl-specifier that is not part of an expression such as a
  /// function-style cast. Return false if it's not a decl-specifier, or we're
  /// not sure.
  bool isKnownToBeDeclarationSpecifier() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationSpecifier() == TPResult::True;
    return isDeclarationSpecifier(true);
  }

  /// isDeclarationStatement - Disambiguates between a declaration or an
  /// expression statement, when parsing function bodies.
  /// Returns true for declaration, false for expression.
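  ///
  /// For example (illustrative): in C++, a statement such as 'T(x);' where T
  /// names a type is disambiguated as a declaration of 'x', not as a
  /// function-style cast followed by a discarded expression.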
bool isDeclarationStatement() { if (getLangOpts().CPlusPlus) return isCXXDeclarationStatement(); return isDeclarationSpecifier(true); } /// isForInitDeclaration - Disambiguates between a declaration or an /// expression in the context of the C 'clause-1' or the C++ // 'for-init-statement' part of a 'for' statement. /// Returns true for declaration, false for expression. bool isForInitDeclaration() { if (getLangOpts().CPlusPlus) return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true); return isDeclarationSpecifier(true); } /// \brief Determine whether this is a C++1z for-range-identifier. bool isForRangeIdentifier(); /// \brief Determine whether we are currently at the start of an Objective-C /// class message that appears to be missing the open bracket '['. bool isStartOfObjCClassMessageMissingOpenBracket(); /// \brief Starting with a scope specifier, identifier, or /// template-id that refers to the current class, determine whether /// this is a constructor declarator. bool isConstructorDeclarator(bool Unqualified); /// \brief Specifies the context in which type-id/expression /// disambiguation will occur. enum TentativeCXXTypeIdContext { TypeIdInParens, TypeIdUnambiguous, TypeIdAsTemplateArgument }; /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know /// whether the parens contain an expression or a type-id. /// Returns true for a type-id and false for an expression. bool isTypeIdInParens(bool &isAmbiguous) { if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdInParens, isAmbiguous); isAmbiguous = false; return isTypeSpecifierQualifier(); } bool isTypeIdInParens() { bool isAmbiguous; return isTypeIdInParens(isAmbiguous); } /// \brief Checks if the current tokens form type-id or expression. /// It is similar to isTypeIdInParens but does not suppose that type-id /// is in parenthesis. bool isTypeIdUnambiguously() { bool IsAmbiguous; if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous); return isTypeSpecifierQualifier(); } /// isCXXDeclarationStatement - C++-specialized function that disambiguates /// between a declaration or an expression statement, when parsing function /// bodies. Returns true for declaration, false for expression. bool isCXXDeclarationStatement(); /// isCXXSimpleDeclaration - C++-specialized function that disambiguates /// between a simple-declaration or an expression-statement. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. /// Returns false if the statement is disambiguated as expression. bool isCXXSimpleDeclaration(bool AllowForRangeDecl); /// isCXXFunctionDeclarator - Disambiguates between a function declarator or /// a constructor-style initializer, when parsing declaration statements. /// Returns true for function declarator and false for constructor-style /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration /// might be a constructor-style initializer. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr); struct ConditionDeclarationOrInitStatementState; enum class ConditionOrInitStatement { Expression, ///< Disambiguated as an expression (either kind). ConditionDecl, ///< Disambiguated as the declaration form of condition. InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement. 
    Error      ///< Can't be any of the above!
  };

  /// \brief Disambiguates between the different kinds of things that can
  /// happen after 'if (' or 'switch ('. This could be one of two different
  /// kinds of declaration (depending on whether there is a ';' later) or an
  /// expression.
  ConditionOrInitStatement
  isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt);

  bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
  bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
    bool isAmbiguous;
    return isCXXTypeId(Context, isAmbiguous);
  }

  /// TPResult - Used as the result value for functions whose purpose is to
  /// disambiguate C++ constructs by "tentatively parsing" them.
  enum class TPResult {
    True, False, Ambiguous, Error
  };

  /// \brief Based only on the given token kind, determine whether we know that
  /// we're at the start of an expression or a type-specifier-seq (which may
  /// be an expression, in C++).
  ///
  /// This routine does not attempt to resolve any of the tricky cases, e.g.,
  /// those involving lookup of identifiers.
  ///
  /// \returns \c TPResult::True if this token starts an expression,
  /// \c TPResult::False if this token starts a type-specifier-seq, or
  /// \c TPResult::Ambiguous if it cannot tell.
  TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);

  /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
  /// declaration specifier, TPResult::False if it is not,
  /// TPResult::Ambiguous if it could be either a decl-specifier or a
  /// function-style cast, and TPResult::Error if a parsing error was
  /// encountered. If it could be a braced C++11 function-style cast, returns
  /// BracedCastResult.
  /// Doesn't consume tokens.
  TPResult
  isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
                            bool *HasMissingTypename = nullptr);

  /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
  /// \c TPResult::Ambiguous, determine whether the decl-specifier would be
  /// a type-specifier other than a cv-qualifier.
  bool isCXXDeclarationSpecifierAType();

  /// \brief Determine whether an identifier has been tentatively declared as a
  /// non-type. Such tentative declarations should not be found to name a type
  /// during a tentative parse, but also should not be annotated as a non-type.
  bool isTentativelyDeclared(IdentifierInfo *II);

  // "Tentative parsing" functions, used for disambiguation. If a parsing error
  // is encountered they will return TPResult::Error.
  // Returning TPResult::True/False indicates that the ambiguity was
  // resolved and tentative parsing may stop. TPResult::Ambiguous indicates
  // that more tentative parsing is necessary for disambiguation.
  // They all consume tokens, so backtracking should be used after calling them.
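  //
  // A disambiguation sketch (illustrative): wrap a Try* call in a reverting
  // action so the speculatively consumed tokens are always restored:
  //
  //   RevertingTentativeParsingAction PA(*this);
  //   TPResult TPR = TryParseDeclarator(/*mayBeAbstract=*/true);
  //   // PA's destructor calls Revert(); act on TPR afterwards.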
  TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
  TPResult TryParseTypeofSpecifier();
  TPResult TryParseProtocolQualifiers();
  TPResult TryParsePtrOperatorSeq();
  TPResult TryParseOperatorId();
  TPResult TryParseInitDeclaratorList();
  TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier=true);
  TPResult
  TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
                                     bool VersusTemplateArg = false);
  TPResult TryParseFunctionDeclarator();
  TPResult TryParseBracketDeclarator();
  TPResult TryConsumeDeclarationSpecifier();

public:
  TypeResult ParseTypeName(SourceRange *Range = nullptr,
                           Declarator::TheContext Context
                             = Declarator::TypeNameContext,
                           AccessSpecifier AS = AS_none,
                           Decl **OwnedType = nullptr,
                           ParsedAttributes *Attrs = nullptr);

private:
  void ParseBlockId(SourceLocation CaretLoc);

  // Check for the start of a C++11 attribute-specifier-seq in a context where
  // an attribute is not allowed.
  bool CheckProhibitedCXX11Attribute() {
    assert(Tok.is(tok::l_square));
    if (!getLangOpts().CPlusPlus11 || NextToken().isNot(tok::l_square))
      return false;
    return DiagnoseProhibitedCXX11Attribute();
  }

  bool DiagnoseProhibitedCXX11Attribute();
  void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                    SourceLocation CorrectLocation) {
    if (!getLangOpts().CPlusPlus11)
      return;
    if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
        Tok.isNot(tok::kw_alignas))
      return;
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
  }
  void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                       SourceLocation CorrectLocation);

  void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                      DeclSpec &DS, Sema::TagUseKind TUK);

  void ProhibitAttributes(ParsedAttributesWithRange &attrs) {
    if (!attrs.Range.isValid()) return;
    DiagnoseProhibitedAttributes(attrs);
    attrs.clear();
  }
  void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs);

  // Forbid C++11 attributes that appear in certain syntactic locations which
  // the standard permits but we don't support yet; for example, attributes
  // that appertain to decl-specifiers.
  void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                               unsigned DiagID);

  /// \brief Skip C++11 attributes and return the end location of the last one.
  /// \returns SourceLocation() if there are no attributes.
  SourceLocation SkipCXX11Attributes();

  /// \brief Diagnose and skip C++11 attributes that appear in syntactic
  /// locations where attributes are not allowed.
  void DiagnoseAndSkipCXX11Attributes();

  /// \brief Parses syntax-generic attribute arguments for attributes which are
  /// known to the implementation, and adds them to the given ParsedAttributes
  /// list with the given attribute syntax. Returns the number of arguments
  /// parsed for the attribute.
unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } void MaybeParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) ParseGNUAttributes(attrs, endLoc, LateAttrs); } void ParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } void MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); } } void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) ParseCXX11Attributes(attrs, endLoc); } void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs, SourceLocation *EndLoc = nullptr); void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// \brief Parses a C++-style attribute argument list. Returns true if this /// results in adding an attribute to the ParsedAttributes list. 
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) ParseMicrosoftDeclSpecs(Attrs, End); } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// \brief Parses opencl_unroll_hint attribute if language is OpenCL v2.0 /// or higher. /// \return false if error happens. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// \brief Parses opencl_unroll_hint attribute. /// \return false if error happens. 
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseSwiftNewtypeAttribute(IdentifierInfo &SwiftNewtype, SourceLocation SwiftNewtypeLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation &EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. 
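  ///
  /// For instance (illustrative), ParseDeclaratorInternal below can be handed
  /// \c &Parser::ParseDirectDeclarator as the DirectDeclParser callback:
  /// \code
  ///   ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
  /// \endcode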
typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( Declarator &D, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. 
CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(unsigned Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); void ParseInnerNamespace(std::vector<SourceLocation>& IdentLoc, std::vector<IdentifierInfo*>& Ident, std::vector<SourceLocation>& NamespaceLoc, unsigned int index, SourceLocation& InlineLoc, ParsedAttributes& attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, unsigned Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( unsigned Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(unsigned Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; SourceLocation TemplateKWLoc; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = TemplateKWLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(unsigned Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(unsigned Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. 
bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, AttributeList *Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// \brief Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// \brief Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// \brief Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// \brief Parses declarative or executable directive. /// /// \param Allowed ACK_Any, if any directives are allowed, /// ACK_StatementsOpenMPAnyExecutable - if any executable directives are /// allowed, ACK_StatementsOpenMPNonStandalone - if only non-standalone /// executable directives are allowed. 
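  /// For example (an illustrative case): a standalone directive such as
  /// '#pragma omp barrier' would be rejected where it appears as the
  /// immediate substatement of an 'if', since only non-standalone
  /// executable directives are allowed there.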
/// StmtResult ParseOpenMPDeclarativeOrExecutableDirective(AllowedContsructsKind Allowed); /// \brief Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// \brief Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind); /// \brief Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind); /// \brief Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind); /// \brief Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind); /// \brief Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *TailExpr = nullptr; SourceLocation ColonLoc; CXXScopeSpec ReductionIdScopeSpec; DeclarationNameInfo ReductionId; OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val; OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown; OpenMPMapClauseKind MapType = OMPC_MAP_unknown; bool IsMapTypeImplicit = false; SourceLocation DepLinMapLoc; }; /// Parses clauses with list. 
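  /// For example (an illustrative case): for 'firstprivate(a, b)' the parsed
  /// variable references are appended to \a Vars, while clause-specific
  /// details such as map-type or linear modifiers are collected in \a Data.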
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, ParsedType ObjectType, SourceLocation& TemplateKWLoc, UnqualifiedId &Result); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(unsigned Context, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none, AttributeList *AccessAttrs = nullptr); Decl *ParseTemplateDeclarationOrSpecialization(unsigned Context, SourceLocation &DeclEnd, AccessSpecifier AS, AttributeList *AccessAttrs); Decl *ParseSingleDeclarationAfterTemplate( unsigned Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, AccessSpecifier AS=AS_none, AttributeList *AccessAttrs = nullptr); bool ParseTemplateParameters(unsigned Depth, SmallVectorImpl<Decl*> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<Decl*> &TemplateParams); bool isStartOfTemplateTypeParameter(); Decl *ParseTemplateParameter(unsigned Depth, unsigned Position); Decl *ParseTypeParameter(unsigned Depth, unsigned Position); Decl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); Decl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(TemplateTy Template, SourceLocation TemplateNameLoc, const CXXScopeSpec &SS, bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true); void AnnotateTemplateIdTokenAsType(); bool IsTemplateArgumentList(unsigned Skip = 0); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(unsigned Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(); DeclGroupPtrTy ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult 
  ParseTypeTrait();

  /// Parse the given string as a type.
  ///
  /// This is a dangerous utility function currently employed only by API
  /// notes. It is not a general entry-point for safely parsing types from
  /// strings.
  ///
  /// \param typeStr The string to be parsed as a type.
  /// \param context The name of the context in which this string is being
  /// parsed, which will be used in diagnostics.
  /// \param includeLoc The location at which this parse was triggered.
  TypeResult parseTypeFromString(StringRef typeStr, StringRef context,
                                 SourceLocation includeLoc);

  //===--------------------------------------------------------------------===//
  // Embarcadero: Array and Expression Traits
  ExprResult ParseArrayTypeTrait();
  ExprResult ParseExpressionTrait();

  //===--------------------------------------------------------------------===//
  // Preprocessor code-completion pass-through
  void CodeCompleteDirective(bool InConditional) override;
  void CodeCompleteInConditionalExclusion() override;
  void CodeCompleteMacroName(bool IsDefinition) override;
  void CodeCompletePreprocessorExpression() override;
  void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
                                 unsigned ArgumentIndex) override;
  void CodeCompleteNaturalLanguage() override;
};

} // end namespace clang

#endif
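
// Illustrative sketch (a hypothetical driver, not part of the header): the
// top-level loop that a caller such as clang's ParseAST runs against this
// interface, pulling one top-level declaration group per iteration.
//
//   Parser P(PP, S, /*SkipFunctionBodies=*/false);
//   P.Initialize();
//   Parser::DeclGroupPtrTy ADecl;
//   for (bool AtEOF = P.ParseFirstTopLevelDecl(ADecl); !AtEOF;
//        AtEOF = P.ParseTopLevelDecl(ADecl)) {
//     // Hand each parsed declaration group (ADecl) to the AST consumer.
//   }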
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/LoopHint.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class VersionTuple; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. /// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. 
Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. mutable IdentifierInfo *Ident_instancetype; /// \brief Identifier for "introduced". IdentifierInfo *Ident_introduced; /// \brief Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// \brief Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// \brief Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// \brief Identifier for "message". IdentifierInfo *Ident_message; /// \brief Identifier for "strict". IdentifierInfo *Ident_strict; /// \brief Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++0x contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// \brief When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. 
bool InMessageExpression; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// \brief RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } unsigned getDepth() const { return Depth; } }; /// Factory object for creating AttributeList objects. AttributeFactory AttrFactory; /// \brief Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; /// \brief Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion /// and balanced tokens must be handled using the specific consume methods. /// Returns the location of the consumed token. 
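  /// For example (an illustrative sketch):
  /// \code
  ///   assert(Tok.is(tok::kw_return) && "caller already checked the kind");
  ///   SourceLocation ReturnLoc = ConsumeToken(); // Tok is now the next token
  /// \endcode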
SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.getKind() == tok::l_paren || Tok.getKind() == tok::r_paren; } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.getKind() == tok::l_square || Tok.getKind() == tok::r_square; } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.getKind() == tok::l_brace || Tok.getKind() == tok::r_brace; } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion); } /// \brief Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '=' bool isTokenEqualOrEqualTypo(); /// \brief Return the current token to the token stream and make the given /// token the current token. void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed); PP.Lex(Tok); PP.EnterToken(Next); } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); return ConsumeToken(); } /// ConsumeParen - This consume method keeps the paren count up-to-date. /// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) --ParenCount; // Don't let unbalanced )'s drive the count negative. PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. 
  ///
  SourceLocation ConsumeBracket() {
    assert(isTokenBracket() && "wrong consume method");
    if (Tok.getKind() == tok::l_square)
      ++BracketCount;
    else if (BracketCount)
      --BracketCount;     // Don't let unbalanced ]'s drive the count negative.

    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBrace - This consume method keeps the brace count up-to-date.
  ///
  SourceLocation ConsumeBrace() {
    assert(isTokenBrace() && "wrong consume method");
    if (Tok.getKind() == tok::l_brace)
      ++BraceCount;
    else if (BraceCount)
      --BraceCount;     // Don't let unbalanced }'s drive the count negative.

    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeStringToken - Consume the current 'peek token', lexing a new one
  /// and returning the location of the consumed token. This method is
  /// specific to strings, as it handles string literal concatenation, as per
  /// C99 5.1.1.2, translation phase #6.
  SourceLocation ConsumeStringToken() {
    assert(isTokenStringLiteral() &&
           "Should only consume string literals with this method");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// \brief Consume the current code-completion token.
  ///
  /// This routine can be called to consume the code-completion token and
  /// continue processing in special cases where \c cutOffParsing() isn't
  /// desired, such as token caching or completion with lookahead.
  SourceLocation ConsumeCodeCompletionToken() {
    assert(Tok.is(tok::code_completion));
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// \brief When we are consuming a code-completion token without having
  /// matched specific position in the grammar, provide code-completion results
  /// based on context.
  ///
  /// \returns the source location of the code-completion token.
  SourceLocation handleUnexpectedCodeCompletionToken();

  /// \brief Abruptly cut off parsing; mainly used when we have reached the
  /// code-completion point.
  void cutOffParsing() {
    if (PP.isCodeCompletionEnabled())
      PP.setCodeCompletionReached();
    // Cut off parsing by acting as if we reached the end-of-file.
    Tok.setKind(tok::eof);
  }

  /// \brief Determine if we're at the end of the file or at a transition
  /// between modules.
  bool isEofOrEom() {
    tok::TokenKind Kind = Tok.getKind();
    return Kind == tok::eof || Kind == tok::annot_module_begin ||
           Kind == tok::annot_module_end || Kind == tok::annot_module_include;
  }

  /// \brief Initialize all pragma handlers.
  void initializePragmaHandlers();

  /// \brief Destroy and reset all pragma handlers.
  void resetPragmaHandlers();

  /// \brief Handle the annotation token produced for #pragma unused(...)
  void HandlePragmaUnused();

  /// \brief Handle the annotation token produced for
  /// #pragma GCC visibility...
  void HandlePragmaVisibility();

  /// \brief Handle the annotation token produced for
  /// #pragma pack...
  void HandlePragmaPack();

  /// \brief Handle the annotation token produced for
  /// #pragma ms_struct...
  void HandlePragmaMSStruct();

  /// \brief Handle the annotation token produced for
  /// #pragma comment...
  void HandlePragmaMSComment();

  void HandlePragmaMSPointersToMembers();

  void HandlePragmaMSVtorDisp();

  void HandlePragmaMSPragma();
  bool HandlePragmaMSSection(StringRef PragmaName,
                             SourceLocation PragmaLocation);
  bool HandlePragmaMSSegment(StringRef PragmaName,
                             SourceLocation PragmaLocation);
  bool HandlePragmaMSInitSeg(StringRef PragmaName,
                             SourceLocation PragmaLocation);

  /// \brief Handle the annotation token produced for
  /// #pragma align...
void HandlePragmaAlign(); /// \brief Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// \brief Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// \brief Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// \brief Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// \brief Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// \brief Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// \brief Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// \brief Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. static ParsedType getTypeAnnotation(Token &Tok) { return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, ParsedType T) { Tok.setAnnotationValue(T.getAsOpaquePtr()); } /// \brief Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// \brief Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. 
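  /// For example (an illustrative case), after parsing the scope specifier
  /// 'A::B::', the tokens it was built from are replaced by a single
  /// tok::annot_cxxscope token carrying \p SS as its annotation value.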
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. bool isObjCInstancetype() { assert(getLangOpts().ObjC1); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// \brief Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... 
/// TPA.Revert(); /// class TentativeParsingAction { Parser &P; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. /// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// \brief The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// \brief The kind of extra semi diagnostic to emit. enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// \brief Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. 
bool expectIdentifier(); public: //===--------------------------------------------------------------------===// // Scope manipulation /// ParseScope - Introduces a new scope for parsing. The kind of /// scope is determined by ScopeFlags. Objects of this type should /// be created on the stack to coincide with the position where the /// parser enters the new scope, and this object's constructor will /// create that new scope. Similarly, once the object is destroyed /// the parser will exit the scope. class ParseScope { Parser *Self; ParseScope(const ParseScope &) = delete; void operator=(const ParseScope &) = delete; public: // ParseScope - Construct a new object to manage a scope in the // parser Self where the new Scope is created with the flags // ScopeFlags, but only when we aren't about to enter a compound statement. ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true, bool BeforeCompoundStmt = false) : Self(Self) { if (EnteredScope && !BeforeCompoundStmt) Self->EnterScope(ScopeFlags); else { if (BeforeCompoundStmt) Self->incrementMSManglingNumber(); this->Self = nullptr; } } // Exit - Exit the scope associated with this object now, rather // than waiting until the object is destroyed. void Exit() { if (Self) { Self->ExitScope(); Self = nullptr; } } ~ParseScope() { Exit(); } }; /// EnterScope - Start a new scope. void EnterScope(unsigned ScopeFlags); /// ExitScope - Pop a scope off the scope stack. void ExitScope(); private: /// \brief RAII object used to modify the scope flags for the current scope. class ParseScopeFlags { Scope *CurScope; unsigned OldFlags; ParseScopeFlags(const ParseScopeFlags &) = delete; void operator=(const ParseScopeFlags &) = delete; public: ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true); ~ParseScopeFlags(); }; //===--------------------------------------------------------------------===// // Diagnostic Emission and Error recovery. public: DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID); DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID); DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); } private: void SuggestParentheses(SourceLocation Loc, unsigned DK, SourceRange ParenRange); void CheckNestedObjCContexts(SourceLocation AtLoc); public: /// \brief Control flags for SkipUntil functions. enum SkipUntilFlags { StopAtSemi = 1 << 0, ///< Stop skipping at semicolon /// \brief Stop skipping at specified token, but don't skip the token itself StopBeforeMatch = 1 << 1, StopAtCodeCompletion = 1 << 2 ///< Stop at code completion }; friend constexpr SkipUntilFlags operator|(SkipUntilFlags L, SkipUntilFlags R) { return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) | static_cast<unsigned>(R)); } /// SkipUntil - Read tokens until we get to the specified token, then consume /// it (unless StopBeforeMatch is specified). Because we cannot guarantee /// that the token will ever occur, this skips to the next token, or to some /// likely good stopping point. If Flags has StopAtSemi flag, skipping will /// stop at a ';' character. /// /// If SkipUntil finds the specified token, it returns true, otherwise it /// returns false. 
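  ///
  /// For example (an illustrative sketch), a common recovery inside a
  /// parenthesized expression:
  /// \code
  ///   // Skip to the closing ')' or to a ';', but do not consume the ';'.
  ///   SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
  /// \endcode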
  bool SkipUntil(tok::TokenKind T,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    return SkipUntil(llvm::makeArrayRef(T), Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2, T3};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

  /// SkipMalformedDecl - Read tokens until we get to some likely good stopping
  /// point for skipping past a simple-declaration.
  void SkipMalformedDecl();

private:
  //===--------------------------------------------------------------------===//
  // Lexing and parsing of C++ inline methods.

  struct ParsingClass;

  /// [class.mem]p1: "... the class is regarded as complete within
  /// - function bodies
  /// - default arguments
  /// - exception-specifications (TODO: C++0x)
  /// - and brace-or-equal-initializers for non-static data members
  /// (including such things in nested classes)."
  /// LateParsedDeclarations build the tree of those elements so they can
  /// be parsed after parsing the top-level class.
  class LateParsedDeclaration {
  public:
    virtual ~LateParsedDeclaration();

    virtual void ParseLexedMethodDeclarations();
    virtual void ParseLexedMemberInitializers();
    virtual void ParseLexedMethodDefs();
    virtual void ParseLexedAttributes();
  };

  /// Inner node of the LateParsedDeclaration tree that parses
  /// all its members recursively.
  class LateParsedClass : public LateParsedDeclaration {
  public:
    LateParsedClass(Parser *P, ParsingClass *C);
    ~LateParsedClass() override;

    void ParseLexedMethodDeclarations() override;
    void ParseLexedMemberInitializers() override;
    void ParseLexedMethodDefs() override;
    void ParseLexedAttributes() override;

  private:
    Parser *Self;
    ParsingClass *Class;
  };

  /// Contains the lexed tokens of an attribute with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  /// FIXME: Perhaps we should change the name of LateParsedDeclaration to
  /// LateParsedTokens.
  struct LateParsedAttribute : public LateParsedDeclaration {
    Parser *Self;
    CachedTokens Toks;
    IdentifierInfo &AttrName;
    SourceLocation AttrNameLoc;
    SmallVector<Decl*, 2> Decls;

    explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                                 SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

    void ParseLexedAttributes() override;

    void addDecl(Decl *D) { Decls.push_back(D); }
  };

  // A list of late-parsed attributes. Used by ParseGNUAttributes.
  class LateParsedAttrList : public SmallVector<LateParsedAttribute *, 2> {
  public:
    LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

    bool parseSoon() { return ParseSoon; }

  private:
    bool ParseSoon;  // Are we planning to parse these shortly after creation?
  };

  /// Contains the lexed tokens of a member function definition
  /// which needs to be parsed at the end of the class declaration
  /// after parsing all other member declarations.
  struct LexedMethod : public LateParsedDeclaration {
    Parser *Self;
    Decl *D;
    CachedTokens Toks;

    /// \brief Whether this member function had an associated template
    /// scope. When true, D is a template declaration; otherwise, it is a
    /// member function declaration.
    bool TemplateScope;

    explicit LexedMethod(Parser *P, Decl *MD)
      : Self(P), D(MD), TemplateScope(false) {}

    void ParseLexedMethodDefs() override;
  };

  /// LateParsedDefaultArgument - Keeps track of a parameter that may
  /// have a default argument that cannot be parsed yet because it
  /// occurs within a member function declaration inside the class
  /// (C++ [class.mem]p2).
  struct LateParsedDefaultArgument {
    explicit LateParsedDefaultArgument(
        Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

    /// Param - The parameter declaration for this parameter.
    Decl *Param;

    /// Toks - The sequence of tokens that comprises the default
    /// argument expression, not including the '=' or the terminating
    /// ')' or ','. This will be NULL for parameters that have no
    /// default argument.
    std::unique_ptr<CachedTokens> Toks;
  };

  /// LateParsedMethodDeclaration - A method declaration inside a class that
  /// contains at least one entity whose parsing needs to be delayed
  /// until the class itself is completely defined, such as a default
  /// argument (C++ [class.mem]p2).
  struct LateParsedMethodDeclaration : public LateParsedDeclaration {
    explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), TemplateScope(false),
        ExceptionSpecTokens(nullptr) {}

    void ParseLexedMethodDeclarations() override;

    Parser *Self;

    /// Method - The method declaration.
    Decl *Method;

    /// \brief Whether this member function had an associated template
    /// scope. When true, Method is a template declaration; otherwise, it is
    /// a member function declaration.
    bool TemplateScope;

    /// DefaultArgs - Contains the parameters of the function and
    /// their default arguments. At least one of the parameters will
    /// have a default argument, but all of the parameters of the
    /// method will be stored so that they can be reintroduced into
    /// scope at the appropriate times.
    SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

    /// \brief The set of tokens that make up an exception-specification that
    /// has not yet been parsed.
    CachedTokens *ExceptionSpecTokens;
  };

  /// LateParsedMemberInitializer - An initializer for a non-static class data
  /// member whose parsing must be delayed until the class is completely
  /// defined (C++11 [class.mem]p2).
  struct LateParsedMemberInitializer : public LateParsedDeclaration {
    LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

    void ParseLexedMemberInitializers() override;

    Parser *Self;

    /// Field - The field declaration.
    Decl *Field;

    /// CachedTokens - The sequence of tokens that comprises the initializer,
    /// including any leading '='.
    CachedTokens Toks;
  };

  /// LateParsedDeclarationsContainer - During parsing of a top-level
  /// (non-nested) C++ class, the method declarations and possibly attached
  /// inline definitions that contain parts which won't be parsed until after
  /// the definition is completed (C++ [class.mem]p2) are stored here, along
  /// with the tokens that will later be parsed to create those entities.
  typedef SmallVector<LateParsedDeclaration*, 2>
    LateParsedDeclarationsContainer;

  /// \brief Representation of a class that has been parsed, including
  /// any member function declarations or definitions that need to be
  /// parsed after the corresponding top-level class is complete.
  struct ParsingClass {
    ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), TemplateScope(false),
        IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }

    /// \brief Whether this is a "top-level" class, meaning that it is
    /// not nested within another class.
    bool TopLevelClass : 1;

    /// \brief Whether this class had an associated template
    /// scope. When true, TagOrTemplate is a template declaration;
    /// otherwise, it is a tag declaration.
    bool TemplateScope : 1;

    /// \brief Whether this class is an __interface.
    bool IsInterface : 1;

    /// \brief The class or class template whose definition we are parsing.
    Decl *TagOrTemplate;

    /// LateParsedDeclarations - Method declarations, inline definitions and
    /// nested classes that contain pieces whose parsing will be delayed until
    /// the top-level class is fully defined.
    LateParsedDeclarationsContainer LateParsedDeclarations;
  };

  /// \brief The stack of classes that is currently being
  /// parsed. Nested and local classes will be pushed onto this stack
  /// when they are parsed, and removed afterward.
  std::stack<ParsingClass *> ClassStack;

  ParsingClass &getCurrentClass() {
    assert(!ClassStack.empty() && "No lexed method stacks!");
    return *ClassStack.top();
  }

  /// \brief RAII object used to manage the parsing of a class definition.
  class ParsingClassDefinition {
    Parser &P;
    bool Popped;
    Sema::ParsingClassState State;

  public:
    ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                           bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
    }

    /// \brief Pop this class off the stack.
    void Pop() {
      assert(!Popped && "Nested class has already been popped");
      Popped = true;
      P.PopParsingClass(State);
    }

    ~ParsingClassDefinition() {
      if (!Popped)
        P.PopParsingClass(State);
    }
  };

  /// \brief Contains any template-specific information that has been parsed
  /// prior to parsing declaration specifiers.
  struct ParsedTemplateInfo {
    ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }

    ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                       bool isSpecialization,
                       bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization ? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

    explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                                SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false) { }

    /// \brief The kind of template we are parsing.
    enum {
      /// \brief We are not parsing a template at all.
      NonTemplate = 0,
      /// \brief We are parsing a template declaration.
      Template,
      /// \brief We are parsing an explicit specialization.
      ExplicitSpecialization,
      /// \brief We are parsing an explicit instantiation.
      ExplicitInstantiation
    } Kind;

    /// \brief The template parameter lists, for template declarations
    /// and explicit specializations.
    TemplateParameterLists *TemplateParams;

    /// \brief The location of the 'extern' keyword, if any, for an explicit
    /// instantiation.
    SourceLocation ExternLoc;

    /// \brief The location of the 'template' keyword, for an explicit
    /// instantiation.
    SourceLocation TemplateLoc;

    /// \brief Whether the last template parameter list was empty.
bool LastParameterListWasEmpty; SourceRange getSourceRange() const LLVM_READONLY; }; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); static void LateTemplateParserCleanupCallback(void *P); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, AttributeList *AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers& VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. struct ParsedAttributesWithRange : ParsedAttributes { ParsedAttributesWithRange(AttributeFactory &factory) : ParsedAttributes(factory) {} void clear() { ParsedAttributes::clear(); Range = SourceRange(); } SourceRange Range; }; DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc, if non-NULL, is filled with the location of the last token of // the simple-asm. 
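// For example (an illustrative case), the GNU asm label in
//   long r asm("rax");
// where EndLoc would receive the location of the closing ')'.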
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr); ExprResult ParseAsmStringLiteral(); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. 
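  /// For example (an illustrative case), with base type 'NSArray' the
  /// angle-bracket clause '<NSString *>' parses as a type-argument list,
  /// while '<NSCopying>' parses as protocol qualifiers.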
  TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
                                                    ParsedType type,
                                                    bool consumeLastToken,
                                                    SourceLocation &endLoc);

  void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
                                  Decl *CDecl);
  DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
                                                ParsedAttributes &prefixAttrs);

  struct ObjCImplParsingDataRAII {
    Parser &P;
    Decl *Dcl;
    bool HasCFunction;
    typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
    LateParsedObjCMethodContainer LateParsedObjCMethods;

    ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      : P(parser), Dcl(D), HasCFunction(false) {
      P.CurParsedObjCImpl = this;
      Finished = false;
    }
    ~ObjCImplParsingDataRAII();

    void finish(SourceRange AtEnd);
    bool isFinished() const { return Finished; }

  private:
    bool Finished;
  };
  ObjCImplParsingDataRAII *CurParsedObjCImpl;
  void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);

  DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
  DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
  Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
  Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
  Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);

  IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);

  // Definitions for Objective-C context-sensitive keyword recognition.
  enum ObjCTypeQual {
    objc_in = 0,
    objc_out,
    objc_inout,
    objc_oneway,
    objc_bycopy,
    objc_byref,
    objc_nonnull,
    objc_nullable,
    objc_null_unspecified,
    objc_NumQuals
  };
  IdentifierInfo *ObjCTypeQuals[objc_NumQuals];

  bool isTokIdentifier_in() const;

  ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, Declarator::TheContext Ctx,
                               ParsedAttributes *ParamAttrs);
  void ParseObjCMethodRequirement();
  Decl *ParseObjCMethodPrototype(
            tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
            bool MethodDefinition = true);
  Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
            tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
            bool MethodDefinition = true);
  void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);

  Decl *ParseObjCMethodDefinition();

public:
  //===--------------------------------------------------------------------===//
  // C99 6.5: Expressions.

  /// TypeCastState - State whether an expression is or may be a type cast.
  enum TypeCastState {
    NotTypeCast = 0,
    MaybeTypeCast,
    IsTypeCast
  };

  ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstraintExpression();
  // Expr that doesn't include commas.
  ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);

  ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
                                  unsigned &NumLineToksConsumed,
                                  void *Info,
                                  bool IsUnevaluated);

private:
  ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
  ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);

  ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec);
  ExprResult ParseCastExpression(bool isUnaryExpression,
                                 bool isAddressOfOperand,
                                 bool &NotCastExpr,
                                 TypeCastState isTypeCast);
  ExprResult ParseCastExpression(bool isUnaryExpression,
                                 bool isAddressOfOperand = false,
                                 TypeCastState isTypeCast = NotTypeCast);

  /// Returns true if the next token cannot start an expression.
  bool isNotExpressionStart();

  /// Returns true if the next token would start a postfix-expression
  /// suffix.
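  /// For example, the '[' in 'a[i]', the '(' in 'f(x)', the '.' or '->' in a
  /// member access, or a postfix '++' / '--'.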
bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<Expr*, 20> ExprListTy; typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, std::function<void()> Completer = nullptr); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr); //===--------------------------------------------------------------------===// // C++0x 5.1.2: Lambda expressions // [...] 
// () -> type {...}
  ExprResult ParseLambdaExpression();
  ExprResult TryParseLambdaExpression();
  Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro,
                                           bool *SkippedInits = nullptr);
  bool TryParseLambdaIntroducer(LambdaIntroducer &Intro);
  ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);

  //===--------------------------------------------------------------------===//
  // C++ 5.2p1: C++ Casts
  ExprResult ParseCXXCasts();

  //===--------------------------------------------------------------------===//
  // C++ 5.2p1: C++ Type Identification
  ExprResult ParseCXXTypeid();

  //===--------------------------------------------------------------------===//
  // C++ : Microsoft __uuidof Expression
  ExprResult ParseCXXUuidof();

  //===--------------------------------------------------------------------===//
  // C++ 5.2.4: C++ Pseudo-Destructor Expressions
  ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
                                      tok::TokenKind OpKind,
                                      CXXScopeSpec &SS,
                                      ParsedType ObjectType);

  //===--------------------------------------------------------------------===//
  // C++ 9.3.2: C++ 'this' pointer
  ExprResult ParseCXXThis();

  //===--------------------------------------------------------------------===//
  // C++ 15: C++ Throw Expression
  ExprResult ParseThrowExpression();

  ExceptionSpecificationType tryParseExceptionSpecification(
      bool Delayed, SourceRange &SpecificationRange,
      SmallVectorImpl<ParsedType> &DynamicExceptions,
      SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
      ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens);

  // EndLoc is filled with the location of the last token of the specification.
  ExceptionSpecificationType ParseDynamicExceptionSpecification(
      SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions,
      SmallVectorImpl<SourceRange> &Ranges);

  //===--------------------------------------------------------------------===//
  // C++0x 8: Function declaration trailing-return-type
  TypeResult ParseTrailingReturnType(SourceRange &Range);

  //===--------------------------------------------------------------------===//
  // C++ 2.13.5: C++ Boolean Literals
  ExprResult ParseCXXBoolLiteral();

  //===--------------------------------------------------------------------===//
  // C++ 5.2.3: Explicit type conversion (functional notation)
  ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);

  /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
  /// This should only be called when the current token is known to be part of
  /// a simple-type-specifier.
  void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);

  bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);

  //===--------------------------------------------------------------------===//
  // C++ 5.3.4 and 5.3.5: C++ new and delete
  bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
                                   Declarator &D);
  void ParseDirectNewDeclarator(Declarator &D);
  ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
  ExprResult ParseCXXDeleteExpression(bool UseGlobal,
                                      SourceLocation Start);

  //===--------------------------------------------------------------------===//
  // C++ if/switch/while condition expression.
  Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
                                          SourceLocation Loc,
                                          Sema::ConditionKind CK);

  //===--------------------------------------------------------------------===//
  // C++ Coroutines

  ExprResult ParseCoyieldExpression();

  //===--------------------------------------------------------------------===//
  // C99 6.7.8: Initialization.
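  // As a concrete reference for the dispatch in ParseInitializer below, the
  // two grammar alternatives correspond to source forms like (illustrative
  // only):
  //
  //   int x = 42;          // initializer is an assignment-expression
  //   int a[2] = {1, 2};   // braced list, handled by ParseBraceInitializer()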
/// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); ExprResult ParseInitializerWithPotentialDesignator(); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used.) typedef SmallVector<Stmt*, 32> StmtVector; /// A SmallVector of expressions, with stack size 12 (the maximum used.) typedef SmallVector<Expr*, 12> ExprVector; /// A SmallVector of types. typedef SmallVector<ParsedType, 12> TypeVector; StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr, bool AllowOpenMPStandalone = false); enum AllowedContsructsKind { /// \brief Allow any declarations, statements, OpenMP directives. ACK_Any, /// \brief Allow only statements and non-standalone OpenMP directives. 
ACK_StatementsOpenMPNonStandalone, /// \brief Allow statements and all executable OpenMP directives ACK_StatementsOpenMPAnyExecutable }; StmtResult ParseStatementOrDeclaration(StmtVector &Stmts, AllowedContsructsKind Allowed, SourceLocation *TrailingElseLoc = nullptr); StmtResult ParseStatementOrDeclarationAfterAttributes( StmtVector &Stmts, AllowedContsructsKind Allowed, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); StmtResult ParseExprStatement(); StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs); StmtResult ParseCaseStatement(bool MissingCase = false, ExprResult Expr = ExprResult()); StmtResult ParseDefaultStatement(); StmtResult ParseCompoundStatement(bool isStmtExpr = false); StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags); void ParseCompoundStatementLeadingPragmas(); StmtResult ParseCompoundStatementBody(bool isStmtExpr = false); bool ParseParenExprOrCondition(StmtResult *InitStmt, Sema::ConditionResult &CondResult, SourceLocation Loc, Sema::ConditionKind CK); StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc); StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc); StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc); StmtResult ParseDoStatement(); StmtResult ParseForStatement(SourceLocation *TrailingElseLoc); StmtResult ParseGotoStatement(); StmtResult ParseContinueStatement(); StmtResult ParseBreakStatement(); StmtResult ParseReturnStatement(); StmtResult ParseAsmStatement(bool &msAsm); StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc); StmtResult ParsePragmaLoopHint(StmtVector &Stmts, AllowedContsructsKind Allowed, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); /// \brief Describes the behavior that should be taken for an __if_exists /// block. enum IfExistsBehavior { /// \brief Parse the block; this code is always used. IEB_Parse, /// \brief Skip the block entirely; this code is never used. IEB_Skip, /// \brief Parse the block as a dependent block, which may be used in /// some template instantiations but not others. IEB_Dependent }; /// \brief Describes the condition of a Microsoft __if_exists or /// __if_not_exists block. struct IfExistsCondition { /// \brief The location of the initial keyword. SourceLocation KeywordLoc; /// \brief Whether this is an __if_exists block (rather than an /// __if_not_exists block). bool IsIfExists; /// \brief Nested-name-specifier preceding the name. CXXScopeSpec SS; /// \brief The name we're looking for. UnqualifiedId Name; /// \brief The behavior of this __if_exists or __if_not_exists block /// should. 
IfExistsBehavior Behavior; }; bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result); void ParseMicrosoftIfExistsStatement(StmtVector &Stmts); void ParseMicrosoftIfExistsExternalDeclaration(); void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType, AccessSpecifier& CurAS); bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs, bool &InitExprsOk); bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names, SmallVectorImpl<Expr *> &Constraints, SmallVectorImpl<Expr *> &Exprs); //===--------------------------------------------------------------------===// // C++ 6: Statements and Blocks StmtResult ParseCXXTryBlock(); StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false); StmtResult ParseCXXCatchBlock(bool FnCatch = false); //===--------------------------------------------------------------------===// // MS: SEH Statements and Blocks StmtResult ParseSEHTryBlock(); StmtResult ParseSEHExceptBlock(SourceLocation Loc); StmtResult ParseSEHFinallyBlock(SourceLocation Loc); StmtResult ParseSEHLeaveStatement(); //===--------------------------------------------------------------------===// // Objective-C Statements StmtResult ParseObjCAtStatement(SourceLocation atLoc); StmtResult ParseObjCTryStmt(SourceLocation atLoc); StmtResult ParseObjCThrowStmt(SourceLocation atLoc); StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc); StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc); //===--------------------------------------------------------------------===// // C99 6.7: Declarations. /// A context for parsing declaration specifiers. TODO: flesh this /// out, there are other significant restrictions on specifiers than /// would be best implemented in the parser. enum DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DSC_normal: case DSC_class: case DSC_top_level: case DSC_objc_method_result: case DSC_condition: return false; case DSC_template_type_arg: case DSC_type_specifier: case DSC_trailing: case DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Information on a C++0x for-range-initializer found while parsing a /// declaration which turns out to be a for-range-declaration. 
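  ///
  /// For example (illustrative), in
  /// \code
  ///   for (auto x : xs) { ... }
  /// \endcode
  /// 'auto x' is first parsed as an ordinary declaration; only when the ':'
  /// is seen is it reinterpreted as a for-range-declaration, and the colon's
  /// location is recorded in the structure below.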
struct ForRangeInit {
    SourceLocation ColonLoc;
    ExprResult RangeExpr;

    bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
  };

  DeclGroupPtrTy ParseDeclaration(unsigned Context, SourceLocation &DeclEnd,
                                  ParsedAttributesWithRange &attrs);
  DeclGroupPtrTy ParseSimpleDeclaration(unsigned Context,
                                        SourceLocation &DeclEnd,
                                        ParsedAttributesWithRange &attrs,
                                        bool RequireSemi,
                                        ForRangeInit *FRI = nullptr);
  bool MightBeDeclarator(unsigned Context);
  DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, unsigned Context,
                                SourceLocation *DeclEnd = nullptr,
                                ForRangeInit *FRI = nullptr);
  Decl *ParseDeclarationAfterDeclarator(Declarator &D,
               const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
  bool ParseAsmAttributesAfterDeclarator(Declarator &D);
  Decl *ParseDeclarationAfterDeclaratorAndAttributes(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      ForRangeInit *FRI = nullptr);
  Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
  Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

  /// \brief When in code-completion, skip parsing of the function/method body
  /// unless the body contains the code-completion point.
  ///
  /// \returns true if the function body was skipped.
  bool trySkippingFunctionBody();

  bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC,
                        ParsedAttributesWithRange &Attrs);
  DeclSpecContext getDeclSpecContextFromDeclaratorContext(unsigned Context);
  void ParseDeclarationSpecifiers(
      DeclSpec &DS,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      AccessSpecifier AS = AS_none, DeclSpecContext DSC = DSC_normal,
      LateParsedAttrList *LateAttrs = nullptr);
  bool DiagnoseMissingSemiAfterTagDefinition(
      DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
      LateParsedAttrList *LateAttrs = nullptr);

  void ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS = AS_none,
                                   DeclSpecContext DSC = DSC_normal);

  void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
                                  Declarator::TheContext Context);

  void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                          const ParsedTemplateInfo &TemplateInfo,
                          AccessSpecifier AS, DeclSpecContext DSC);
  void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
  void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
                            Decl *TagDecl);
  void ParseStructDeclaration(
      ParsingDeclSpec &DS,
      llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

  bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
  bool isTypeSpecifierQualifier();

  /// isKnownToBeTypeSpecifier - Return true if we know that the specified
  /// token is definitely a type-specifier. Return false if it isn't part of a
  /// type specifier or if we're not sure.
  bool isKnownToBeTypeSpecifier(const Token &Tok) const;

  /// \brief Return true if we know that we are definitely looking at a
  /// decl-specifier, and that it isn't part of an expression such as a
  /// function-style cast. Return false if it's not a decl-specifier, or we're
  /// not sure.
  bool isKnownToBeDeclarationSpecifier() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationSpecifier() == TPResult::True;
    return isDeclarationSpecifier(true);
  }

  /// isDeclarationStatement - Disambiguates between a declaration and an
  /// expression statement, when parsing function bodies.
  /// Returns true for declaration, false for expression.
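  ///
  /// The classic ambiguous case looks like (illustrative only):
  /// \code
  ///   T(a);   // a declaration of a variable 'a' when 'T' names a type
  ///   f(a);   // an expression statement when 'f' names a function
  /// \endcode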
bool isDeclarationStatement() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationStatement();
    return isDeclarationSpecifier(true);
  }

  /// isForInitDeclaration - Disambiguates between a declaration and an
  /// expression in the context of the C 'clause-1' or the C++
  /// 'for-init-statement' part of a 'for' statement.
  /// Returns true for declaration, false for expression.
  bool isForInitDeclaration() {
    if (getLangOpts().CPlusPlus)
      return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
    return isDeclarationSpecifier(true);
  }

  /// \brief Determine whether this is a C++1z for-range-identifier.
  bool isForRangeIdentifier();

  /// \brief Determine whether we are currently at the start of an Objective-C
  /// class message that appears to be missing the open bracket '['.
  bool isStartOfObjCClassMessageMissingOpenBracket();

  /// \brief Starting with a scope specifier, identifier, or
  /// template-id that refers to the current class, determine whether
  /// this is a constructor declarator.
  bool isConstructorDeclarator(bool Unqualified);

  /// \brief Specifies the context in which type-id/expression
  /// disambiguation will occur.
  enum TentativeCXXTypeIdContext {
    TypeIdInParens,
    TypeIdUnambiguous,
    TypeIdAsTemplateArgument
  };

  /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
  /// whether the parens contain an expression or a type-id.
  /// Returns true for a type-id and false for an expression.
  bool isTypeIdInParens(bool &isAmbiguous) {
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdInParens, isAmbiguous);
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  bool isTypeIdInParens() {
    bool isAmbiguous;
    return isTypeIdInParens(isAmbiguous);
  }

  /// \brief Checks whether the current tokens form a type-id or an expression.
  /// It is similar to isTypeIdInParens but does not assume that the type-id
  /// is in parentheses.
  bool isTypeIdUnambiguously() {
    bool IsAmbiguous;
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
    return isTypeSpecifierQualifier();
  }

  /// isCXXDeclarationStatement - C++-specialized function that disambiguates
  /// between a declaration and an expression statement, when parsing function
  /// bodies. Returns true for declaration, false for expression.
  bool isCXXDeclarationStatement();

  /// isCXXSimpleDeclaration - C++-specialized function that disambiguates
  /// between a simple-declaration and an expression-statement.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  /// Returns false if the statement is disambiguated as an expression.
  bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

  /// isCXXFunctionDeclarator - Disambiguates between a function declarator and
  /// a constructor-style initializer, when parsing declaration statements.
  /// Returns true for function declarator and false for constructor-style
  /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
  /// might be a constructor-style initializer.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

  struct ConditionDeclarationOrInitStatementState;
  enum class ConditionOrInitStatement {
    Expression,    ///< Disambiguated as an expression (either kind).
    ConditionDecl, ///< Disambiguated as the declaration form of condition.
    InitStmtDecl,  ///< Disambiguated as a simple-declaration init-statement.
    Error          ///< Can't be any of the above!
  };

  /// \brief Disambiguates between the different kinds of things that can
  /// happen after 'if (' or 'switch ('. This could be one of two different
  /// kinds of declaration (depending on whether there is a ';' later) or an
  /// expression.
  ConditionOrInitStatement
  isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt);

  bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
  bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
    bool isAmbiguous;
    return isCXXTypeId(Context, isAmbiguous);
  }

  /// TPResult - Used as the result value for functions whose purpose is to
  /// disambiguate C++ constructs by "tentatively parsing" them.
  enum class TPResult {
    True, False, Ambiguous, Error
  };

  /// \brief Based only on the given token kind, determine whether we know that
  /// we're at the start of an expression or a type-specifier-seq (which may
  /// be an expression, in C++).
  ///
  /// This routine does not attempt to resolve any of the tricky cases, e.g.,
  /// those involving lookup of identifiers.
  ///
  /// \returns \c TPResult::True if this token starts an expression,
  /// \c TPResult::False if this token starts a type-specifier-seq, or
  /// \c TPResult::Ambiguous if it cannot tell.
  TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);

  /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
  /// declaration specifier, TPResult::False if it is not,
  /// TPResult::Ambiguous if it could be either a decl-specifier or a
  /// function-style cast, and TPResult::Error if a parsing error was
  /// encountered. If it could be a braced C++11 function-style cast, returns
  /// BracedCastResult.
  /// Doesn't consume tokens.
  TPResult
  isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
                            bool *HasMissingTypename = nullptr);

  /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
  /// \c TPResult::Ambiguous, determine whether the decl-specifier would be
  /// a type-specifier other than a cv-qualifier.
  bool isCXXDeclarationSpecifierAType();

  /// \brief Determine whether an identifier has been tentatively declared as a
  /// non-type. Such tentative declarations should not be found to name a type
  /// during a tentative parse, but also should not be annotated as a non-type.
  bool isTentativelyDeclared(IdentifierInfo *II);

  // "Tentative parsing" functions, used for disambiguation. If a parsing error
  // is encountered they will return TPResult::Error.
  // Returning TPResult::True/False indicates that the ambiguity was
  // resolved and tentative parsing may stop. TPResult::Ambiguous indicates
  // that more tentative parsing is necessary for disambiguation.
  // They all consume tokens, so backtracking should be used after calling them.
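  //
  // A minimal sketch of the intended calling convention (illustrative only;
  // it assumes the TentativeParsingAction RAII helper declared elsewhere in
  // this class):
  //
  //   TentativeParsingAction TPA(*this);   // record a rewind point
  //   TPResult TPR = TryParseSimpleDeclaration(/*AllowForRangeDecl=*/false);
  //   TPA.Revert();                        // give back the consumed tokens
  //   if (TPR == TPResult::True)
  //     ; // reparse for real as a declaration
  //   else if (TPR == TPResult::False)
  //     ; // reparse for real as an expression
  //   else
  //     ; // Ambiguous: keep disambiguating; Error: bail out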
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
  TPResult TryParseTypeofSpecifier();
  TPResult TryParseProtocolQualifiers();
  TPResult TryParsePtrOperatorSeq();
  TPResult TryParseOperatorId();
  TPResult TryParseInitDeclaratorList();
  TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true);
  TPResult
  TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
                                     bool VersusTemplateArg = false);
  TPResult TryParseFunctionDeclarator();
  TPResult TryParseBracketDeclarator();
  TPResult TryConsumeDeclarationSpecifier();

public:
  TypeResult ParseTypeName(SourceRange *Range = nullptr,
                           Declarator::TheContext Context
                             = Declarator::TypeNameContext,
                           AccessSpecifier AS = AS_none,
                           Decl **OwnedType = nullptr,
                           ParsedAttributes *Attrs = nullptr);

private:
  void ParseBlockId(SourceLocation CaretLoc);

  // Check for the start of a C++11 attribute-specifier-seq in a context where
  // an attribute is not allowed.
  bool CheckProhibitedCXX11Attribute() {
    assert(Tok.is(tok::l_square));
    if (!getLangOpts().CPlusPlus11 || NextToken().isNot(tok::l_square))
      return false;
    return DiagnoseProhibitedCXX11Attribute();
  }

  bool DiagnoseProhibitedCXX11Attribute();
  void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                    SourceLocation CorrectLocation) {
    if (!getLangOpts().CPlusPlus11)
      return;
    if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
        Tok.isNot(tok::kw_alignas))
      return;
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
  }
  void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                       SourceLocation CorrectLocation);

  void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                      DeclSpec &DS, Sema::TagUseKind TUK);

  void ProhibitAttributes(ParsedAttributesWithRange &attrs) {
    if (!attrs.Range.isValid()) return;
    DiagnoseProhibitedAttributes(attrs);
    attrs.clear();
  }
  void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs);

  // Forbid C++11 attributes that appear in certain syntactic locations which
  // the standard permits but we do not support yet, for example, attributes
  // that appertain to decl-specifiers.
  void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                               unsigned DiagID);

  /// \brief Skip C++11 attributes and return the end location of the last one.
  /// \returns SourceLocation() if there are no attributes.
  SourceLocation SkipCXX11Attributes();

  /// \brief Diagnose and skip C++11 attributes that appear in syntactic
  /// locations where attributes are not allowed.
  void DiagnoseAndSkipCXX11Attributes();

  /// \brief Parses syntax-generic attribute arguments for attributes which are
  /// known to the implementation, and adds them to the given ParsedAttributes
  /// list with the given attribute syntax. Returns the number of arguments
  /// parsed for the attribute.
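  ///
  /// For example (attribute spellings shown for illustration), the argument
  /// lists in
  /// \code
  ///   __attribute__((format(printf, 1, 2)))
  ///   [[deprecated("use the replacement API")]]
  /// \endcode
  /// both reach this common path, with \p Syntax recording which spelling
  /// introduced the attribute.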
unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } void MaybeParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) ParseGNUAttributes(attrs, endLoc, LateAttrs); } void ParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } void MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); } } void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) ParseCXX11Attributes(attrs, endLoc); } void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs, SourceLocation *EndLoc = nullptr); void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// \brief Parses a C++-style attribute argument list. Returns true if this /// results in adding an attribute to the ParsedAttributes list. 
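  ///
  /// For example (illustrative):
  /// \code
  ///   [[noreturn]]          // no argument clause to parse
  ///   [[gnu::aligned(8)]]   // scoped attribute with an argument clause
  /// \endcode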
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) ParseMicrosoftDeclSpecs(Attrs, End); } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// \brief Parses opencl_unroll_hint attribute if language is OpenCL v2.0 /// or higher. /// \return false if error happens. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// \brief Parses opencl_unroll_hint attribute. /// \return false if error happens. 
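  ///
  /// For example (illustrative, OpenCL v2.0 kernel code):
  /// \code
  ///   __attribute__((opencl_unroll_hint(4)))
  ///   for (int i = 0; i < 64; ++i) { ... }
  /// \endcode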
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseSwiftNewtypeAttribute(IdentifierInfo &SwiftNewtype, SourceLocation SwiftNewtypeLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation &EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. 
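  ///
  /// Used together with ParseDeclaratorInternal below, e.g. (illustrative
  /// only):
  /// \code
  ///   ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
  /// \endcode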
typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( Declarator &D, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. 
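  ///
  /// For example (adapted from the example in the standard):
  /// \code
  ///   int y[5];
  ///   y[[] { return 2; }()] = 2;   // ill-formed: '[[' does not begin an
  ///                                // attribute-specifier here
  /// \endcode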
CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(unsigned Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); void ParseInnerNamespace(std::vector<SourceLocation>& IdentLoc, std::vector<IdentifierInfo*>& Ident, std::vector<SourceLocation>& NamespaceLoc, unsigned int index, SourceLocation& InlineLoc, ParsedAttributes& attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, unsigned Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( unsigned Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(unsigned Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; SourceLocation TemplateKWLoc; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = TemplateKWLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(unsigned Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(unsigned Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. 
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
  void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
                           DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
                           AccessSpecifier AS, bool EnteringContext,
                           DeclSpecContext DSC,
                           ParsedAttributesWithRange &Attributes);
  void SkipCXXMemberSpecification(SourceLocation StartLoc,
                                  SourceLocation AttrFixitLoc,
                                  unsigned TagType,
                                  Decl *TagDecl);
  void ParseCXXMemberSpecification(SourceLocation StartLoc,
                                   SourceLocation AttrFixitLoc,
                                   ParsedAttributesWithRange &Attrs,
                                   unsigned TagType,
                                   Decl *TagDecl);
  ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
                                       SourceLocation &EqualLoc);
  bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
                                                 VirtSpecifiers &VS,
                                                 ExprResult &BitfieldSize,
                                                 LateParsedAttrList &LateAttrs);
  void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
                                                               VirtSpecifiers &VS);
  DeclGroupPtrTy ParseCXXClassMemberDeclaration(
      AccessSpecifier AS, AttributeList *Attr,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
  DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
      AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
      DeclSpec::TST TagType, Decl *Tag);
  void ParseConstructorInitializer(Decl *ConstructorDecl);
  MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
  void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
                                      Decl *ThisDecl);

  //===--------------------------------------------------------------------===//
  // C++ 10: Derived classes [class.derived]
  TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
                                    SourceLocation &EndLocation);
  void ParseBaseClause(Decl *ClassDecl);
  BaseResult ParseBaseSpecifier(Decl *ClassDecl);
  AccessSpecifier getAccessSpecifierIfPresent() const;

  bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
                                    SourceLocation TemplateKWLoc,
                                    IdentifierInfo *Name,
                                    SourceLocation NameLoc,
                                    bool EnteringContext,
                                    ParsedType ObjectType,
                                    UnqualifiedId &Id,
                                    bool AssumeTemplateId);
  bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
                                  ParsedType ObjectType,
                                  UnqualifiedId &Result);

  //===--------------------------------------------------------------------===//
  // OpenMP: Directives and clauses.
  /// Parse clauses for '#pragma omp declare simd'.
  DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
                                            CachedTokens &Toks,
                                            SourceLocation Loc);
  /// \brief Parses declarative OpenMP directives.
  DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
      AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
      DeclSpec::TST TagType = DeclSpec::TST_unspecified,
      Decl *TagDecl = nullptr);
  /// \brief Parse 'omp declare reduction' construct.
  DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);

  /// \brief Parses a simple list of variables.
  ///
  /// \param Kind Kind of the directive.
  /// \param Callback Callback function to be called for the list elements.
  /// \param AllowScopeSpecifier true, if the variables can have fully
  /// qualified names.
  ///
  bool ParseOpenMPSimpleVarList(
      OpenMPDirectiveKind Kind,
      const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
          Callback,
      bool AllowScopeSpecifier);
  /// \brief Parses declarative or executable directive.
  ///
  /// \param Allowed ACK_Any, if any directives are allowed,
  /// ACK_StatementsOpenMPAnyExecutable - if any executable directives are
  /// allowed, ACK_StatementsOpenMPNonStandalone - if only non-standalone
  /// executable directives are allowed.
/// StmtResult ParseOpenMPDeclarativeOrExecutableDirective(AllowedContsructsKind Allowed); /// \brief Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// \brief Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind); /// \brief Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind); /// \brief Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind); /// \brief Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind); /// \brief Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *TailExpr = nullptr; SourceLocation ColonLoc; CXXScopeSpec ReductionIdScopeSpec; DeclarationNameInfo ReductionId; OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val; OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown; OpenMPMapClauseKind MapType = OMPC_MAP_unknown; bool IsMapTypeImplicit = false; SourceLocation DepLinMapLoc; }; /// Parses clauses with list. 
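  ///
  /// For example (illustrative), the parenthesized variable lists in
  /// \code
  ///   #pragma omp parallel private(a, b) reduction(+ : sum)
  /// \endcode
  /// are parsed by this routine, one clause at a time.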
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, ParsedType ObjectType, SourceLocation& TemplateKWLoc, UnqualifiedId &Result); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(unsigned Context, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none, AttributeList *AccessAttrs = nullptr); Decl *ParseTemplateDeclarationOrSpecialization(unsigned Context, SourceLocation &DeclEnd, AccessSpecifier AS, AttributeList *AccessAttrs); Decl *ParseSingleDeclarationAfterTemplate( unsigned Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, AccessSpecifier AS=AS_none, AttributeList *AccessAttrs = nullptr); bool ParseTemplateParameters(unsigned Depth, SmallVectorImpl<Decl*> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<Decl*> &TemplateParams); bool isStartOfTemplateTypeParameter(); Decl *ParseTemplateParameter(unsigned Depth, unsigned Position); Decl *ParseTypeParameter(unsigned Depth, unsigned Position); Decl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); Decl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(TemplateTy Template, SourceLocation TemplateNameLoc, const CXXScopeSpec &SS, bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true); void AnnotateTemplateIdTokenAsType(); bool IsTemplateArgumentList(unsigned Skip = 0); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(unsigned Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(); DeclGroupPtrTy ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult 
ParseTypeTrait();

  /// Parse the given string as a type.
  ///
  /// This is a dangerous utility function currently employed only by API
  /// notes. It is not a general entry-point for safely parsing types from
  /// strings.
  ///
  /// \param typeStr The string to be parsed as a type.
  /// \param context The name of the context in which this string is being
  /// parsed, which will be used in diagnostics.
  /// \param includeLoc The location at which this parse was triggered.
  TypeResult parseTypeFromString(StringRef typeStr, StringRef context,
                                 SourceLocation includeLoc);

  //===--------------------------------------------------------------------===//
  // Embarcadero: Array and Expression Traits
  ExprResult ParseArrayTypeTrait();
  ExprResult ParseExpressionTrait();

  //===--------------------------------------------------------------------===//
  // Preprocessor code-completion pass-through
  void CodeCompleteDirective(bool InConditional) override;
  void CodeCompleteInConditionalExclusion() override;
  void CodeCompleteMacroName(bool IsDefinition) override;
  void CodeCompletePreprocessorExpression() override;
  void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
                                 unsigned ArgumentIndex) override;
  void CodeCompleteNaturalLanguage() override;
};

} // end namespace clang

#endif
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/LoopHint.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class VersionTuple; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. /// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. 
Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. mutable IdentifierInfo *Ident_instancetype; /// \brief Identifier for "introduced". IdentifierInfo *Ident_introduced; /// \brief Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// \brief Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// \brief Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// \brief Identifier for "message". IdentifierInfo *Ident_message; /// \brief Identifier for "strict". IdentifierInfo *Ident_strict; /// \brief Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++0x contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// \brief When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. 
bool InMessageExpression; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// \brief RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } unsigned getDepth() const { return Depth; } }; /// Factory object for creating AttributeList objects. AttributeFactory AttrFactory; /// \brief Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; /// \brief Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion /// and balanced tokens must be handled using the specific consume methods. /// Returns the location of the consumed token. 
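  ///
  /// Typical use (illustrative only):
  /// \code
  ///   assert(Tok.is(tok::semi) && "caller already checked the token kind");
  ///   SourceLocation SemiLoc = ConsumeToken(); // Tok now holds the next token
  /// \endcode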
SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.getKind() == tok::l_paren || Tok.getKind() == tok::r_paren; } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.getKind() == tok::l_square || Tok.getKind() == tok::r_square; } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.getKind() == tok::l_brace || Tok.getKind() == tok::r_brace; } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion); } /// \brief Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '=' bool isTokenEqualOrEqualTypo(); /// \brief Return the current token to the token stream and make the given /// token the current token. void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed); PP.Lex(Tok); PP.EnterToken(Next); } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); return ConsumeToken(); } /// ConsumeParen - This consume method keeps the paren count up-to-date. /// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) --ParenCount; // Don't let unbalanced )'s drive the count negative. PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. 
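/// Illustrative sketch (an assumed pattern, not from this header): callers /// usually keep these counts balanced via BalancedDelimiterTracker rather /// than by calling the Consume* methods directly: /// \code /// BalancedDelimiterTracker T(*this, tok::l_square); /// if (T.consumeOpen()) // eats '[' through ConsumeBracket() /// return ExprError(); /// /* ... parse the bracketed contents ... */ /// T.consumeClose(); // eats the matching ']' /// \endcode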
/// SourceLocation ConsumeBracket() { assert(isTokenBracket() && "wrong consume method"); if (Tok.getKind() == tok::l_square) ++BracketCount; else if (BracketCount) --BracketCount; // Don't let unbalanced ]'s drive the count negative. PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBrace - This consume method keeps the brace count up-to-date. /// SourceLocation ConsumeBrace() { assert(isTokenBrace() && "wrong consume method"); if (Tok.getKind() == tok::l_brace) ++BraceCount; else if (BraceCount) --BraceCount; // Don't let unbalanced }'s drive the count negative. PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeStringToken - Consume the current 'peek token', lexing a new one /// and returning the location of the consumed string literal. This method is /// specific to strings, as it handles string literal concatenation, as per /// C99 5.1.1.2, translation phase #6. SourceLocation ConsumeStringToken() { assert(isTokenStringLiteral() && "Should only consume string literals with this method"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// \brief Consume the current code-completion token. /// /// This routine can be called to consume the code-completion token and /// continue processing in special cases where \c cutOffParsing() isn't /// desired, such as token caching or completion with lookahead. SourceLocation ConsumeCodeCompletionToken() { assert(Tok.is(tok::code_completion)); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// \brief When we are consuming a code-completion token without having /// matched a specific position in the grammar, provide code-completion results /// based on context. /// /// \returns the source location of the code-completion token. SourceLocation handleUnexpectedCodeCompletionToken(); /// \brief Abruptly cut off parsing; mainly used when we have reached the /// code-completion point. void cutOffParsing() { if (PP.isCodeCompletionEnabled()) PP.setCodeCompletionReached(); // Cut off parsing by acting as if we reached the end-of-file. Tok.setKind(tok::eof); } /// \brief Determine if we're at the end of the file or at a transition /// between modules. bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// \brief Initialize all pragma handlers. void initializePragmaHandlers(); /// \brief Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// \brief Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// \brief Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// \brief Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// \brief Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); /// \brief Handle the annotation token produced for /// #pragma comment... void HandlePragmaMSComment(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// \brief Handle the annotation token produced for /// #pragma align...
void HandlePragmaAlign(); /// \brief Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// \brief Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// \brief Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// \brief Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// \brief Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// \brief Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// \brief Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// \brief Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. static ParsedType getTypeAnnotation(Token &Tok) { return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, ParsedType T) { Tok.setAnnotationValue(T.getAsOpaquePtr()); } /// \brief Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// \brief Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // TryAnnotateTypeOrScopeToken will try hard to find a type name, attempting // typo correction if necessary. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream.
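/// Illustrative sketch (assumed usage, not from this header): a scope /// annotation produced here is later recognized by checking the annotation /// kind of the current token, e.g.: /// \code /// if (Tok.is(tok::annot_cxxscope)) { /// /* the CXXScopeSpec was stashed in the token's annotation value */ /// } /// \endcode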
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. bool isObjCInstancetype() { assert(getLangOpts().ObjC1); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// \brief Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... 
/// TPA.Revert(); /// class TentativeParsingAction { Parser &P; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. /// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// \brief The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// \brief The kind of extra semi diagnostic to emit. enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// \brief Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. 
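/// Illustrative sketch (not from the original header): /// \code /// if (expectIdentifier()) /// return nullptr; // 'expected identifier' already diagnosed /// IdentifierInfo *II = Tok.getIdentifierInfo(); /// SourceLocation IdLoc = ConsumeToken(); /// \endcode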
bool expectIdentifier(); public: //===--------------------------------------------------------------------===// // Scope manipulation /// ParseScope - Introduces a new scope for parsing. The kind of /// scope is determined by ScopeFlags. Objects of this type should /// be created on the stack to coincide with the position where the /// parser enters the new scope, and this object's constructor will /// create that new scope. Similarly, once the object is destroyed /// the parser will exit the scope. class ParseScope { Parser *Self; ParseScope(const ParseScope &) = delete; void operator=(const ParseScope &) = delete; public: // ParseScope - Construct a new object to manage a scope in the // parser Self where the new Scope is created with the flags // ScopeFlags, but only when we aren't about to enter a compound statement. ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true, bool BeforeCompoundStmt = false) : Self(Self) { if (EnteredScope && !BeforeCompoundStmt) Self->EnterScope(ScopeFlags); else { if (BeforeCompoundStmt) Self->incrementMSManglingNumber(); this->Self = nullptr; } } // Exit - Exit the scope associated with this object now, rather // than waiting until the object is destroyed. void Exit() { if (Self) { Self->ExitScope(); Self = nullptr; } } ~ParseScope() { Exit(); } }; /// EnterScope - Start a new scope. void EnterScope(unsigned ScopeFlags); /// ExitScope - Pop a scope off the scope stack. void ExitScope(); private: /// \brief RAII object used to modify the scope flags for the current scope. class ParseScopeFlags { Scope *CurScope; unsigned OldFlags; ParseScopeFlags(const ParseScopeFlags &) = delete; void operator=(const ParseScopeFlags &) = delete; public: ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true); ~ParseScopeFlags(); }; //===--------------------------------------------------------------------===// // Diagnostic Emission and Error recovery. public: DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID); DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID); DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); } private: void SuggestParentheses(SourceLocation Loc, unsigned DK, SourceRange ParenRange); void CheckNestedObjCContexts(SourceLocation AtLoc); public: /// \brief Control flags for SkipUntil functions. enum SkipUntilFlags { StopAtSemi = 1 << 0, ///< Stop skipping at semicolon /// \brief Stop skipping at specified token, but don't skip the token itself StopBeforeMatch = 1 << 1, StopAtCodeCompletion = 1 << 2 ///< Stop at code completion }; friend constexpr SkipUntilFlags operator|(SkipUntilFlags L, SkipUntilFlags R) { return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) | static_cast<unsigned>(R)); } /// SkipUntil - Read tokens until we get to the specified token, then consume /// it (unless StopBeforeMatch is specified). Because we cannot guarantee /// that the token will ever occur, this skips to the next token, or to some /// likely good stopping point. If Flags has StopAtSemi flag, skipping will /// stop at a ';' character. /// /// If SkipUntil finds the specified token, it returns true, otherwise it /// returns false. 
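/// Illustrative recovery sketch (assumed usage, not from this header): /// \code /// if (ExpectAndConsume(tok::l_paren)) { /// // Malformed input: resynchronize at the ')' without eating a ';'. /// SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch); /// } /// \endcode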
bool SkipUntil(tok::TokenKind T, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { return SkipUntil(llvm::makeArrayRef(T), Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2}; return SkipUntil(TokArray, Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2, T3}; return SkipUntil(TokArray, Flags); } bool SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)); /// SkipMalformedDecl - Read tokens until we get to some likely good stopping /// point for skipping past a simple-declaration. void SkipMalformedDecl(); private: //===--------------------------------------------------------------------===// // Lexing and parsing of C++ inline methods. struct ParsingClass; /// [class.mem]p1: "... the class is regarded as complete within /// - function bodies /// - default arguments /// - exception-specifications (TODO: C++0x) /// - and brace-or-equal-initializers for non-static data members /// (including such things in nested classes)." /// LateParsedDeclarations build the tree of those elements so they can /// be parsed after parsing the top-level class. class LateParsedDeclaration { public: virtual ~LateParsedDeclaration(); virtual void ParseLexedMethodDeclarations(); virtual void ParseLexedMemberInitializers(); virtual void ParseLexedMethodDefs(); virtual void ParseLexedAttributes(); }; /// Inner node of the LateParsedDeclaration tree that parses /// all its members recursively. class LateParsedClass : public LateParsedDeclaration { public: LateParsedClass(Parser *P, ParsingClass *C); ~LateParsedClass() override; void ParseLexedMethodDeclarations() override; void ParseLexedMemberInitializers() override; void ParseLexedMethodDefs() override; void ParseLexedAttributes() override; private: Parser *Self; ParsingClass *Class; }; /// Contains the lexed tokens of an attribute with arguments that /// may reference member variables and so need to be parsed at the /// end of the class declaration after parsing all other /// member declarations. /// FIXME: Perhaps we should change the name of LateParsedDeclaration to /// LateParsedTokens. struct LateParsedAttribute : public LateParsedDeclaration { Parser *Self; CachedTokens Toks; IdentifierInfo &AttrName; SourceLocation AttrNameLoc; SmallVector<Decl*, 2> Decls; explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name, SourceLocation Loc) : Self(P), AttrName(Name), AttrNameLoc(Loc) {} void ParseLexedAttributes() override; void addDecl(Decl *D) { Decls.push_back(D); } }; // A list of late-parsed attributes. Used by ParseGNUAttributes. class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> { public: LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { } bool parseSoon() { return ParseSoon; } private: bool ParseSoon; // Are we planning to parse these shortly after creation? }; /// Contains the lexed tokens of a member function definition /// which needs to be parsed at the end of the class declaration /// after parsing all other member declarations. struct LexedMethod : public LateParsedDeclaration { Parser *Self; Decl *D; CachedTokens Toks; /// \brief Whether this member function had an associated template /// scope. When true, D is a template declaration. /// Otherwise, it is a member function declaration.
bool TemplateScope; explicit LexedMethod(Parser* P, Decl *MD) : Self(P), D(MD), TemplateScope(false) {} void ParseLexedMethodDefs() override; }; /// LateParsedDefaultArgument - Keeps track of a parameter that may /// have a default argument that cannot be parsed yet because it /// occurs within a member function declaration inside the class /// (C++ [class.mem]p2). struct LateParsedDefaultArgument { explicit LateParsedDefaultArgument(Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr) : Param(P), Toks(std::move(Toks)) { } /// Param - The parameter declaration for this parameter. Decl *Param; /// Toks - The sequence of tokens that comprises the default /// argument expression, not including the '=' or the terminating /// ')' or ','. This will be NULL for parameters that have no /// default argument. std::unique_ptr<CachedTokens> Toks; }; /// LateParsedMethodDeclaration - A method declaration inside a class that /// contains at least one entity whose parsing needs to be delayed /// until the class itself is completely defined, such as a default /// argument (C++ [class.mem]p2). struct LateParsedMethodDeclaration : public LateParsedDeclaration { explicit LateParsedMethodDeclaration(Parser *P, Decl *M) : Self(P), Method(M), TemplateScope(false), ExceptionSpecTokens(nullptr) {} void ParseLexedMethodDeclarations() override; Parser* Self; /// Method - The method declaration. Decl *Method; /// \brief Whether this member function had an associated template /// scope. When true, D is a template declaration. /// Otherwise, it is a member function declaration. bool TemplateScope; /// DefaultArgs - Contains the parameters of the function and /// their default arguments. At least one of the parameters will /// have a default argument, but all of the parameters of the /// method will be stored so that they can be reintroduced into /// scope at the appropriate times. SmallVector<LateParsedDefaultArgument, 8> DefaultArgs; /// \brief The set of tokens that make up an exception-specification that /// has not yet been parsed. CachedTokens *ExceptionSpecTokens; }; /// LateParsedMemberInitializer - An initializer for a non-static class data /// member whose parsing must be delayed until the class is completely /// defined (C++11 [class.mem]p2). struct LateParsedMemberInitializer : public LateParsedDeclaration { LateParsedMemberInitializer(Parser *P, Decl *FD) : Self(P), Field(FD) { } void ParseLexedMemberInitializers() override; Parser *Self; /// Field - The field declaration. Decl *Field; /// CachedTokens - The sequence of tokens that comprises the initializer, /// including any leading '='. CachedTokens Toks; }; /// LateParsedDeclarationsContainer - During parsing of a top-level /// (non-nested) C++ class, method declarations that contain parts which /// won't be parsed until after the definition is completed /// (C++ [class.mem]p2), together with any attached inline definitions, /// will be stored here with the tokens that will be parsed to create those /// entities. typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer; /// \brief Representation of a class that has been parsed, including /// any member function declarations or definitions that need to be /// parsed after the corresponding top-level class is complete.
struct ParsingClass { ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : TopLevelClass(TopLevelClass), TemplateScope(false), IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { } /// \brief Whether this is a "top-level" class, meaning that it is /// not nested within another class. bool TopLevelClass : 1; /// \brief Whether this class had an associated template /// scope. When true, TagOrTemplate is a template declaration; /// otherwise, it is a tag declaration. bool TemplateScope : 1; /// \brief Whether this class is an __interface. bool IsInterface : 1; /// \brief The class or class template whose definition we are parsing. Decl *TagOrTemplate; /// LateParsedDeclarations - Method declarations, inline definitions and /// nested classes that contain pieces whose parsing will be delayed until /// the top-level class is fully defined. LateParsedDeclarationsContainer LateParsedDeclarations; }; /// \brief The stack of classes that is currently being /// parsed. Nested and local classes will be pushed onto this stack /// when they are parsed, and removed afterward. std::stack<ParsingClass *> ClassStack; ParsingClass &getCurrentClass() { assert(!ClassStack.empty() && "No lexed method stacks!"); return *ClassStack.top(); } /// \brief RAII object used to manage the parsing of a class definition. class ParsingClassDefinition { Parser &P; bool Popped; Sema::ParsingClassState State; public: ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : P(P), Popped(false), State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) { } /// \brief Pop this class off the stack. void Pop() { assert(!Popped && "Nested class has already been popped"); Popped = true; P.PopParsingClass(State); } ~ParsingClassDefinition() { if (!Popped) P.PopParsingClass(State); } }; /// \brief Contains information about any template-specific /// information that has been parsed prior to parsing declaration /// specifiers. struct ParsedTemplateInfo { ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { } ParsedTemplateInfo(TemplateParameterLists *TemplateParams, bool isSpecialization, bool lastParameterListWasEmpty = false) : Kind(isSpecialization? ExplicitSpecialization : Template), TemplateParams(TemplateParams), LastParameterListWasEmpty(lastParameterListWasEmpty) { } explicit ParsedTemplateInfo(SourceLocation ExternLoc, SourceLocation TemplateLoc) : Kind(ExplicitInstantiation), TemplateParams(nullptr), ExternLoc(ExternLoc), TemplateLoc(TemplateLoc), LastParameterListWasEmpty(false){ } /// \brief The kind of template we are parsing. enum { /// \brief We are not parsing a template at all. NonTemplate = 0, /// \brief We are parsing a template declaration. Template, /// \brief We are parsing an explicit specialization. ExplicitSpecialization, /// \brief We are parsing an explicit instantiation. ExplicitInstantiation } Kind; /// \brief The template parameter lists, for template declarations /// and explicit specializations. TemplateParameterLists *TemplateParams; /// \brief The location of the 'extern' keyword, if any, for an explicit /// instantiation. SourceLocation ExternLoc; /// \brief The location of the 'template' keyword, for an explicit /// instantiation. SourceLocation TemplateLoc; /// \brief Whether the last template parameter list was empty.
bool LastParameterListWasEmpty; SourceRange getSourceRange() const LLVM_READONLY; }; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); static void LateTemplateParserCleanupCallback(void *P); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, AttributeList *AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers& VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. struct ParsedAttributesWithRange : ParsedAttributes { ParsedAttributesWithRange(AttributeFactory &factory) : ParsedAttributes(factory) {} void clear() { ParsedAttributes::clear(); Range = SourceRange(); } SourceRange Range; }; DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc, if non-NULL, is filled with the location of the last token of // the simple-asm. 
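// Illustrative input (a GNU extension; example assumed, not from this // header): the simple-asm-expr is the asm("real_g") label in: // int g(void) asm("real_g");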
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr); ExprResult ParseAsmStringLiteral(); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. 
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for Objective-C context-sensitive keyword recognition. enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, Declarator::TheContext Ctx, ParsedAttributes *ParamAttrs); void ParseObjCMethodRequirement(); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition=true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstraintExpression(); // Expr that doesn't include commas. ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, void *Info, bool IsUnevaluated); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast); /// Returns true if the next token cannot start an expression. bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix.
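/// For illustration: the '[' in 'a[i]', the '(' in 'f(x)', the '.' in 'o.m', /// the '->' in 'p->m', and the '++'/'--' in 'x++'/'x--'.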
bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<Expr*, 20> ExprListTy; typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, std::function<void()> Completer = nullptr); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr); //===--------------------------------------------------------------------===// // C++0x 5.1.2: Lambda expressions // [...] 
() -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro, bool *SkippedInits = nullptr); bool TryParseLambdaIntroducer(LambdaIntroducer &Intro); ExprResult ParseLambdaExpressionAfterIntroducer( LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while condition expression. Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. 
/// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); ExprResult ParseInitializerWithPotentialDesignator(); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used). typedef SmallVector<Stmt*, 32> StmtVector; /// A SmallVector of expressions, with stack size 12 (the maximum used). typedef SmallVector<Expr*, 12> ExprVector; /// A SmallVector of types. typedef SmallVector<ParsedType, 12> TypeVector; StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr, bool AllowOpenMPStandalone = false); enum AllowedContsructsKind { /// \brief Allow any declarations, statements, OpenMP directives. ACK_Any, /// \brief Allow only statements and non-standalone OpenMP directives.
ACK_StatementsOpenMPNonStandalone, /// \brief Allow statements and all executable OpenMP directives. ACK_StatementsOpenMPAnyExecutable }; StmtResult ParseStatementOrDeclaration(StmtVector &Stmts, AllowedContsructsKind Allowed, SourceLocation *TrailingElseLoc = nullptr); StmtResult ParseStatementOrDeclarationAfterAttributes( StmtVector &Stmts, AllowedContsructsKind Allowed, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); StmtResult ParseExprStatement(); StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs); StmtResult ParseCaseStatement(bool MissingCase = false, ExprResult Expr = ExprResult()); StmtResult ParseDefaultStatement(); StmtResult ParseCompoundStatement(bool isStmtExpr = false); StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags); void ParseCompoundStatementLeadingPragmas(); StmtResult ParseCompoundStatementBody(bool isStmtExpr = false); bool ParseParenExprOrCondition(StmtResult *InitStmt, Sema::ConditionResult &CondResult, SourceLocation Loc, Sema::ConditionKind CK); StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc); StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc); StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc); StmtResult ParseDoStatement(); StmtResult ParseForStatement(SourceLocation *TrailingElseLoc); StmtResult ParseGotoStatement(); StmtResult ParseContinueStatement(); StmtResult ParseBreakStatement(); StmtResult ParseReturnStatement(); StmtResult ParseAsmStatement(bool &msAsm); StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc); StmtResult ParsePragmaLoopHint(StmtVector &Stmts, AllowedContsructsKind Allowed, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); /// \brief Describes the behavior that should be taken for an __if_exists /// block. enum IfExistsBehavior { /// \brief Parse the block; this code is always used. IEB_Parse, /// \brief Skip the block entirely; this code is never used. IEB_Skip, /// \brief Parse the block as a dependent block, which may be used in /// some template instantiations but not others. IEB_Dependent }; /// \brief Describes the condition of a Microsoft __if_exists or /// __if_not_exists block. struct IfExistsCondition { /// \brief The location of the initial keyword. SourceLocation KeywordLoc; /// \brief Whether this is an __if_exists block (rather than an /// __if_not_exists block). bool IsIfExists; /// \brief Nested-name-specifier preceding the name. CXXScopeSpec SS; /// \brief The name we're looking for. UnqualifiedId Name; /// \brief The behavior this __if_exists or __if_not_exists block /// should have.
IfExistsBehavior Behavior; }; bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result); void ParseMicrosoftIfExistsStatement(StmtVector &Stmts); void ParseMicrosoftIfExistsExternalDeclaration(); void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType, AccessSpecifier& CurAS); bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs, bool &InitExprsOk); bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names, SmallVectorImpl<Expr *> &Constraints, SmallVectorImpl<Expr *> &Exprs); //===--------------------------------------------------------------------===// // C++ 6: Statements and Blocks StmtResult ParseCXXTryBlock(); StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false); StmtResult ParseCXXCatchBlock(bool FnCatch = false); //===--------------------------------------------------------------------===// // MS: SEH Statements and Blocks StmtResult ParseSEHTryBlock(); StmtResult ParseSEHExceptBlock(SourceLocation Loc); StmtResult ParseSEHFinallyBlock(SourceLocation Loc); StmtResult ParseSEHLeaveStatement(); //===--------------------------------------------------------------------===// // Objective-C Statements StmtResult ParseObjCAtStatement(SourceLocation atLoc); StmtResult ParseObjCTryStmt(SourceLocation atLoc); StmtResult ParseObjCThrowStmt(SourceLocation atLoc); StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc); StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc); //===--------------------------------------------------------------------===// // C99 6.7: Declarations. /// A context for parsing declaration specifiers. TODO: flesh this /// out; there are other significant restrictions on specifiers that /// would be best implemented in the parser. enum DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DSC_normal: case DSC_class: case DSC_top_level: case DSC_objc_method_result: case DSC_condition: return false; case DSC_template_type_arg: case DSC_type_specifier: case DSC_trailing: case DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Information on a C++0x for-range-initializer found while parsing a /// declaration which turns out to be a for-range-declaration.
struct ForRangeInit { SourceLocation ColonLoc; ExprResult RangeExpr; bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); } }; DeclGroupPtrTy ParseDeclaration(unsigned Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); DeclGroupPtrTy ParseSimpleDeclaration(unsigned Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs, bool RequireSemi, ForRangeInit *FRI = nullptr); bool MightBeDeclarator(unsigned Context); DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, unsigned Context, SourceLocation *DeclEnd = nullptr, ForRangeInit *FRI = nullptr); Decl *ParseDeclarationAfterDeclarator(Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo()); bool ParseAsmAttributesAfterDeclarator(Declarator &D); Decl *ParseDeclarationAfterDeclaratorAndAttributes( Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ForRangeInit *FRI = nullptr); Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope); Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope); /// \brief When in code-completion, skip parsing of the function/method body /// unless the body contains the code-completion point. /// /// \returns true if the function body was skipped. bool trySkippingFunctionBody(); bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC, ParsedAttributesWithRange &Attrs); DeclSpecContext getDeclSpecContextFromDeclaratorContext(unsigned Context); void ParseDeclarationSpecifiers(DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), AccessSpecifier AS = AS_none, DeclSpecContext DSC = DSC_normal, LateParsedAttrList *LateAttrs = nullptr); bool DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext, LateParsedAttrList *LateAttrs = nullptr); void ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS = AS_none, DeclSpecContext DSC = DSC_normal); void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, Declarator::TheContext Context); void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC); void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl); void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType, Decl *TagDecl); void ParseStructDeclaration( ParsingDeclSpec &DS, llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback); bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false); bool isTypeSpecifierQualifier(); /// isKnownToBeTypeSpecifier - Return true if we know that the specified token /// is definitely a type-specifier. Return false if it isn't part of a type /// specifier or if we're not sure. bool isKnownToBeTypeSpecifier(const Token &Tok) const; /// \brief Return true if we know that we are definitely looking at a /// decl-specifier, and that it isn't part of an expression such as a /// function-style cast. Return false if it's not a decl-specifier, or we're /// not sure. bool isKnownToBeDeclarationSpecifier() { if (getLangOpts().CPlusPlus) return isCXXDeclarationSpecifier() == TPResult::True; return isDeclarationSpecifier(true); } /// isDeclarationStatement - Disambiguates between a declaration and an /// expression statement, when parsing function bodies. /// Returns true for declaration, false for expression.
bool isDeclarationStatement() { if (getLangOpts().CPlusPlus) return isCXXDeclarationStatement(); return isDeclarationSpecifier(true); } /// isForInitDeclaration - Disambiguates between a declaration and an /// expression in the context of the C 'clause-1' or the C++ /// 'for-init-statement' part of a 'for' statement. /// Returns true for declaration, false for expression. bool isForInitDeclaration() { if (getLangOpts().CPlusPlus) return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true); return isDeclarationSpecifier(true); } /// \brief Determine whether this is a C++1z for-range-identifier. bool isForRangeIdentifier(); /// \brief Determine whether we are currently at the start of an Objective-C /// class message that appears to be missing the open bracket '['. bool isStartOfObjCClassMessageMissingOpenBracket(); /// \brief Starting with a scope specifier, identifier, or /// template-id that refers to the current class, determine whether /// this is a constructor declarator. bool isConstructorDeclarator(bool Unqualified); /// \brief Specifies the context in which type-id/expression /// disambiguation will occur. enum TentativeCXXTypeIdContext { TypeIdInParens, TypeIdUnambiguous, TypeIdAsTemplateArgument }; /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know /// whether the parens contain an expression or a type-id. /// Returns true for a type-id and false for an expression. bool isTypeIdInParens(bool &isAmbiguous) { if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdInParens, isAmbiguous); isAmbiguous = false; return isTypeSpecifierQualifier(); } bool isTypeIdInParens() { bool isAmbiguous; return isTypeIdInParens(isAmbiguous); } /// \brief Checks if the current tokens form a type-id or an expression. /// It is similar to isTypeIdInParens but does not assume that the type-id /// is in parentheses. bool isTypeIdUnambiguously() { bool IsAmbiguous; if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous); return isTypeSpecifierQualifier(); } /// isCXXDeclarationStatement - C++-specialized function that disambiguates /// between a declaration and an expression statement, when parsing function /// bodies. Returns true for declaration, false for expression. bool isCXXDeclarationStatement(); /// isCXXSimpleDeclaration - C++-specialized function that disambiguates /// between a simple-declaration and an expression-statement. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. /// Returns false if the statement is disambiguated as an expression. bool isCXXSimpleDeclaration(bool AllowForRangeDecl); /// isCXXFunctionDeclarator - Disambiguates between a function declarator and /// a constructor-style initializer, when parsing declaration statements. /// Returns true for function declarator and false for constructor-style /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration /// might be a constructor-style initializer. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr); struct ConditionDeclarationOrInitStatementState; enum class ConditionOrInitStatement { Expression, ///< Disambiguated as an expression (either kind). ConditionDecl, ///< Disambiguated as the declaration form of condition. InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
Error ///< Can't be any of the above! }; /// \brief Disambiguates between the different kinds of things that can happen /// after 'if (' or 'switch ('. This could be one of two different kinds of /// declaration (depending on whether there is a ';' later) or an expression. ConditionOrInitStatement isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt); bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous); bool isCXXTypeId(TentativeCXXTypeIdContext Context) { bool isAmbiguous; return isCXXTypeId(Context, isAmbiguous); } /// TPResult - Used as the result value for functions whose purpose is to /// disambiguate C++ constructs by "tentatively parsing" them. enum class TPResult { True, False, Ambiguous, Error }; /// \brief Based only on the given token kind, determine whether we know that /// we're at the start of an expression or a type-specifier-seq (which may /// be an expression, in C++). /// /// This routine does not attempt to resolve any of the tricky cases, e.g., /// those involving lookup of identifiers. /// /// \returns \c TPResult::True if this token starts an expression, /// \c TPResult::False if this token starts a type-specifier-seq, or /// \c TPResult::Ambiguous if it cannot tell. TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind); /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a /// declaration specifier, TPResult::False if it is not, /// TPResult::Ambiguous if it could be either a decl-specifier or a /// function-style cast, and TPResult::Error if a parsing error was /// encountered. If it could be a braced C++11 function-style cast, returns /// BracedCastResult. /// Doesn't consume tokens. TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *HasMissingTypename = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. bool isCXXDeclarationSpecifierAType(); /// \brief Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them.
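// Editor's illustration (hypothetical code, not from the Clang sources) of
// the ambiguity the TryParse* routines below resolve. In C++ the statement
//
//   T(a);
//
// is either a declaration of 'a' (with redundant parentheses) or a
// function-style cast of 'a' to 'T'. The tentative-parsing routines
// speculatively parse one interpretation and report TPResult::True/False
// once the ambiguity is resolved; because they consume tokens, the caller
// must backtrack afterwards.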
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier=true); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); public: TypeResult ParseTypeName(SourceRange *Range = nullptr, Declarator::TheContext Context = Declarator::TypeNameContext, AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr, ParsedAttributes *Attrs = nullptr); private: void ParseBlockId(SourceLocation CaretLoc); // Check for the start of a C++11 attribute-specifier-seq in a context where // an attribute is not allowed. bool CheckProhibitedCXX11Attribute() { assert(Tok.is(tok::l_square)); if (!getLangOpts().CPlusPlus11 || NextToken().isNot(tok::l_square)) return false; return DiagnoseProhibitedCXX11Attribute(); } bool DiagnoseProhibitedCXX11Attribute(); void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation) { if (!getLangOpts().CPlusPlus11) return; if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) && Tok.isNot(tok::kw_alignas)) return; DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation); } void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation); void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs, DeclSpec &DS, Sema::TagUseKind TUK); void ProhibitAttributes(ParsedAttributesWithRange &attrs) { if (!attrs.Range.isValid()) return; DiagnoseProhibitedAttributes(attrs); attrs.clear(); } void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs); // Forbid C++11 attributes that appear in certain syntactic // locations which the standard permits but we don't support yet, // for example, attributes that appertain to decl-specifiers. void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs, unsigned DiagID); /// \brief Skip C++11 attributes and return the end location of the last one. /// \returns SourceLocation() if there are no attributes. SourceLocation SkipCXX11Attributes(); /// \brief Diagnose and skip C++11 attributes that appear in syntactic /// locations where attributes are not allowed. void DiagnoseAndSkipCXX11Attributes(); /// \brief Parses syntax-generic attribute arguments for attributes which are /// known to the implementation, and adds them to the given ParsedAttributes /// list with the given attribute syntax. Returns the number of arguments /// parsed for the attribute.
unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } void MaybeParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) ParseGNUAttributes(attrs, endLoc, LateAttrs); } void ParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } void MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); } } void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) ParseCXX11Attributes(attrs, endLoc); } void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs, SourceLocation *EndLoc = nullptr); void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// \brief Parses a C++-style attribute argument list. Returns true if this /// results in adding an attribute to the ParsedAttributes list. 
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) ParseMicrosoftDeclSpecs(Attrs, End); } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// \brief Parses opencl_unroll_hint attribute if the language is OpenCL v2.0 /// or higher. /// \return false if an error occurs. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// \brief Parses opencl_unroll_hint attribute. /// \return false if an error occurs.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseSwiftNewtypeAttribute(IdentifierInfo &SwiftNewtype, SourceLocation SwiftNewtypeLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation &EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. 
typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( Declarator &D, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. 
CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(unsigned Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); void ParseInnerNamespace(std::vector<SourceLocation>& IdentLoc, std::vector<IdentifierInfo*>& Ident, std::vector<SourceLocation>& NamespaceLoc, unsigned int index, SourceLocation& InlineLoc, ParsedAttributes& attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, unsigned Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( unsigned Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(unsigned Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; SourceLocation TemplateKWLoc; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = TemplateKWLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(unsigned Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(unsigned Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. 
bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, AttributeList *Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// \brief Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// \brief Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// \brief Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// \brief Parses declarative or executable directive. /// /// \param Allowed ACK_Any, if any directives are allowed, /// ACK_StatementsOpenMPAnyExecutable - if any executable directives are /// allowed, ACK_StatementsOpenMPNonStandalone - if only non-standalone /// executable directives are allowed. 
/// StmtResult ParseOpenMPDeclarativeOrExecutableDirective(AllowedContsructsKind Allowed); /// \brief Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// \brief Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind); /// \brief Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind); /// \brief Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind); /// \brief Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind); /// \brief Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *TailExpr = nullptr; SourceLocation ColonLoc; CXXScopeSpec ReductionIdScopeSpec; DeclarationNameInfo ReductionId; OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val; OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown; OpenMPMapClauseKind MapType = OMPC_MAP_unknown; bool IsMapTypeImplicit = false; SourceLocation DepLinMapLoc; }; /// Parses clauses with list. 
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, ParsedType ObjectType, SourceLocation& TemplateKWLoc, UnqualifiedId &Result); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(unsigned Context, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none, AttributeList *AccessAttrs = nullptr); Decl *ParseTemplateDeclarationOrSpecialization(unsigned Context, SourceLocation &DeclEnd, AccessSpecifier AS, AttributeList *AccessAttrs); Decl *ParseSingleDeclarationAfterTemplate( unsigned Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, AccessSpecifier AS=AS_none, AttributeList *AccessAttrs = nullptr); bool ParseTemplateParameters(unsigned Depth, SmallVectorImpl<Decl*> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<Decl*> &TemplateParams); bool isStartOfTemplateTypeParameter(); Decl *ParseTemplateParameter(unsigned Depth, unsigned Position); Decl *ParseTypeParameter(unsigned Depth, unsigned Position); Decl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); Decl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(TemplateTy Template, SourceLocation TemplateNameLoc, const CXXScopeSpec &SS, bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true); void AnnotateTemplateIdTokenAsType(); bool IsTemplateArgumentList(unsigned Skip = 0); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(unsigned Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(); DeclGroupPtrTy ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult 
ParseTypeTrait(); /// Parse the given string as a type. /// /// This is a dangerous utility function currently employed only by API notes. /// It is not a general entry-point for safely parsing types from strings. /// /// \param typeStr The string to be parsed as a type. /// \param context The name of the context in which this string is being /// parsed, which will be used in diagnostics. /// \param includeLoc The location at which this parse was triggered. TypeResult parseTypeFromString(StringRef typeStr, StringRef context, SourceLocation includeLoc); //===--------------------------------------------------------------------===// // Embarcadero: Array and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteNaturalLanguage() override; }; } // end namespace clang #endif
TaskQueueRange.h
// -*- C++ -*- /*! \file TaskQueueRange.h \brief A task queue for a static range of jobs. */ #if !defined(__concurrent_partition_TaskQueueRange_h__) #define __concurrent_partition_TaskQueueRange_h__ namespace concurrent { //! A task queue for a static range of jobs. /*! I tested this class using GCC 4.2 on a 1.66 GHz Intel Core Duo. I examined tasks with a range of costs. The task cost is measured in evaluations of the sine function. The execution times below are measured in milliseconds per task. <table> <tr> <th> Task Cost <th> 0 <th> 1 <th> 10 <th> 100 <tr> <th> 2 Threads <td> 11.3 <td> 10.8 <td> 1.25 <td> 7.90 <tr> <th> 1 Thread <td> 0.07 <td> 0.16 <td> 0.85 <td> 7.63 <tr> <th> Serial <td> 0 <td> 0.08 <td> 0.77 <td> 7.56 </table> When the tasks are very inexpensive (0 or 1 evaluations of the sine function) the contention for the tasks exacts a heavy penalty (about 11 milliseconds per task). For larger tasks (100 sine evaluations) queueing the tasks incurs a negligible overhead. For medium-sized tasks (10 sine evaluations) queueing the tasks incurs a significant overhead (about half the cost of the task). */ template < typename _ForwardIterator = int > class TaskQueueRange { // // Public types. // typedef _ForwardIterator Iterator; // // Member variables. // private: Iterator _iterator; Iterator _begin; Iterator _end; //-------------------------------------------------------------------------- //! \name Constructors etc. //@{ public: //! Default constructor. Empty task queue. TaskQueueRange() : _iterator(), _begin(), _end() {} //! Construct from the iterator range. TaskQueueRange(const Iterator begin, const Iterator end) : _iterator(begin), _begin(begin), _end(end) {} //! Destructor. ~TaskQueueRange() {} //@} //-------------------------------------------------------------------------- //! \name Accessors. //@{ public: //! Return the beginning of the index range. Iterator getBeginning() const { return _begin; } //! Return the end of the index range. Iterator getEnd() const { return _end; } //@} //-------------------------------------------------------------------------- //! \name Manipulators. //@{ public: //! Pop a task off the queue. /*! This function is thread-safe. */ Iterator pop() { Iterator result; #pragma omp critical if (_iterator != _end) { result = _iterator; ++_iterator; } else { result = _end; } return result; } //! Reset the index to the beginning of the range. /*! \note This function is not thread-safe. */ void reset() { _iterator = _begin; } //@} }; } // namespace concurrent #endif
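A minimal usage sketch (editor's addition, not part of the header above; the include path and the sine-evaluation task body are assumptions): worker threads in an OpenMP parallel region repeatedly call pop() until it returns getEnd().

#include <cmath>
#include <vector>
#include "TaskQueueRange.h"  // assumed include path

// Each thread pulls indices from the shared queue; pop() is serialized by
// the '#pragma omp critical' inside it, so no task is handed out twice.
void processAll(std::vector<double>& data) {
  concurrent::TaskQueueRange<int> queue(0, static_cast<int>(data.size()));
  #pragma omp parallel
  {
    for (int i = queue.pop(); i != queue.getEnd(); i = queue.pop()) {
      data[i] = std::sin(data[i]);  // stand-in task, as in the benchmark
    }
  }
  // reset() is not thread-safe: call it only outside the parallel region,
  // e.g. before reusing the queue for another pass.
  queue.reset();
}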
mxnet_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! * \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! \brief operator request type switch */ #define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ { \ const OpReqType ReqType = kNullOp; \ {__VA_ARGS__} \ } \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) 
\ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << " too large"; \ } #define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ LOG(FATAL) << "This operation does not " \ "support float16"; \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } /*! * \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } #define MXNET_ADD_ALL_TYPES \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) /* \brief Compute flattened index given coordinates and shape.
*/ template<int ndim> MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) { index_t ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i]; } return ret; } /* Compute coordinates from flattened index given shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) { Shape<ndim> ret; #pragma unroll for (index_t i = ndim-1, j = idx; i >=0; --i) { auto tmp = j / shape[i]; ret[i] = j - tmp*shape[i]; j = tmp; } return ret; } /* Compute dot product of two vectors */ template<int ndim> MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) { index_t ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret += coord[i] * stride[i]; } return ret; } /* Combine unravel and dot */ template<int ndim> MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape, const Shape<ndim>& stride) { index_t ret = 0; #pragma unroll for (index_t i = ndim-1, j = idx; i >=0; --i) { auto tmp = j / shape[i]; ret += (j - tmp*shape[i])*stride[i]; j = tmp; } return ret; } /* Calculate stride of each dim from shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) { Shape<ndim> stride; index_t cumprod = 1; #pragma unroll for (int i = ndim - 1; i >= 0; --i) { stride[i] = (shape[i] > 1) ? cumprod : 0; cumprod *= shape[i]; } return stride; } /* Increment coordinates and modify index */ template<int ndim> MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape, index_t* idx, const Shape<ndim>& stride) { ++(*coord)[ndim-1]; *idx += stride[ndim-1]; #pragma unroll for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) { (*coord)[i] -= shape[i]; ++(*coord)[i-1]; *idx = *idx + stride[i-1] - shape[i] * stride[i]; } } /* Increment coordinates and modify index */ template<int ndim> MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape, index_t* idx1, const Shape<ndim>& stride1, index_t* idx2, const Shape<ndim>& stride2) { ++(*coord)[ndim-1]; *idx1 += stride1[ndim-1]; *idx2 += stride2[ndim-1]; #pragma unroll for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) { (*coord)[i] -= shape[i]; ++(*coord)[i-1]; *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i]; *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i]; } } /*! * \brief Simple copy data from one blob to another * \param to Destination blob * \param from Source blob */ template <typename xpu> MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) { CHECK_EQ(from.Size(), to.Size()); CHECK_EQ(from.dev_mask(), to.dev_mask()); MSHADOW_TYPE_SWITCH(to.type_flag_, DType, { if (to.type_flag_ == from.type_flag_) { mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s); } else { MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, { to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s)); }) } }) } /*! \brief Binary op backward gradient OP wrapper */ template<typename GRAD_OP> struct backward_grad { /* \brief Backward calc with grad * \param a - output grad * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies) * \return input grad */ template<typename DType, typename ...Args> MSHADOW_XINLINE static DType Map(DType a, Args... args) { return DType(a * GRAD_OP::Map(args...)); } }; /*!
\brief Binary op backward gradient OP wrapper (tuned) */ template<typename GRAD_OP> struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable { using backward_grad<GRAD_OP>::Map; }; /*! \brief Select assignment operation based upon the req value * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */ template<typename OP, int req> struct op_with_req { typedef OP Operation; /*! \brief input is one tensor */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is a tensor and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! \brief inputs are a tensor and two scalar values */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value_1, const DType value_2) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2)); } /*! \brief No inputs (i.e., fill to a constant value) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { KERNEL_ASSIGN(out[i], req, OP::Map()); } /*! \brief input is a single scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(value)); } /*! \brief inputs are two tensors and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value)); } /*! \brief inputs are three tensors (i.e., backward grad with a binary grad function) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType *input_3) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i])); } }; template<typename OP, typename xpu> struct Kernel; /*! * \brief CPU Kernel launcher * \tparam OP Operator to launch */ template<typename OP> struct Kernel<OP, cpu> { /*! * \brief Launch a generic CPU kernel. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended * for irregular workloads such as spmv.
* When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false); if (omp_threads < 2) { for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) schedule(dynamic) for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } #else for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch CPU kernel which has OMP tuning data available. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam PRIMITIVE_OP The primitive operation to use for tuning * \tparam DType Data type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param dest Destination pointer (used to infer DType) * \param args Varargs to eventually pass to the OP::Map() function */ template<typename PRIMITIVE_OP, typename DType, typename ...Args> static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP( N, static_cast<size_t>(omp_threads))) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif } /*! * \brief Launch custom-tuned kernel where each thread is set to * operate on a contiguous partition * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions */ template<typename ...Args> inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { OP::Map(0, N, args...); } else { const auto length = (N + omp_threads - 1) / omp_threads; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); i += length) { OP::Map(i, i + length > N ? N - i : length, args...); } } #else OP::Map(0, N, args...); #endif } /*! * \brief Launch a tunable OP with implicitly-supplied data type * \tparam DType Data type * \tparam T OP type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<T, DType>(s, N, dest, args...); return true; } /*! 
* \brief Launch a tunable OP wrapper with explicitly-supplied data type (i.e., op_with_req) * \tparam DType Data type * \tparam T Wrapper type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<typename T::Operation, DType>(s, N, dest, args...); return true; } }; #ifdef __CUDACC__ template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, args...); } } template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel_ex(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, 1, args...); } } template<typename OP> struct Kernel<OP, gpu> { /*! \brief Launch GPU kernel */ template<typename ...Args> inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) { using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel); } template<typename ...Args> inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) { using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel_ex<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex); } }; #endif // __CUDACC__ /*! * \brief Set to immediate scalar value kernel * \tparam val Scalar immediate */ template<int val> struct set_to_int : public tunable { // mxnet_op version (when used directly with Kernel<>::Launch()) template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { out[i] = DType(val); } // mshadow_op version (when used with op_with_req<>) MSHADOW_XINLINE static int Map() { return val; } }; /*! * \brief Special-case kernel shortcut for setting to zero and one */ using set_zero = set_to_int<0>; using set_one = set_to_int<1>; } // namespace mxnet_op } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_MXNET_OP_H_
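A minimal sketch (editor's addition, not part of mxnet_op.h; the operator name and call site are hypothetical) of how an elementwise operator plugs into Kernel<OP, cpu>::Launch: Launch invokes OP::Map(i, args...) for each i in [0, N), in parallel when OpenMP recommends more than one thread.

// Hypothetical mshadow_op-style unary operator: returns its input plus one.
// Assumed to be defined where index_t, MSHADOW_XINLINE, op_with_req, Kernel,
// and kWriteTo are visible (e.g. inside namespace mxnet::op::mxnet_op).
struct plus_one {
  template <typename DType>
  MSHADOW_XINLINE static DType Map(DType in) {
    return in + DType(1);
  }
};

// At a call site (the stream s may be nullptr on CPU):
//   Kernel<op_with_req<plus_one, kWriteTo>, cpu>::Launch(s, N, out, in);
// op_with_req<plus_one, kWriteTo>::Map(i, out, in) expands to
// KERNEL_ASSIGN(out[i], kWriteTo, plus_one::Map(in[i])), i.e. out[i] = in[i] + 1.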
\brief Binary op backward gradient OP wrapper (tuned) */ template<typename GRAD_OP> struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable { using backward_grad<GRAD_OP>::Map; }; /*! \brief Select assignment operation based upon the req value * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */ template<typename OP, int req> struct op_with_req { typedef OP Operation; /*! \brief input is one tensor */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! \brief input is tensor and two scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value_1, const DType value_2) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2)); } /*! \brief No inputs (ie fill to constant value) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { KERNEL_ASSIGN(out[i], req, OP::Map()); } /*! \brief input is single scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(value)); } /*! \brief inputs are two tensors and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value)); } /*! \brief inputs are three tensors (ie backward grad with binary grad function) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType *input_3) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i])); } }; template<typename OP, typename xpu> struct Kernel; /*! * \brief CPU Kernel launcher * \tparam OP Operator to launch */ template<typename OP> struct Kernel<OP, cpu> { /*! * \brief Launch a generic CPU kernel. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) { return true; } /*! * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended * for irregular workloads such as spmv. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) { return true; } /*! * \brief Launch CPU kernel which has OMP tuning data available. 
* When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam PRIMITIVE_OP The primitive operation to use for tuning * \tparam DType Data type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param dest Destination pointer (used to infer DType) * \param args Varargs to eventually pass to the OP::Map() function */ template<typename PRIMITIVE_OP, typename DType, typename ...Args> static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) { } /*! * \brief Launch custom-tuned kernel where each thread is set to * operate on a contiguous partition * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions */ template<typename ...Args> inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) { } /*! * \brief Launch a tunable OP with implicitly-supplied data type * \tparam DType Data type * \tparam T OP type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<T, DType>(s, N, dest, args...); return true; } /*! * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req) * \tparam DType Data type * \tparam T Wrapper type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<typename T::Operation, DType>(s, N, dest, args...); return true; } }; #ifdef __CUDACC__ template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, args...); } } template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel_ex(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, 1, args...); } } template<typename OP> struct Kernel<OP, gpu> { /*! \brief Launch GPU kernel */ template<typename ...Args> inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) { using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel); } template<typename ...Args> inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... 
args) { using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel_ex<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex); } }; #endif // __CUDACC__ /*! * \brief Set to immediate scalar value kernel * \tparam val Scalar immediate */ template<int val> struct set_to_int : public tunable { // mxnet_op version (when used directly with Kernel<>::Launch()) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { out[i] = DType(val); } // mshadow_op version (when used with op_with_req<>) MSHADOW_XINLINE static int Map() { return val; } }; /*! * \brief Special-case kernel shortcut for setting to zero and one */ using set_zero = set_to_int<0>; using set_one = set_to_int<1>; } // namespace mxnet_op } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_MXNET_OP_H_
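/*
 * Editor's sketch (not part of mxnet_op.h): minimal usage of the CPU launch
 * machinery defined above. A unary functor is wrapped in op_with_req<> so that
 * KERNEL_ASSIGN honors the OpReqType (overwrite vs. accumulate) and is then
 * dispatched with Kernel<..., cpu>::Launch(). The functor `plus_one` and the
 * function `example_plus_one` are hypothetical names used only for illustration.
 */
#include <cstddef>
#include "mxnet_op.h"

namespace mxnet {
namespace op {

struct plus_one {
  // Applied element-wise through op_with_req<>::Map() by Kernel<...>::Launch()
  template <typename DType>
  MSHADOW_XINLINE static DType Map(DType a) {
    return a + DType(1);
  }
};

inline void example_plus_one(mshadow::Stream<cpu>* s, float* out, const float* in, size_t N) {
  using namespace mxnet_op;
  // For each i, expands to KERNEL_ASSIGN(out[i], kWriteTo, plus_one::Map(in[i]))
  Kernel<op_with_req<plus_one, kWriteTo>, cpu>::Launch(s, N, out, in);
  // With kAddTo instead of kWriteTo the result would be accumulated into out[i]
}

}  // namespace op
}  // namespace mxnet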
/*! * Copyright (c) 2017 by Contributors * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! * \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! \brief operator request type switch */ #define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ { \ const OpReqType ReqType = kNullOp; \ {__VA_ARGS__} \ } \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) 
\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ LOG(FATAL) << "This operation does not " \ "support float16"; \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } /*! * \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } #define MXNET_ADD_ALL_TYPES \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) /* \brief Compute flattened index given coordinates and shape. 
*/ template<int ndim> MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) { index_t ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i]; } return ret; } /* Compute coordinates from flattened index given shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) { Shape<ndim> ret; #pragma unroll for (index_t i = ndim-1, j = idx; i >=0; --i) { auto tmp = j / shape[i]; ret[i] = j - tmp*shape[i]; j = tmp; } return ret; } /* Compute dot product of two vector */ template<int ndim> MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) { index_t ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret += coord[i] * stride[i]; } return ret; } /* Combining unravel and dot */ template<int ndim> MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape, const Shape<ndim>& stride) { index_t ret = 0; #pragma unroll for (index_t i = ndim-1, j = idx; i >=0; --i) { auto tmp = j / shape[i]; ret += (j - tmp*shape[i])*stride[i]; j = tmp; } return ret; } /* Calculate stride of each dim from shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) { Shape<ndim> stride; index_t cumprod = 1; #pragma unroll for (int i = ndim - 1; i >= 0; --i) { stride[i] = (shape[i] > 1) ? cumprod : 0; cumprod *= shape[i]; } return stride; } /* Increment coordinates and modify index */ template<int ndim> MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape, index_t* idx, const Shape<ndim>& stride) { ++(*coord)[ndim-1]; *idx += stride[ndim-1]; #pragma unroll for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) { (*coord)[i] -= shape[i]; ++(*coord)[i-1]; *idx = *idx + stride[i-1] - shape[i] * stride[i]; } } /* Increment coordinates and modify index */ template<int ndim> MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape, index_t* idx1, const Shape<ndim>& stride1, index_t* idx2, const Shape<ndim>& stride2) { ++(*coord)[ndim-1]; *idx1 += stride1[ndim-1]; *idx2 += stride2[ndim-1]; #pragma unroll for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) { (*coord)[i] -= shape[i]; ++(*coord)[i-1]; *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i]; *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i]; } } /*! * \brief Simple copy data from one blob to another * \param to Destination blob * \param from Source blob */ template <typename xpu> MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) { CHECK_EQ(from.Size(), to.Size()); CHECK_EQ(from.dev_mask(), to.dev_mask()); MSHADOW_TYPE_SWITCH(to.type_flag_, DType, { if (to.type_flag_ == from.type_flag_) { mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s); } else { MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, { to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s)); }) } }) } /*! \brief Binary op backward gradient OP wrapper */ template<typename GRAD_OP> struct backward_grad { /* \brief Backward calc with grad * \param a - output grad * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies) * \return input grad */ template<typename DType, typename ...Args> MSHADOW_XINLINE static DType Map(DType a, Args... args) { return DType(a * GRAD_OP::Map(args...)); } }; /*! 
\brief Binary op backward gradient OP wrapper (tuned) */ template<typename GRAD_OP> struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable { using backward_grad<GRAD_OP>::Map; }; /*! \brief Select assignment operation based upon the req value * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */ template<typename OP, int req> struct op_with_req { typedef OP Operation; /*! \brief input is one tensor */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! \brief input is tensor and two scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value_1, const DType value_2) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2)); } /*! \brief No inputs (ie fill to constant value) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { KERNEL_ASSIGN(out[i], req, OP::Map()); } /*! \brief input is single scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(value)); } /*! \brief inputs are two tensors and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value)); } /*! \brief inputs are three tensors (ie backward grad with binary grad function) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType *input_3) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i])); } }; template<typename OP, typename xpu> struct Kernel; /*! * \brief CPU Kernel launcher * \tparam OP Operator to launch */ template<typename OP> struct Kernel<OP, cpu> { /*! * \brief Launch a generic CPU kernel. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended * for irregular workloads such as spmv. 
* When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false); if (omp_threads < 2) { for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) schedule(dynamic) for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } #else for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch CPU kernel which has OMP tuning data available. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam PRIMITIVE_OP The primitive operation to use for tuning * \tparam DType Data type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param dest Destination pointer (used to infer DType) * \param args Varargs to eventually pass to the OP::Map() function */ template<typename PRIMITIVE_OP, typename DType, typename ...Args> static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP( N, static_cast<size_t>(omp_threads))) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif } /*! * \brief Launch custom-tuned kernel where each thread is set to * operate on a contiguous partition * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions */ template<typename ...Args> inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { OP::Map(0, N, args...); } else { const auto length = (N + omp_threads - 1) / omp_threads; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); i += length) { OP::Map(i, i + length > N ? N - i : length, args...); } } #else OP::Map(0, N, args...); #endif } /*! * \brief Launch a tunable OP with implicitly-supplied data type * \tparam DType Data type * \tparam T OP type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<T, DType>(s, N, dest, args...); return true; } /*! 
* \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req) * \tparam DType Data type * \tparam T Wrapper type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<typename T::Operation, DType>(s, N, dest, args...); return true; } }; #ifdef __CUDACC__ template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, args...); } } template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel_ex(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, 1, args...); } } template<typename OP> struct Kernel<OP, gpu> { /*! \brief Launch GPU kernel */ template<typename ...Args> inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) { using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel); } template<typename ...Args> inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) { using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel_ex<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex); } }; #endif // __CUDACC__ /*! * \brief Set to immediate scalar value kernel * \tparam val Scalar immediate */ template<int val> struct set_to_int : public tunable { // mxnet_op version (when used directly with Kernel<>::Launch()) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { out[i] = DType(val); } // mshadow_op version (when used with op_with_req<>) MSHADOW_XINLINE static int Map() { return val; } }; /*! * \brief Special-case kernel shortcut for setting to zero and one */ using set_zero = set_to_int<0>; using set_one = set_to_int<1>; } // namespace mxnet_op } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_MXNET_OP_H_
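/*
 * Editor's sketch (not part of mxnet_op.h): the shape helpers above in use.
 * ravel() and unravel() are inverses for in-range coordinates, and
 * calc_stride() zeroes the stride of broadcast (size-1) axes so that dot()
 * addresses a broadcast operand correctly. The function name `example_shapes`
 * is hypothetical; Shape3() comes from mshadow.
 */
#include <cassert>
#include "mxnet_op.h"

inline void example_shapes() {
  using namespace mxnet::op::mxnet_op;  // ravel, unravel, calc_stride, dot
  const mshadow::Shape<3> shape = mshadow::Shape3(2, 3, 4);  // a 2x3x4 tensor
  const mshadow::Shape<3> coord = mshadow::Shape3(1, 2, 3);  // its last element
  const mshadow::index_t flat = ravel(coord, shape);
  assert(flat == 23);                     // ((1*3)+2)*4 + 3
  assert(unravel(flat, shape) == coord);  // round-trip recovers the coordinate
  // A 1x3x1 operand broadcast against the 2x3x4 shape: only axis 1 advances
  const mshadow::Shape<3> bstride = calc_stride(mshadow::Shape3(1, 3, 1));
  assert(dot(coord, bstride) == 2);       // element index within the 1x3x1 operand
}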
energy.h
#pragma once #include "core.h" #include "geometry.h" #include "space.h" #include "potentials.h" #include "multipole.h" #include "penalty.h" #include "mpi.h" #include <Eigen/Dense> #include <set> #ifdef ENABLE_POWERSASA #include <power_sasa.h> #endif namespace Faunus { namespace Energy { class Energybase { public: enum keys {OLD, NEW, NONE}; keys key=NONE; std::string name; std::string cite; virtual double energy(Change&)=0; //!< energy due to change virtual void to_json(json &j) const;; //!< json output virtual void sync(Energybase*, Change&); virtual void init(); //!< reset and initialize virtual inline void force(std::vector<Point> &forces) {}; // update forces on all particles }; void to_json(json &j, const Energybase &base); //!< Converts any energy class to json object /** * This holds Ewald setup and must *not* depend on particle type, nor depend on Space */ struct EwaldData { typedef std::complex<double> Tcomplex; Eigen::Matrix3Xd kVectors; // k-vectors, 3xK Eigen::VectorXd Aks; // 1xK, to minimize computational effort (Eq.24,DOI:10.1063/1.481216) Eigen::VectorXcd Qion, Qdip; // 1xK double alpha, rc, kc, check_k2_zero, lB; double const_inf, eps_surf; bool spherical_sum=true; bool ipbc=false; int kVectorsInUse=0; Point L; //!< Box dimensions void update(const Point &box); }; void from_json(const json &j, EwaldData &d); void to_json(json &j, const EwaldData &d); #ifdef DOCTEST_LIBRARY_INCLUDED TEST_CASE("[Faunus] Ewald - EwaldData") { using doctest::Approx; EwaldData data = R"({ "ipbc": false, "epsr": 1.0, "alpha": 0.894427190999916, "epss": 1.0, "kcutoff": 11.0, "spherical_sum": true, "cutoff": 5.0})"_json; data.update( Point(10,10,10) ); CHECK(data.ipbc == false); CHECK(data.const_inf == 1); CHECK(data.alpha == 0.894427190999916); CHECK(data.kVectors.cols() == 2975); CHECK(data.Qion.size() == data.kVectors.cols()); data.ipbc=true; data.update( Point(10,10,10) ); CHECK(data.kVectors.cols() == 846); CHECK(data.Qion.size() == data.kVectors.cols()); } #endif /** @brief recipe or policies for ion-ion ewald */ template<class Tspace, bool eigenopt=false /** use Eigen matrix ops where possible */> struct PolicyIonIon { typedef typename Tspace::Tpvec::iterator iter; Tspace *spc; Tspace *old=nullptr; // set only if key==NEW at first call to `sync()` PolicyIonIon(Tspace &spc) : spc(&spc) {} void updateComplex(EwaldData &data) const { if (eigenopt) if (data.ipbc==false) { auto pos = asEigenMatrix(spc->p.begin(), spc->p.end(), &Tspace::Tparticle::pos); // Nx3 auto charge = asEigenVector(spc->p.begin(), spc->p.end(), &Tspace::Tparticle::charge); // Nx1 Eigen::MatrixXd kr = pos.matrix() * data.kVectors; // Nx3 * 3xK = NxK data.Qion.real() = (kr.array().cos().colwise()*charge).colwise().sum(); data.Qion.imag() = kr.array().sin().colwise().sum(); return; } for (int k=0; k<data.kVectors.cols(); k++) { const Point& kv = data.kVectors.col(k); EwaldData::Tcomplex Q(0,0); if (data.ipbc) for (auto &i : spc->p) Q += kv.cwiseProduct(i.pos).array().cos().prod() * i.charge; else for (auto &i : spc->p) { double dot = kv.dot(i.pos); Q += i.charge * EwaldData::Tcomplex( std::cos(dot), std::sin(dot) ); } data.Qion[k] = Q; } } //!< Update all k vectors void updateComplex(EwaldData &data, iter begin, iter end) const { assert(old!=nullptr); assert(spc->p.size() == old->p.size()); size_t ibeg = std::distance(spc->p.begin(), begin); // it->index size_t iend = std::distance(spc->p.begin(), end); // it->index for (int k=0; k<data.kVectors.cols(); k++) { auto& Q = data.Qion[k]; Point q = data.kVectors.col(k); if 
(data.ipbc) for (size_t i=ibeg; i<=iend; i++) { Q += q.cwiseProduct( spc->p[i].pos ).array().cos().prod() * spc->p[i].charge; Q -= q.cwiseProduct( old->p[i].pos ).array().cos().prod() * old->p[i].charge; } else for (size_t i=ibeg; i<=iend; i++) { double _new = q.dot(spc->p[i].pos); double _old = q.dot(old->p[i].pos); Q += spc->p[i].charge * EwaldData::Tcomplex( std::cos(_new), std::sin(_new) ); Q -= old->p[i].charge * EwaldData::Tcomplex( std::cos(_old), std::sin(_old) ); } } } //!< Optimized update of k subset. Require access to old positions through `old` pointer double selfEnergy(const EwaldData &d) { double E = 0; for (auto& i : spc->p) E += i.charge * i.charge; return -d.alpha*E / std::sqrt(pc::pi) * d.lB; } double surfaceEnergy(const EwaldData &d) { if (d.const_inf < 0.5) return 0; Point qr(0,0,0); for (auto &i : spc->p) qr += i.charge*i.pos; return d.const_inf * 2 * pc::pi / ( (2*d.eps_surf+1) * spc->geo.getVolume() ) * qr.dot(qr) * d.lB; } double reciprocalEnergy(const EwaldData &d) { double E = 0; if (eigenopt) // known at compile time E = d.Aks.cwiseProduct( d.Qion.cwiseAbs2() ).sum(); else for (int k=0; k<d.Qion.size(); k++) E += d.Aks[k] * std::norm( d.Qion[k] ); return 2 * pc::pi / spc->geo.getVolume() * E * d.lB; } }; #ifdef DOCTEST_LIBRARY_INCLUDED TEST_CASE("[Faunus] Ewald - IonIonPolicy") { using doctest::Approx; typedef Space<Geometry::Cuboid, Particle<Charge,Dipole>> Tspace; Tspace spc; spc.p.resize(2); spc.geo = R"( {"length": 10} )"_json; spc.p[0] = R"( {"pos": [0,0,0], "q": 1.0} )"_json; spc.p[1] = R"( {"pos": [1,0,0], "q": -1.0} )"_json; PolicyIonIon<Tspace> ionion(spc); EwaldData data = R"({ "epsr": 1.0, "alpha": 0.894427190999916, "epss": 1.0, "kcutoff": 11.0, "spherical_sum": true, "cutoff": 5.0})"_json; data.ipbc = false; // PBC Ewald (http://dx.doi.org/10.1063/1.481216) data.update( spc.geo.getLength() ); ionion.updateComplex( data ); CHECK( ionion.selfEnergy(data) == Approx(-1.0092530088080642*data.lB) ); CHECK( ionion.surfaceEnergy(data) == Approx(0.0020943951023931952*data.lB) ); CHECK( ionion.reciprocalEnergy(data) == Approx(0.21303063979675319*data.lB) ); data.ipbc = true; // IPBC Ewald data.update( spc.geo.getLength() ); ionion.updateComplex( data ); CHECK( ionion.selfEnergy(data) == Approx(-1.0092530088080642*data.lB) ); CHECK( ionion.surfaceEnergy(data) == Approx(0.0020943951023931952*data.lB) ); CHECK( ionion.reciprocalEnergy(data) == Approx(0.0865107467*data.lB) ); } #endif /** @brief Ewald summation reciprocal energy */ template<class Tspace, class Policy=PolicyIonIon<Tspace>> class Ewald : public Energybase { private: EwaldData data; Policy policy; Tspace& spc; public: Ewald(const json &j, Tspace &spc) : policy(spc), spc(spc) { name = "ewald"; data = j; init(); } void init() override { data.update( spc.geo.getLength() ); policy.updateComplex(data); // brute force. todo: be selective } double energy(Change &change) override { double u=0; if (!change.empty()) { // If the state is NEW (trial state), then update all k-vectors if (key==NEW) { if (change.all || change.dV) { // everything changes data.update( spc.geo.getLength() ); policy.updateComplex(data); // update all (expensive!) 
} else { if (change.groups.size()==1) { // exactly one group is moved auto& d = change.groups[0]; auto& g = spc.groups[d.index]; if (d.atoms.size()==1) // exactly one atom is moved policy.updateComplex(data, g.begin()+d.atoms[0], g.begin()+d.atoms[0]); else policy.updateComplex(data, g.begin(), g.end()); } else policy.updateComplex(data); } } u = policy.selfEnergy(data) + policy.surfaceEnergy(data) + policy.reciprocalEnergy(data); } return u; } void sync(Energybase *basePtr, Change &change) override { auto other = dynamic_cast<decltype(this)>(basePtr); assert(other); if (other->key==OLD) policy.old = &(other->spc); // give NEW access to OLD space for optimized updates data = other->data; // copy everything! } //!< Called after a move is rejected/accepted as well as before simulation void to_json(json &j) const override { j = data; } }; template<typename Tspace> class Isobaric : public Energybase { private: Tspace& spc; double P; // P/kT public: Isobaric(const json &j, Tspace &spc) : spc(spc) { name = "isobaric"; cite = "Frenkel & Smith 2nd Ed (Eq. 5.4.13)"; P = j.value("P/mM", 0.0) * 1.0_mM; if (P<1e-10) { P = j.value("P/Pa", 0.0) * 1.0_Pa; if (P<1e-10) P = j.at("P/atm").get<double>() * 1.0_atm; } } double energy(Change &change) override { if (change.dV || change.all) { double V = spc.geo.getVolume(); size_t N=0; for (auto &g : spc.groups) if (!g.empty()) { if (g.atomic) N += g.size(); else N++; } return P*V-(N+1)*std::log(V); } else return 0; } void to_json(json &j) const override { j["P/atm"] = P / 1.0_atm; j["P/mM"] = P / 1.0_mM; j["P/Pa"] = P / 1.0_Pa; _roundjson(j,5); } }; /** * @brief Base class for external potentials * * This will apply an external energy to a defined * list of molecules, either acting on individual * atoms or the mass-center. The specific energy * function, `func` is injected in derived classes. 
*/ template<typename Tspace> class ExternalPotential : public Energybase { protected: typedef typename Tspace::Tpvec Tpvec; typedef typename Tspace::Tparticle Tparticle; bool COM=false; // apply on center-of-mass Tspace& spc; std::set<int> molids; // molecules to act upon std::function<double(const Tparticle&)> func=nullptr; // energy of single particle std::vector<std::string> _names; template<class Tparticle> double _energy(const Group<Tparticle> &g) const { double u=0; if (molids.find(g.id) != molids.end()) { if (COM) { // apply only to center of mass Tparticle cm; cm.pos = g.cm; u = func(cm); } else { for (auto &p : g) { u += func(p); if (std::isnan(u)) break; } } } return u; } //!< External potential on a single particle public: ExternalPotential(const json &j, Tspace &spc) : spc(spc) { name="external"; COM = j.value("com", false); _names = j.at("molecules").get<decltype(_names)>(); // molecule names auto _ids = names2ids(molecules<Tpvec>, _names); // names --> molids molids = std::set<int>(_ids.begin(), _ids.end()); // vector --> set if (molids.empty() || molids.size()!=_names.size() ) throw std::runtime_error(name + ": molecule list is empty"); } double energy(Change &change) override { assert(func!=nullptr); double u=0; if (change.dV or change.all) { for (auto &g : spc.groups) { // check all groups u += _energy(g); if (std::isnan(u)) break; } } else for (auto &d : change.groups) { auto &g = spc.groups.at(d.index); // check specified groups if (d.all or COM) // check all atoms in group u += _energy(g); // _energy also checks for molecule id else { // check only specified atoms in group if (molids.find(g.id) != molids.end()) for (auto i : d.atoms) u += func( *(g.begin()+i) ); } if (std::isnan(u)) break; } return u; } void to_json(json &j) const override { j["molecules"] = _names; j["com"] = COM; } }; //!< Base class for external potentials, acting on particles /** * @brief Confines molecules inside geometric shapes */ template<typename Tspace, typename base=ExternalPotential<Tspace>> class Confine : public base { public: enum Variant {sphere, cylinder, cuboid, none}; Variant type=none; private: Point origo={0,0,0}, dir={1,1,1}; Point low, high; double radius, k; bool scale=false; std::map<std::string, Variant> m = { {"sphere", sphere}, {"cylinder", cylinder}, {"cuboid", cuboid} }; public: Confine(const json &j, Tspace &spc) : base(j,spc) { base::name = "confine"; k = value_inf(j, "k") * 1.0_kJmol; // get floating point; allow inf/-inf type = m.at( j.at("type") ); if (type==sphere or type==cylinder) { radius = j.at("radius"); origo = j.value("origo", origo); scale = j.value("scale", scale); if (type==cylinder) dir = {1,1,0}; base::func = [&radius=radius, origo=origo, k=k, dir=dir](const typename base::Tparticle &p) { double d2 = (origo-p.pos).cwiseProduct(dir).squaredNorm() - radius*radius; if (d2>0) return 0.5*k*d2; return 0.0; }; // If volume is scaled, also scale the confining radius by adding a trigger // to `Space::scaleVolume()` if (scale) spc.scaleVolumeTriggers.push_back( [&radius=radius](Tspace &spc, double Vold, double Vnew) { radius *= std::cbrt(Vnew/Vold); } ); } if (type==cuboid) { low = j.at("low").get<Point>(); high = j.at("high").get<Point>(); base::func = [low=low, high=high, k=k](const typename base::Tparticle &p) { double u=0; Point d = low-p.pos; for (int i=0; i<3; ++i) if (d[i]>0) u+=d[i]*d[i]; d = p.pos-high; for (int i=0; i<3; ++i) if (d[i]>0) u+=d[i]*d[i]; return 0.5*k*u; }; } } void to_json(json &j) const override { if (type==cuboid) j = {{"low", low}, 
{"high", high}}; if (type==sphere or type==cylinder) j = {{"radius", radius}}; if (type==sphere) { j["origo"] = origo; j["scale"] = scale; } for (auto &i : m) if (i.second==type) j["type"] = i.first; j["k"] = k/1.0_kJmol; base::to_json(j); _roundjson(j,5); } }; //!< Confine particles to a sub-region of the simulation container /* * The keys of the `intra` map are group index and the values * is a vector of `BondData`. For bonds between groups, fill * in `inter` which is evaluated for every update of call to * `energy`. * * @todo Optimize. */ template<typename Tspace> class Bonded : public Energybase { private: Tspace& spc; typedef typename Tspace::Tpvec Tpvec; typedef std::vector<std::shared_ptr<Potential::BondData>> BondVector; BondVector inter; // inter-molecular bonds std::map<int,BondVector> intra; // intra-molecular bonds void update() { using namespace Potential; intra.clear(); for (size_t i=0; i<spc.groups.size(); i++) { if (!spc.groups.empty()) { auto &g = spc.groups[i]; for (auto &b : molecules<Tpvec>.at(g.id).bonds) { intra[i].push_back( b->clone() ); // deep copy BondData from MoleculeData intra[i].back()->shift( std::distance(spc.p.begin(), g.begin()) ); Potential::setBondEnergyFunction( intra[i].back(), spc.p ); } } } } // finds and adds all intra-molecular bonds of active molecules double sum( const BondVector &v ) const { double u=0; for (auto &b : v) { assert(b->hasEnergyFunction()); u += b->energy(spc.geo.distanceFunc); } return u; } // sum energy in vector of BondData public: Bonded(const json &j, Tspace &spc) : spc(spc) { name = "bonded"; update(); if (j.is_object()) if (j.count("bondlist")==1) inter = j["bondlist"].get<BondVector>(); for (auto &i : inter) // set all energy functions Potential::setBondEnergyFunction( i, spc.p ); } void to_json(json &j) const override { if (!inter.empty()) j["bondlist"] = inter; if (!intra.empty()) { json& _j = j["bondlist-intramolecular"]; _j = json::array(); for (auto &i : intra) for (auto &b : i.second) _j.push_back(b); } } double energy(Change &c) override { double u=0; if ( !c.empty() ) { u = sum(inter); // energy of inter-molecular bonds if ( c.all || c.dV ) { for (auto& i : intra) // energy of intra-molecular bonds if (!spc.groups[i.first].empty()) // add only if group is active u += sum(i.second); } else for (auto &d : c.groups) if (d.internal) u += sum( intra[d.index] ); } return u; }; // brute force -- refine this! }; /** * @brief Nonbonded energy using a pair-potential */ template<typename Tspace, typename Tpairpot> class Nonbonded : public Energybase { private: double g2gcnt=0, g2gskip=0; protected: typedef typename Tspace::Tgroup Tgroup; double Rc2_g2g=pc::infty; void to_json(json &j) const override { j["pairpot"] = pairpot; j["cutoff_g2g"] = std::sqrt(Rc2_g2g); } template<typename T> inline bool cut(const T &g1, const T &g2) { g2gcnt++; if (g1.atomic || g2.atomic) return false; if ( spc.geo.sqdist(g1.cm, g2.cm)<Rc2_g2g ) return false; g2gskip++; return true; } //!< true if group<->group interaction can be skipped template<typename T> inline double i2i(const T &a, const T &b) { assert(&a!=&b && "a and b cannot be the same particle"); return pairpot(a, b, spc.geo.vdist(a.pos, b.pos)); } /* * Internal energy in group, calculating all with all or, if `index` * is given, only a subset. Index specifies the internal index (starting * at zero) of changed particles within the group. 
*/ double g_internal(const Tgroup &g, const std::vector<int> &index=std::vector<int>()) { using namespace ranges; double u=0; if (index.empty()) // assume that all atoms have changed for ( auto i = g.begin(); i != g.end(); ++i ) for ( auto j=i; ++j != g.end(); ) u += i2i(*i, *j); else { // only a subset has changed auto fixed = view::ints( 0, int(g.size()) ) | view::remove_if( [&index](int i){return std::binary_search(index.begin(), index.end(), i);}); for (int i : index) {// moved<->static for (int j : fixed ) { u += i2i( *(g.begin()+i), *(g.begin()+j)); } } for (int i : index) // moved<->moved for (int j : index) if (j>i) { u += i2i( *(g.begin()+i), *(g.begin()+j)); } } return u; } /* * Calculates the interaction energy of a particle, `i`, * and checks (1) if it is already part of Space, or (2) * external to space. */ double i2all(const typename Tspace::Tparticle &i) { double u=0; auto it = spc.findGroupContaining(i); // iterator to group if (it!=spc.groups.end()) { // check if i belongs to group in space for (auto &g : spc.groups) // i with all other particles if (&g!=&(*it)) // avoid self-interaction if (!cut(g, *it)) // check g2g cut-off for (auto &j : g) // loop over particles in other group u += i2i(i,j); for (auto &j : *it) // i with all particles in own group if (&j!=&i) u += i2i(i,j); } else // particle does not belong to any group for (auto &g : spc.groups) // i with all other *active* particles for (auto &j : g) // (this will include only active particles) u += i2i(i,j); return u; } /* * Group-to-group energy. A subset of `g1` can be given with `index`, which refers * to the internal index (starting at zero) of the first group, `g1`. * NOTE: the interpretation of this function is extended to also consider the mutual interactions * of a subset of each group and in such case returns sub1 <-> g2 and !sub1 <-> sub2, * hence excluding !sub1 <-> !sub2 in comparison to calling unconstrained g2g. In the absence * of sub1, any sub2 is ignored.
*/ virtual double g2g(const Tgroup &g1, const Tgroup &g2, const std::vector<int> &index=std::vector<int>(), const std::vector<int> &jndex=std::vector<int>()) { using namespace ranges; double u = 0; if (!cut(g1,g2)) { if ( index.empty() && jndex.empty() ) // if index is empty, assume all in g1 have changed for (auto &i : g1) for (auto &j : g2) { u += i2i(i,j); } else {// only a subset of g1 for (auto i : index) for (auto j=g2.begin(); j!=g2.end(); ++j) { u += i2i( *(g1.begin()+i), *j); } if ( !jndex.empty() ) { auto fixed = view::ints( 0, int(g1.size()) ) | view::remove_if( [&index](int i){return std::binary_search(index.begin(), index.end(), i);}); for (auto i : jndex) // moved2 <-| for (auto j : fixed) {// static1 <-| u += i2i( *(g2.begin()+i), *(g1.begin()+j)); } } } } return u; } public: Tspace& spc; //!< Space to operate on Tpairpot pairpot; //!< Pair potential Nonbonded(const json &j, Tspace &spc) : spc(spc) { name="nonbonded"; pairpot = j; Rc2_g2g = std::pow( j.value("cutoff_g2g", pc::infty), 2); } void force(std::vector<Point> &forces) override { auto &p = spc.p; // alias to particle vector (reference) assert(forces.size() == p.size() && "the forces size must match the particle size"); for (size_t i=0; i<p.size()-1; i++) for (size_t j=i+1; j<p.size(); j++) { Point r = spc.geo.vdist(p[i].pos, p[j].pos); // minimum distance vector Point f = Point::Zero(); // todo: f = pairpot.force( p[i], p[j], r.squaredNorm(), r ); zero-initialized until then so the accumulation below stays well-defined forces[i] += f; forces[j] -= f; } } double energy(Change &change) override { using namespace ranges; double u=0; if (!change.empty()) { if (change.dV) { #pragma omp parallel for reduction (+:u) schedule (dynamic) for ( auto i = spc.groups.begin(); i < spc.groups.end(); ++i ) { for ( auto j=i; ++j != spc.groups.end(); ) u += g2g( *i, *j ); if (i->atomic) u += g_internal(*i); } return u; } // did everything change? if (change.all) { #pragma omp parallel for reduction (+:u) schedule (dynamic) for ( auto i = spc.groups.begin(); i < spc.groups.end(); ++i ) { for ( auto j=i; ++j != spc.groups.end(); ) u += g2g( *i, *j ); u += g_internal(*i); } // more todo here...
return u; } // if exactly ONE molecule is changed if (change.groups.size()==1 && !change.dNpart) { auto& d = change.groups[0]; auto gindex = spc.groups.at(d.index).to_index(spc.p.begin()).first; if (d.atoms.size()==1) // exactly one atom has moved return i2all(spc.p.at(gindex+d.atoms[0])); auto& g1 = spc.groups.at(d.index); for (auto &g2 : spc.groups) if (&g1 != &g2) u += g2g(g1, g2, d.atoms); if (d.internal) u += g_internal(g1, d.atoms); return u; } // if (change.dNpart) { auto moved = change.touchedGroupIndex(); // index of moved groups std::vector<int> Moved; for (auto i: moved) Moved.push_back(i); std::sort( Moved.begin(), Moved.end() ); auto fixed = view::ints( 0, int(spc.groups.size()) ) | view::remove_if( [&Moved](int i){return std::binary_search(Moved.begin(), Moved.end(), i);} ); // index of static groups for ( auto cg1 = change.groups.begin(); cg1 < change.groups.end() ; ++cg1 ) { // Loop over all changed groups std::vector<int> ifiltered, jfiltered; for (auto i: cg1->atoms) { if ( i < spc.groups.at(cg1->index).size() ) ifiltered.push_back(i); } if ( !( cg1->dNpart && ifiltered.empty() ) ) // Skip if particles are removed for ( auto j : fixed) { u += g2g( spc.groups.at(cg1->index), spc.groups[j], ifiltered, jfiltered ); } for ( auto cg2 = cg1; ++cg2 != change.groups.end(); ) { for (auto i: cg2->atoms) if ( i < spc.groups.at(cg2->index).size() ) jfiltered.push_back(i); if ( !( (cg1->dNpart && ifiltered.empty()) && (cg2->dNpart && jfiltered.empty()) ) ) //Skip if particles are removed from both u += g2g( spc.groups.at(cg1->index), spc.groups.at(cg2->index), ifiltered, jfiltered ); jfiltered.clear(); } if ( ifiltered.size() != 0 ) u += g_internal( spc.groups.at( cg1->index ), ifiltered ); } return u; } auto moved = change.touchedGroupIndex(); // index of moved groups auto fixed = view::ints( 0, int(spc.groups.size()) ) | view::remove_if( [&moved](int i){return std::binary_search(moved.begin(), moved.end(), i);} ); // index of static groups // moved<->moved for ( auto i = moved.begin(); i != moved.end(); ++i ) { for ( auto j=i; ++j != moved.end(); ) u += g2g( spc.groups[*i], spc.groups[*j] ); } // moved<->static for ( auto i : moved) for ( auto j : fixed) u += g2g(spc.groups[i], spc.groups[j]); // more todo! 
} return u; } }; //!< Nonbonded, pair-wise additive energy term template<typename Tspace, typename Tpairpot> class NonbondedCached : public Nonbonded<Tspace,Tpairpot> { private: typedef Nonbonded<Tspace,Tpairpot> base; typedef typename Tspace::Tgroup Tgroup; Eigen::MatrixXf cache; Tspace &spc; double g2g(const Tgroup &g1, const Tgroup &g2, const std::vector<int> &index=std::vector<int>(), const std::vector<int> &jndex=std::vector<int>()) override { int i = &g1 - &base::spc.groups.front(); int j = &g2 - &base::spc.groups.front(); if (j<i) std::swap(i,j); if (base::key==Energybase::NEW) { // if this is from the trial system, double u = 0; if (!base::cut(g1,g2)) { for (auto &i : g1) for (auto &j : g2) u += base::i2i(i,j); } cache(i,j) = u; } return cache(i,j); // return (cached) value } public: NonbondedCached(const json &j, Tspace &spc) : base(j,spc), spc(spc) { base::name += "EM"; init(); } void init() override { cache.resize( spc.groups.size(), spc.groups.size() ); cache.setZero(); for ( auto i = base::spc.groups.begin(); i < base::spc.groups.end(); ++i ) { for ( auto j=i; ++j != base::spc.groups.end(); ) { int k = &(*i) - &base::spc.groups.front(); int l = &(*j) - &base::spc.groups.front(); if (l<k) std::swap(k,l); double u = 0; if (!base::cut(*i,*j)) { for (auto &k : *i) for (auto &l : *j) u += base::i2i(k,l); } cache(k,l) = u; } } } //!< Cache pair interactions in matrix double energy(Change &change) override { using namespace ranges; double u=0; if (!change.empty()) { if (change.all || change.dV) { #pragma omp parallel for reduction (+:u) schedule (dynamic) for ( auto i = base::spc.groups.begin(); i < base::spc.groups.end(); ++i ) { for ( auto j=i; ++j != base::spc.groups.end(); ) u += g2g( *i, *j ); } return u; } // if exactly ONE molecule is changed if (change.groups.size()==1) { auto& d = change.groups[0]; auto& g1 = base::spc.groups.at(d.index); for (auto &g2 : base::spc.groups) { if (&g1 != &g2) u += g2g(g1, g2, d.atoms); } return u; } auto moved = change.touchedGroupIndex(); // index of moved groups auto fixed = view::ints( 0, int(base::spc.groups.size()) ) | view::remove_if( [&moved](int i){return std::binary_search(moved.begin(), moved.end(), i);} ); // index of static groups // moved<->moved for ( auto i = moved.begin(); i != moved.end(); ++i ) for ( auto j=i; ++j != moved.end(); ) { u += g2g( base::spc.groups[*i], base::spc.groups[*j] ); } // moved<->static for ( auto i : moved) for ( auto j : fixed) u += g2g(base::spc.groups[i], base::spc.groups[j]); // more todo! } return u; } void sync(Energybase *basePtr, Change &change) override { auto other = dynamic_cast<decltype(this)>(basePtr); assert(other); if (change.all || change.dV) cache.triangularView<Eigen::StrictlyUpper>() = (other->cache).template triangularView<Eigen::StrictlyUpper>(); else for (auto &d : change.groups) { for (int i=0; i<d.index; i++) cache(i,d.index) = other->cache(i,d.index); for (size_t i=d.index+1; i<base::spc.groups.size(); i++) cache(d.index,i) = other->cache(d.index,i); } } //!< Copy energy matrix from other }; //!< Nonbonded with cached energies (Energy Matrix) /** * `udelta` is the total change of updating the energy function. If * not handled this will appear as an energy drift (which it is!). To * avoid this, this term is added to the energy but since it's the * same in both the trial and old state energies it will not affect * MC move acceptance. 
*/ template<typename Tspace> class Penalty : public Energybase { protected: typedef typename Tspace::Tparticle Tparticle; typedef typename Tspace::Tgroup Tgroup; typedef typename Tspace::Tpvec Tpvec; typedef typename std::shared_ptr<ReactionCoordinate::ReactionCoordinateBase> Tcoord; Tspace &spc; bool nodrift; bool quiet; size_t dim=0; size_t cnt=0; // number of calls to `sync()` size_t nupdate; // update frequency [steps] size_t samplings; size_t nconv=0; double udelta=0; // total energy change of updating penalty function double scale; // scaling factor for f0 double f0; // penalty increment std::string file, hisfile; std::vector<Tcoord> rcvec; // vector of reaction coordinate functions std::vector<double> coord; // latest reaction coordinate Table<int> histo; Table<double> penalty; public: Penalty(const json &j, Tspace &spc) : spc(spc) { using namespace ReactionCoordinate; name = "penalty"; f0 = j.value("f0", 0.5); scale = j.value("scale", 0.8); quiet = j.value("quiet", true); nupdate = j.value("update", 0); samplings = j.value("samplings", 1); nodrift = j.value("nodrift", true); file = j.at("file").get<std::string>(); hisfile = j.value("histogram", "penalty-histogram.dat"); std::vector<double> binwidth, min, max; if (scale<0 or scale>1) throw std::runtime_error("`scale` must be in the interval [0:1]"); for (auto &i : j.at("coords")) if (i.is_object()) if (i.size()==1) { std::shared_ptr<ReactionCoordinate::ReactionCoordinateBase> rc=nullptr; for (auto it=i.begin(); it!=i.end(); ++it) { if (it.key()=="atom") rc = std::make_shared<AtomProperty>(it.value(), spc); if (it.key()=="system") rc = std::make_shared<SystemProperty>(it.value(), spc); if (it.key()=="cmcm") rc = std::make_shared<MassCenterSeparation>(it.value(), spc); if (it.key()=="angle") rc = std::make_shared<PrincipalAxisAngle>(it.value(), spc); if (rc!=nullptr) { if (rc->min>=rc->max || rc->binwidth<=0) throw std::runtime_error("min<max and binwidth>0 required for '" + it.key() + "'"); rcvec.push_back(rc); binwidth.push_back( rc->binwidth ); min.push_back( rc->min ); max.push_back( rc->max ); } else throw std::runtime_error("unknown coordinate type '" + it.key() + "'"); } } dim = binwidth.size(); if (dim<1 || dim>2) throw std::runtime_error("minimum one maximum two coordinates required"); coord.resize(rcvec.size(), 0); histo.reInitializer(binwidth, min, max); penalty.reInitializer(binwidth, min, max); std::ifstream f(MPI::prefix+file); if (f) { cout << "Loading penalty function '" << MPI::prefix+file << "'" << endl; std::string hash; f >> hash >> f0 >> samplings; for (int row=0; row<penalty.rows(); row++) for (int col=0; col<penalty.cols(); col++) if (not f.eof()) f >> penalty(row,col); else throw std::runtime_error("penalty file dimension mismatch"); } } virtual ~Penalty() { std::ofstream f1(MPI::prefix + file), f2(MPI::prefix + hisfile); if (f1) f1 << "# " << f0 << " " << samplings << "\n" << penalty.array() - penalty.minCoeff() << "\n"; if (f2) f2 << histo << "\n"; // add function to save to numpy-friendly file... 
#ifdef ENABLE_MPI
template<typename Tspace, typename Base=Penalty<Tspace>>
struct PenaltyMPI : public Base {
    using Base::samplings;
    using Base::penalty;
    using Base::udelta;
    using Base::scale;
    using Base::histo;
    using Base::coord;
    using Base::cnt;
    using Base::f0;
    using Base::file;
    using Base::hisfile;
    using Base::nconv;

    Eigen::VectorXi weights; // array w. minimum histogram counts
    Eigen::VectorXd buffer;  // receive buffer for penalty functions

    PenaltyMPI(const json &j, Tspace &spc) : Base(j,spc) {
        weights.resize( MPI::mpi.nproc() );
        buffer.resize( penalty.size()*MPI::mpi.nproc() );
    }

    void update(const std::vector<double> &c) override {
        using namespace Faunus::MPI;
        double uold = penalty[c];
        if (++cnt % this->nupdate == 0 and f0>0) {
            int min = histo.minCoeff();
            MPI_Barrier(mpi.comm);
            MPI_Allgather(&min, 1, MPI_INT, weights.data(), 1, MPI_INT, mpi.comm);

            if ( weights.maxCoeff() > samplings ) {
                MPI_Gather(penalty.data(), penalty.size(), MPI_DOUBLE,
                        buffer.data(), penalty.size(), MPI_DOUBLE, 0, mpi.comm);

                if (mpi.isMaster()) {
                    penalty.setZero();
                    for (int i=0; i<mpi.nproc(); i++)
                        penalty += Eigen::Map<Eigen::MatrixXd>(
                                buffer.data()+i*penalty.size(), penalty.rows(), penalty.cols() );
                    penalty = ( penalty.array() - penalty.minCoeff() ) / double(mpi.nproc());
                }

                MPI_Bcast(penalty.data(), penalty.size(), MPI_DOUBLE, 0, mpi.comm);
                nconv += 1;

                std::ofstream f3(MPI::prefix + std::to_string(nconv) + file);
                if (f3) f3 << "# " << f0 << " " << samplings << "\n" << penalty.array() << endl;
                std::ofstream f4(MPI::prefix + std::to_string(nconv) + hisfile);
                if (f4) f4 << histo << endl;

                if (min>0 && !this->quiet)
                    cout << "Barriers/kT. Penalty=" << penalty.maxCoeff()
                         << " Histogram=" << std::log(double(histo.maxCoeff())/histo.minCoeff()) << endl;

                histo.setZero();
                f0 = f0 * scale; // reduce penalty energy
                samplings = std::ceil( samplings / scale );
            }
        }
        coord = c;
        histo[coord]++;
        penalty[coord] += f0;
        udelta += penalty[coord] - uold;
    } //!< Average penalty function across all nodes
}; //!< Penalty function with MPI exchange
#endif
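/*
 * A minimal numeric sketch of the flat-histogram update rule used in
 * `Penalty::update()` above: once every bin has been visited `samplings`
 * times, the increment f0 is scaled down while the required number of
 * samplings is scaled up by the same factor. Plain arithmetic; no
 * simulation objects involved.
 */
#ifdef DOCTEST_LIBRARY_INCLUDED
TEST_CASE("[Faunus] Penalty - update rule sketch") {
    using doctest::Approx;
    double f0 = 0.5, scale = 0.8;
    size_t samplings = 10;
    f0 = f0 * scale;                            // penalty increment shrinks...
    samplings = std::ceil( samplings / scale ); // ...while the flatness demand grows
    CHECK( f0 == Approx(0.4) );
    CHECK( samplings == 13 );                   // ceil(10/0.8) = ceil(12.5)
}
#endif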
#ifdef ENABLE_POWERSASA
/*
 * @todo:
 * - can only a subset of sasa be calculated? Note that it's the
 *   `update_coord()` function that takes up most time.
 * - delegate to GPU? In the PowerSasa paper this is mentioned
 */
template<class Tspace>
class SASAEnergy : public Energybase {
    public:
        std::vector<float> sasa, radii;
    private:
        typedef typename Tspace::Tparticle Tparticle;
        typedef typename Tspace::Tpvec Tpvec;
        Tspace& spc;
        double probe;  // sasa probe radius (angstrom)
        double conc=0; // co-solute concentration (mol/l)
        Average<double> avgArea; // average surface area
        std::shared_ptr<POWERSASA::PowerSasa<float,Point>> ps=nullptr;

        void updateSASA(const Tpvec &p) {
            assert(ps != nullptr);
            radii.resize(p.size());
            std::transform(p.begin(), p.end(), radii.begin(),
                    [this](auto &a){ return atoms<Tparticle>[a.id].sigma*0.5 + this->probe;});
            ps->update_coords(spc.positions(), radii); // slowest step!
            for (size_t i=0; i<p.size(); i++) {
                auto &a = atoms<Tparticle>[p[i].id];
                if (std::fabs(a.tfe)>1e-9 || std::fabs(a.tension)>1e-9)
                    ps->calc_sasa_single(i);
            }
            sasa = ps->getSasa();
            assert(sasa.size()==p.size());
        }

        void to_json(json &j) const override {
            using namespace u8;
            j["molarity"] = conc / 1.0_molar;
            j["radius"] = probe / 1.0_angstrom;
            j[bracket("SASA")+"/"+angstrom+squared] = avgArea.avg() / 1.0_angstrom;
            _roundjson(j,5);
        }

        /*
         * @note This is not enough as the PowerSasa object contains data
         * that also needs syncing. It works due to the (expensive!)
         * `updateSASA` call in `energy`.
         */
        void sync(Energybase *basePtr, Change &c) override {
            auto other = dynamic_cast<decltype(this)>(basePtr);
            if (other) {
                if (c.all || c.dV) {
                    radii = other->radii;
                    sasa = other->sasa;
                } else {
                    for (auto &d : c.groups) {
                        int offset = std::distance(spc.p.begin(), spc.groups.at(d.index).begin());
                        for (int j : d.atoms) {
                            int i = j + offset;
                            radii[i] = other->radii[i];
                            sasa[i] = other->sasa[i];
                        }
                    }
                }
            }
        }

    public:
        SASAEnergy(const json &j, Tspace &spc) : spc(spc) {
            name = "sasa";
            cite = "doi:10.1002/jcc.21844";
            probe = j.value("radius", 1.4) * 1.0_angstrom;
            conc = j.at("molarity").get<double>() * 1.0_molar;
            init();
        }

        void init() override {
            radii.resize( spc.p.size() );
            std::transform( spc.p.begin(), spc.p.end(), radii.begin(),
                    [this](auto &a){ return atoms<Tparticle>[a.id].sigma*0.5 + this->probe;} );
            if (ps==nullptr)
                ps = std::make_shared<POWERSASA::PowerSasa<float,Point>>(spc.positions(),radii);
            updateSASA(spc.p);
        }

        double energy(Change &change) override {
            double u=0, A=0;
            /*
             * Ideally we want to call `updateSASA` only if `key==NEW`, but
             * synchronising the PowerSasa object is difficult since it's
             * non-copyable.
             */
            updateSASA(spc.p);
            for (size_t i=0; i<spc.p.size(); ++i) {
                auto &a = atoms<Tparticle>[ spc.p[i].id ];
                u += sasa[i] * (a.tension + conc * a.tfe);
                A += sasa[i];
            }
            avgArea+=A; // sample average area for accepted configurations only
            return u;
        }
}; //!< SASA energy from transfer free energies
#endif
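/*
 * The energy above is a surface-area model, u = sum_i sasa_i * (tension_i
 * + conc * tfe_i): a surface-tension term plus a transfer-free-energy term
 * scaled by the co-solute molarity. Below is a minimal arithmetic sketch of
 * that sum with made-up numbers; no PowerSasa object is involved.
 */
#ifdef DOCTEST_LIBRARY_INCLUDED
TEST_CASE("[Faunus] SASAEnergy - energy sum sketch") {
    using doctest::Approx;
    std::vector<double> sasa    = {100.0, 50.0}; // exposed areas, made up
    std::vector<double> tension = {0.1, 0.0};    // per-area tension, made up
    std::vector<double> tfe     = {0.0, 0.02};   // transfer free energy, made up
    double conc = 1.5;                           // co-solute molarity, made up
    double u = 0;
    for (size_t i=0; i<sasa.size(); i++)
        u += sasa[i] * (tension[i] + conc * tfe[i]);
    CHECK( u == Approx(11.5) ); // 100*0.1 + 50*(1.5*0.02) = 10 + 1.5
}
#endif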
struct Example2D : public Energybase {
    Point& i; // reference to 1st particle in the system
    template<typename Tspace>
    Example2D(const json &j, Tspace &spc): i(spc.p.at(0).pos) { name = "Example2D"; }
    double energy(Change &change) override;
};

template<typename Tspace>
class Hamiltonian : public Energybase, public BasePointerVector<Energybase> {
    protected:
        double maxenergy=pc::infty; //!< Maximum allowed energy change
        typedef typename Tspace::Tparticle Tparticle;

        void to_json(json &j) const override {
            for (auto i : this->vec)
                j.push_back(*i);
        }

        void addEwald(const json &j, Tspace &spc) {
            if (j.count("coulomb")==1)
                if (j["coulomb"].at("type")=="ewald")
                    push_back<Energy::Ewald<Tspace>>(j["coulomb"], spc);
        } //!< Adds an instance of reciprocal space Ewald energies (if appropriate)

    public:
        Hamiltonian(Tspace &spc, const json &j) {
            using namespace Potential;

            typedef CombinedPairPotential<CoulombGalore,LennardJones<Tparticle>> CoulombLJ;
            typedef CombinedPairPotential<CoulombGalore,HardSphere<Tparticle>> CoulombHS;
            typedef CombinedPairPotential<CoulombGalore,WeeksChandlerAndersen<Tparticle>> CoulombWCA;
            typedef CombinedPairPotential<Coulomb,WeeksChandlerAndersen<Tparticle>> PrimitiveModelWCA;

            Energybase::name="hamiltonian";
            for (auto &m : j.at("energy")) { // loop over list of energy terms
                size_t oldsize = vec.size();
                for (auto it=m.begin(); it!=m.end(); ++it) {
                    try {
                        if (it.key()=="nonbonded_coulomblj")
                            push_back<Energy::Nonbonded<Tspace,CoulombLJ>>(it.value(), spc);
                        if (it.key()=="nonbonded")
                            push_back<Energy::Nonbonded<Tspace,FunctorPotential<typename Tspace::Tparticle>>>(it.value(), spc);
                        if (it.key()=="nonbonded_coulombhs")
                            push_back<Energy::Nonbonded<Tspace,CoulombHS>>(it.value(), spc);
                        if (it.key()=="nonbonded_coulombwca")
                            push_back<Energy::Nonbonded<Tspace,CoulombWCA>>(it.value(), spc);
                        if (it.key()=="nonbonded_pmwca")
                            push_back<Energy::Nonbonded<Tspace,PrimitiveModelWCA>>(it.value(), spc);
                        if (it.key()=="nonbonded_deserno")
                            push_back<Energy::NonbondedCached<Tspace,DesernoMembrane<typename Tspace::Tparticle>>>(it.value(), spc);
                        if (it.key()=="nonbonded_desernoAA")
                            push_back<Energy::NonbondedCached<Tspace,DesernoMembraneAA<typename Tspace::Tparticle>>>(it.value(), spc);
                        if (it.key()=="bonded")
                            push_back<Energy::Bonded<Tspace>>(it.value(), spc);
                        if (it.key()=="confine")
                            push_back<Energy::Confine<Tspace>>(it.value(), spc);
                        if (it.key()=="example2d")
                            push_back<Energy::Example2D>(it.value(), spc);
                        if (it.key()=="isobaric")
                            push_back<Energy::Isobaric<Tspace>>(it.value(), spc);
                        if (it.key()=="penalty")
#ifdef ENABLE_MPI
                            push_back<Energy::PenaltyMPI<Tspace>>(it.value(), spc);
#else
                            push_back<Energy::Penalty<Tspace>>(it.value(), spc);
#endif
#ifdef ENABLE_POWERSASA
                        if (it.key()=="sasa")
                            push_back<Energy::SASAEnergy<Tspace>>(it.value(), spc);
#endif
                        // additional energies go here...

                        addEwald(it.value(), spc); // add reciprocal Ewald terms if appropriate

                        if (it.key()=="maxenergy") {
                            maxenergy = it.value().get<double>();
                            continue;
                        }

                        if (vec.size()==oldsize)
                            throw std::runtime_error("unknown term");

                    } catch (std::exception &e) {
                        throw std::runtime_error("Error adding energy '" + it.key() + "': " + e.what());
                    }
                }
            }
        }

        double energy(Change &change) override {
            double du=0;
            for (auto i : this->vec) {
                i->key=key;
                du += i->energy(change);
                if (du>maxenergy)
                    break; // stop summing energies
            }
            return du;
        } //!< Energy due to changes

        void init() override {
            for (auto i : this->vec)
                i->init();
        }

        void sync(Energybase* basePtr, Change &change) override {
            auto other = dynamic_cast<decltype(this)>(basePtr);
            if (other)
                if (other->size()==size()) {
                    for (size_t i=0; i<size(); i++)
                        this->vec[i]->sync( other->vec[i].get(), change );
                    return;
                }
            throw std::runtime_error("hamiltonian mismatch");
        }
}; //!< Aggregates and sums energy terms
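/*
 * Sketch of the input layout consumed by the Hamiltonian constructor above:
 * `energy` is a list of single-key objects, each key selecting one term.
 * The keys below are real ("isobaric", "nonbonded_coulomblj"); the nested
 * payloads are illustrative placeholders and not complete configurations.
 */
#ifdef DOCTEST_LIBRARY_INCLUDED
TEST_CASE("[Faunus] Hamiltonian - energy list sketch") {
    json j = R"(
        { "energy": [
            { "isobaric": { "P/atm": 1.0 } },
            { "nonbonded_coulomblj": { "cutoff_g2g": 20.0 } }
        ] }
    )"_json;
    for (auto &m : j.at("energy")) // same iteration as in the constructor
        CHECK( m.size() == 1 );    // one key, i.e. one term, per list item
    CHECK( j.at("energy").size() == 2 );
}
#endif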
} // namespace Energy
} // namespace Faunus
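/*
 * Usage sketch (not from the library): a Monte Carlo driver keeps two
 * Hamiltonians, one for the old state (key==OLD) and one for the trial
 * state (key==NEW); after each move the accepted state is copied across
 * with `sync()`. The toy term below illustrates that pattern with a plain
 * double instead of a real energy; the assumed default-constructed `Change`
 * is only a stand-in for a real move record.
 */
#ifdef DOCTEST_LIBRARY_INCLUDED
namespace {
    struct ToyTerm : public Faunus::Energy::Energybase {
        double value = 0; // stand-in for a real energy evaluation
        double energy(Faunus::Change&) override { return value; }
        void sync(Faunus::Energy::Energybase *basePtr, Faunus::Change&) override {
            if (auto other = dynamic_cast<ToyTerm*>(basePtr))
                value = other->value;
        }
    };
}
TEST_CASE("[Faunus] Energybase - old/new sync sketch") {
    using doctest::Approx;
    ToyTerm oldstate, trial;
    oldstate.key = Faunus::Energy::Energybase::OLD;
    trial.key = Faunus::Energy::Energybase::NEW;
    trial.value = -2.0;            // pretend the move lowered the energy
    Faunus::Change change;
    oldstate.sync(&trial, change); // move accepted -> copy trial to old
    CHECK( oldstate.energy(change) == Approx(-2.0) );
}
#endif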
#pragma once #include "core.h" #include "geometry.h" #include "space.h" #include "potentials.h" #include "multipole.h" #include "penalty.h" #include "mpi.h" #include <Eigen/Dense> #include <set> #ifdef ENABLE_POWERSASA #include <power_sasa.h> #endif namespace Faunus { namespace Energy { class Energybase { public: enum keys {OLD, NEW, NONE}; keys key=NONE; std::string name; std::string cite; virtual double energy(Change&)=0; //!< energy due to change virtual void to_json(json &j) const;; //!< json output virtual void sync(Energybase*, Change&); virtual void init(); //!< reset and initialize virtual inline void force(std::vector<Point> &forces) {}; // update forces on all particles }; void to_json(json &j, const Energybase &base); //!< Converts any energy class to json object /** * This holds Ewald setup and must *not* depend on particle type, nor depend on Space */ struct EwaldData { typedef std::complex<double> Tcomplex; Eigen::Matrix3Xd kVectors; // k-vectors, 3xK Eigen::VectorXd Aks; // 1xK, to minimize computational effort (Eq.24,DOI:10.1063/1.481216) Eigen::VectorXcd Qion, Qdip; // 1xK double alpha, rc, kc, check_k2_zero, lB; double const_inf, eps_surf; bool spherical_sum=true; bool ipbc=false; int kVectorsInUse=0; Point L; //!< Box dimensions void update(const Point &box); }; void from_json(const json &j, EwaldData &d); void to_json(json &j, const EwaldData &d); #ifdef DOCTEST_LIBRARY_INCLUDED TEST_CASE("[Faunus] Ewald - EwaldData") { using doctest::Approx; EwaldData data = R"({ "ipbc": false, "epsr": 1.0, "alpha": 0.894427190999916, "epss": 1.0, "kcutoff": 11.0, "spherical_sum": true, "cutoff": 5.0})"_json; data.update( Point(10,10,10) ); CHECK(data.ipbc == false); CHECK(data.const_inf == 1); CHECK(data.alpha == 0.894427190999916); CHECK(data.kVectors.cols() == 2975); CHECK(data.Qion.size() == data.kVectors.cols()); data.ipbc=true; data.update( Point(10,10,10) ); CHECK(data.kVectors.cols() == 846); CHECK(data.Qion.size() == data.kVectors.cols()); } #endif /** @brief recipe or policies for ion-ion ewald */ template<class Tspace, bool eigenopt=false /** use Eigen matrix ops where possible */> struct PolicyIonIon { typedef typename Tspace::Tpvec::iterator iter; Tspace *spc; Tspace *old=nullptr; // set only if key==NEW at first call to `sync()` PolicyIonIon(Tspace &spc) : spc(&spc) {} void updateComplex(EwaldData &data) const { if (eigenopt) if (data.ipbc==false) { auto pos = asEigenMatrix(spc->p.begin(), spc->p.end(), &Tspace::Tparticle::pos); // Nx3 auto charge = asEigenVector(spc->p.begin(), spc->p.end(), &Tspace::Tparticle::charge); // Nx1 Eigen::MatrixXd kr = pos.matrix() * data.kVectors; // Nx3 * 3xK = NxK data.Qion.real() = (kr.array().cos().colwise()*charge).colwise().sum(); data.Qion.imag() = kr.array().sin().colwise().sum(); return; } for (int k=0; k<data.kVectors.cols(); k++) { const Point& kv = data.kVectors.col(k); EwaldData::Tcomplex Q(0,0); if (data.ipbc) for (auto &i : spc->p) Q += kv.cwiseProduct(i.pos).array().cos().prod() * i.charge; else for (auto &i : spc->p) { double dot = kv.dot(i.pos); Q += i.charge * EwaldData::Tcomplex( std::cos(dot), std::sin(dot) ); } data.Qion[k] = Q; } } //!< Update all k vectors void updateComplex(EwaldData &data, iter begin, iter end) const { assert(old!=nullptr); assert(spc->p.size() == old->p.size()); size_t ibeg = std::distance(spc->p.begin(), begin); // it->index size_t iend = std::distance(spc->p.begin(), end); // it->index for (int k=0; k<data.kVectors.cols(); k++) { auto& Q = data.Qion[k]; Point q = data.kVectors.col(k); if 
(data.ipbc) for (size_t i=ibeg; i<=iend; i++) { Q += q.cwiseProduct( spc->p[i].pos ).array().cos().prod() * spc->p[i].charge; Q -= q.cwiseProduct( old->p[i].pos ).array().cos().prod() * old->p[i].charge; } else for (size_t i=ibeg; i<=iend; i++) { double _new = q.dot(spc->p[i].pos); double _old = q.dot(old->p[i].pos); Q += spc->p[i].charge * EwaldData::Tcomplex( std::cos(_new), std::sin(_new) ); Q -= old->p[i].charge * EwaldData::Tcomplex( std::cos(_old), std::sin(_old) ); } } } //!< Optimized update of k subset. Require access to old positions through `old` pointer double selfEnergy(const EwaldData &d) { double E = 0; for (auto& i : spc->p) E += i.charge * i.charge; return -d.alpha*E / std::sqrt(pc::pi) * d.lB; } double surfaceEnergy(const EwaldData &d) { if (d.const_inf < 0.5) return 0; Point qr(0,0,0); for (auto &i : spc->p) qr += i.charge*i.pos; return d.const_inf * 2 * pc::pi / ( (2*d.eps_surf+1) * spc->geo.getVolume() ) * qr.dot(qr) * d.lB; } double reciprocalEnergy(const EwaldData &d) { double E = 0; if (eigenopt) // known at compile time E = d.Aks.cwiseProduct( d.Qion.cwiseAbs2() ).sum(); else for (int k=0; k<d.Qion.size(); k++) E += d.Aks[k] * std::norm( d.Qion[k] ); return 2 * pc::pi / spc->geo.getVolume() * E * d.lB; } }; #ifdef DOCTEST_LIBRARY_INCLUDED TEST_CASE("[Faunus] Ewald - IonIonPolicy") { using doctest::Approx; typedef Space<Geometry::Cuboid, Particle<Charge,Dipole>> Tspace; Tspace spc; spc.p.resize(2); spc.geo = R"( {"length": 10} )"_json; spc.p[0] = R"( {"pos": [0,0,0], "q": 1.0} )"_json; spc.p[1] = R"( {"pos": [1,0,0], "q": -1.0} )"_json; PolicyIonIon<Tspace> ionion(spc); EwaldData data = R"({ "epsr": 1.0, "alpha": 0.894427190999916, "epss": 1.0, "kcutoff": 11.0, "spherical_sum": true, "cutoff": 5.0})"_json; data.ipbc = false; // PBC Ewald (http://dx.doi.org/10.1063/1.481216) data.update( spc.geo.getLength() ); ionion.updateComplex( data ); CHECK( ionion.selfEnergy(data) == Approx(-1.0092530088080642*data.lB) ); CHECK( ionion.surfaceEnergy(data) == Approx(0.0020943951023931952*data.lB) ); CHECK( ionion.reciprocalEnergy(data) == Approx(0.21303063979675319*data.lB) ); data.ipbc = true; // IPBC Ewald data.update( spc.geo.getLength() ); ionion.updateComplex( data ); CHECK( ionion.selfEnergy(data) == Approx(-1.0092530088080642*data.lB) ); CHECK( ionion.surfaceEnergy(data) == Approx(0.0020943951023931952*data.lB) ); CHECK( ionion.reciprocalEnergy(data) == Approx(0.0865107467*data.lB) ); } #endif /** @brief Ewald summation reciprocal energy */ template<class Tspace, class Policy=PolicyIonIon<Tspace>> class Ewald : public Energybase { private: EwaldData data; Policy policy; Tspace& spc; public: Ewald(const json &j, Tspace &spc) : policy(spc), spc(spc) { name = "ewald"; data = j; init(); } void init() override { data.update( spc.geo.getLength() ); policy.updateComplex(data); // brute force. todo: be selective } double energy(Change &change) override { double u=0; if (!change.empty()) { // If the state is NEW (trial state), then update all k-vectors if (key==NEW) { if (change.all || change.dV) { // everything changes data.update( spc.geo.getLength() ); policy.updateComplex(data); // update all (expensive!) 
} else { if (change.groups.size()==1) { // exactly one group is moved auto& d = change.groups[0]; auto& g = spc.groups[d.index]; if (d.atoms.size()==1) // exactly one atom is moved policy.updateComplex(data, g.begin()+d.atoms[0], g.begin()+d.atoms[0]); else policy.updateComplex(data, g.begin(), g.end()); } else policy.updateComplex(data); } } u = policy.selfEnergy(data) + policy.surfaceEnergy(data) + policy.reciprocalEnergy(data); } return u; } void sync(Energybase *basePtr, Change &change) override { auto other = dynamic_cast<decltype(this)>(basePtr); assert(other); if (other->key==OLD) policy.old = &(other->spc); // give NEW access to OLD space for optimized updates data = other->data; // copy everything! } //!< Called after a move is rejected/accepted as well as before simulation void to_json(json &j) const override { j = data; } }; template<typename Tspace> class Isobaric : public Energybase { private: Tspace& spc; double P; // P/kT public: Isobaric(const json &j, Tspace &spc) : spc(spc) { name = "isobaric"; cite = "Frenkel & Smith 2nd Ed (Eq. 5.4.13)"; P = j.value("P/mM", 0.0) * 1.0_mM; if (P<1e-10) { P = j.value("P/Pa", 0.0) * 1.0_Pa; if (P<1e-10) P = j.at("P/atm").get<double>() * 1.0_atm; } } double energy(Change &change) override { if (change.dV || change.all) { double V = spc.geo.getVolume(); size_t N=0; for (auto &g : spc.groups) if (!g.empty()) { if (g.atomic) N += g.size(); else N++; } return P*V-(N+1)*std::log(V); } else return 0; } void to_json(json &j) const override { j["P/atm"] = P / 1.0_atm; j["P/mM"] = P / 1.0_mM; j["P/Pa"] = P / 1.0_Pa; _roundjson(j,5); } }; /** * @brief Base class for external potentials * * This will apply an external energy to a defined * list of molecules, either acting on individual * atoms or the mass-center. The specific energy * function, `func` is injected in derived classes. 
*/ template<typename Tspace> class ExternalPotential : public Energybase { protected: typedef typename Tspace::Tpvec Tpvec; typedef typename Tspace::Tparticle Tparticle; bool COM=false; // apply on center-of-mass Tspace& spc; std::set<int> molids; // molecules to act upon std::function<double(const Tparticle&)> func=nullptr; // energy of single particle std::vector<std::string> _names; template<class Tparticle> double _energy(const Group<Tparticle> &g) const { double u=0; if (molids.find(g.id) != molids.end()) { if (COM) { // apply only to center of mass Tparticle cm; cm.pos = g.cm; u = func(cm); } else { for (auto &p : g) { u += func(p); if (std::isnan(u)) break; } } } return u; } //!< External potential on a single particle public: ExternalPotential(const json &j, Tspace &spc) : spc(spc) { name="external"; COM = j.value("com", false); _names = j.at("molecules").get<decltype(_names)>(); // molecule names auto _ids = names2ids(molecules<Tpvec>, _names); // names --> molids molids = std::set<int>(_ids.begin(), _ids.end()); // vector --> set if (molids.empty() || molids.size()!=_names.size() ) throw std::runtime_error(name + ": molecule list is empty"); } double energy(Change &change) override { assert(func!=nullptr); double u=0; if (change.dV or change.all) { for (auto &g : spc.groups) { // check all groups u += _energy(g); if (std::isnan(u)) break; } } else for (auto &d : change.groups) { auto &g = spc.groups.at(d.index); // check specified groups if (d.all or COM) // check all atoms in group u += _energy(g); // _energy also checks for molecule id else { // check only specified atoms in group if (molids.find(g.id) != molids.end()) for (auto i : d.atoms) u += func( *(g.begin()+i) ); } if (std::isnan(u)) break; } return u; } void to_json(json &j) const override { j["molecules"] = _names; j["com"] = COM; } }; //!< Base class for external potentials, acting on particles /** * @brief Confines molecules inside geometric shapes */ template<typename Tspace, typename base=ExternalPotential<Tspace>> class Confine : public base { public: enum Variant {sphere, cylinder, cuboid, none}; Variant type=none; private: Point origo={0,0,0}, dir={1,1,1}; Point low, high; double radius, k; bool scale=false; std::map<std::string, Variant> m = { {"sphere", sphere}, {"cylinder", cylinder}, {"cuboid", cuboid} }; public: Confine(const json &j, Tspace &spc) : base(j,spc) { base::name = "confine"; k = value_inf(j, "k") * 1.0_kJmol; // get floating point; allow inf/-inf type = m.at( j.at("type") ); if (type==sphere or type==cylinder) { radius = j.at("radius"); origo = j.value("origo", origo); scale = j.value("scale", scale); if (type==cylinder) dir = {1,1,0}; base::func = [&radius=radius, origo=origo, k=k, dir=dir](const typename base::Tparticle &p) { double d2 = (origo-p.pos).cwiseProduct(dir).squaredNorm() - radius*radius; if (d2>0) return 0.5*k*d2; return 0.0; }; // If volume is scaled, also scale the confining radius by adding a trigger // to `Space::scaleVolume()` if (scale) spc.scaleVolumeTriggers.push_back( [&radius=radius](Tspace &spc, double Vold, double Vnew) { radius *= std::cbrt(Vnew/Vold); } ); } if (type==cuboid) { low = j.at("low").get<Point>(); high = j.at("high").get<Point>(); base::func = [low=low, high=high, k=k](const typename base::Tparticle &p) { double u=0; Point d = low-p.pos; for (int i=0; i<3; ++i) if (d[i]>0) u+=d[i]*d[i]; d = p.pos-high; for (int i=0; i<3; ++i) if (d[i]>0) u+=d[i]*d[i]; return 0.5*k*u; }; } } void to_json(json &j) const override { if (type==cuboid) j = {{"low", low}, 
{"high", high}};
      if (type==sphere or type==cylinder)
        j = {{"radius", radius}};
      if (type==sphere) {
        j["origo"] = origo;
        j["scale"] = scale;
      }
      for (auto &i : m)
        if (i.second==type)
          j["type"] = i.first;
      j["k"] = k/1.0_kJmol;
      base::to_json(j);
      _roundjson(j,5);
    }
}; //!< Confine particles to a sub-region of the simulation container

/*
 * The keys of the `intra` map are group indices and the values
 * are vectors of `BondData`. For bonds between groups, fill
 * in `inter`, which is evaluated for every call to `energy`.
 *
 * @todo Optimize.
 */
template<typename Tspace>
class Bonded : public Energybase {
  private:
    Tspace& spc;
    typedef typename Tspace::Tpvec Tpvec;
    typedef std::vector<std::shared_ptr<Potential::BondData>> BondVector;
    BondVector inter;               // inter-molecular bonds
    std::map<int,BondVector> intra; // intra-molecular bonds

    void update() {
      using namespace Potential;
      intra.clear();
      for (size_t i=0; i<spc.groups.size(); i++) {
        if (!spc.groups.empty()) {
          auto &g = spc.groups[i];
          for (auto &b : molecules<Tpvec>.at(g.id).bonds) {
            intra[i].push_back( b->clone() ); // deep copy BondData from MoleculeData
            intra[i].back()->shift( std::distance(spc.p.begin(), g.begin()) );
            Potential::setBondEnergyFunction( intra[i].back(), spc.p );
          }
        }
      }
    } // finds and adds all intra-molecular bonds of active molecules

    double sum( const BondVector &v ) const {
      double u=0;
      for (auto &b : v) {
        assert(b->hasEnergyFunction());
        u += b->energy(spc.geo.distanceFunc);
      }
      return u;
    } // sum energy in vector of BondData

  public:
    Bonded(const json &j, Tspace &spc) : spc(spc) {
      name = "bonded";
      update();
      if (j.is_object())
        if (j.count("bondlist")==1)
          inter = j["bondlist"].get<BondVector>();
      for (auto &i : inter) // set all energy functions
        Potential::setBondEnergyFunction( i, spc.p );
    }

    void to_json(json &j) const override {
      if (!inter.empty())
        j["bondlist"] = inter;
      if (!intra.empty()) {
        json& _j = j["bondlist-intramolecular"];
        _j = json::array();
        for (auto &i : intra)
          for (auto &b : i.second)
            _j.push_back(b);
      }
    }

    double energy(Change &c) override {
      double u=0;
      if ( !c.empty() ) {
        u = sum(inter); // energy of inter-molecular bonds
        if ( c.all || c.dV ) {
          for (auto& i : intra)               // energy of intra-molecular bonds
            if (!spc.groups[i.first].empty()) // add only if group is active
              u += sum(i.second);
        } else
          for (auto &d : c.groups)
            if (d.internal)
              u += sum( intra[d.index] );
      }
      return u;
    }; // brute force -- refine this!
};

/**
 * @brief Nonbonded energy using a pair-potential
 */
template<typename Tspace, typename Tpairpot>
class Nonbonded : public Energybase {
  private:
    double g2gcnt=0, g2gskip=0;

  protected:
    typedef typename Tspace::Tgroup Tgroup;
    double Rc2_g2g=pc::infty;

    void to_json(json &j) const override {
      j["pairpot"] = pairpot;
      j["cutoff_g2g"] = std::sqrt(Rc2_g2g);
    }

    template<typename T>
    inline bool cut(const T &g1, const T &g2) {
      g2gcnt++;
      if (g1.atomic || g2.atomic)
        return false;
      if ( spc.geo.sqdist(g1.cm, g2.cm)<Rc2_g2g )
        return false;
      g2gskip++;
      return true;
    } //!< true if group<->group interaction can be skipped

    template<typename T>
    inline double i2i(const T &a, const T &b) {
      assert(&a!=&b && "a and b cannot be the same particle");
      return pairpot(a, b, spc.geo.vdist(a.pos, b.pos));
    }

    /*
     * Internal energy in group, calculating all with all or, if `index`
     * is given, only a subset. Index specifies the internal index (starting
     * at zero) of changed particles within the group.
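     *
     * For example, with index={0,2} the subset branch below adds the energy
     * of particles 0 and 2 with every unchanged member of the group
     * (moved<->static) plus the single 0<->2 pair (moved<->moved).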
     */
    double g_internal(const Tgroup &g, const std::vector<int> &index=std::vector<int>()) {
      using namespace ranges;
      double u=0;
      if (index.empty()) // assume that all atoms have changed
        for ( auto i = g.begin(); i != g.end(); ++i )
          for ( auto j=i; ++j != g.end(); )
            u += i2i(*i, *j);
      else { // only a subset have changed
        auto fixed = view::ints( 0, int(g.size()) )
          | view::remove_if( [&index](int i){return std::binary_search(index.begin(), index.end(), i);});
        for (int i : index) { // moved<->static
          for (int j : fixed ) {
            u += i2i( *(g.begin()+i), *(g.begin()+j));
          }
        }
        for (int i : index) // moved<->moved
          for (int j : index)
            if (j>i) {
              u += i2i( *(g.begin()+i), *(g.begin()+j));
            }
      }
      return u;
    }

    /*
     * Calculates the interaction energy of a particle, `i`, covering both the
     * case where (1) it is already part of Space and (2) it is external to Space.
     */
    double i2all(const typename Tspace::Tparticle &i) {
      double u=0;
      auto it = spc.findGroupContaining(i); // iterator to group
      if (it!=spc.groups.end()) {   // check if i belongs to group in space
        for (auto &g : spc.groups)  // i with all other particles
          if (&g!=&(*it))           // avoid self-interaction
            if (!cut(g, *it))       // check g2g cut-off
              for (auto &j : g)     // loop over particles in other group
                u += i2i(i,j);
        for (auto &j : *it)         // i with all particles in own group
          if (&j!=&i)
            u += i2i(i,j);
      } else // particle does not belong to any group
        for (auto &g : spc.groups)  // i with all other *active* particles
          for (auto &j : g)         // (this will include only active particles)
            u += i2i(i,j);
      return u;
    }

    /*
     * Group-to-group energy. A subset of `g1` can be given with `index`, which refers
     * to the internal index (starting at zero) of the first group, `g1`.
     * NOTE: the interpretation of this function is extended to also consider the mutual interactions
     * of a subset of each group and in that case returns sub1 <-> g2 and !sub1 <-> sub2,
     * hence excluding !sub1 <-> !sub2 in comparison to calling unconstrained g2g. In the absence
     * of sub1, any sub2 is ignored.
     */
    virtual double g2g(const Tgroup &g1, const Tgroup &g2, const std::vector<int> &index=std::vector<int>(), const std::vector<int> &jndex=std::vector<int>()) {
      using namespace ranges;
      double u = 0;
      if (!cut(g1,g2)) {
        if ( index.empty() && jndex.empty() ) // if index is empty, assume all in g1 have changed
          for (auto &i : g1)
            for (auto &j : g2) {
              u += i2i(i,j);
            }
        else { // only a subset of g1
          for (auto i : index)
            for (auto j=g2.begin(); j!=g2.end(); ++j) {
              u += i2i( *(g1.begin()+i), *j);
            }
          if ( !jndex.empty() ) {
            auto fixed = view::ints( 0, int(g1.size()) )
              | view::remove_if( [&index](int i){return std::binary_search(index.begin(), index.end(), i);});
            for (auto i : jndex)     // moved2 <-|
              for (auto j : fixed) { // static1 <-|
                u += i2i( *(g2.begin()+i), *(g1.begin()+j));
              }
          }
        }
      }
      return u;
    }

  public:
    Tspace& spc;      //!< Space to operate on
    Tpairpot pairpot; //!< Pair potential

    Nonbonded(const json &j, Tspace &spc) : spc(spc) {
      name="nonbonded";
      pairpot = j;
      Rc2_g2g = std::pow( j.value("cutoff_g2g", pc::infty), 2);
    }

    void force(std::vector<Point> &forces) override {
      auto &p = spc.p; // alias to particle vector (reference)
      assert(forces.size() == p.size() && "the forces size must match the particle size");
      for (size_t i=0; i<p.size()-1; i++)
        for (size_t j=i+1; j<p.size(); j++) {
          Point r = spc.geo.vdist(p[i].pos, p[j].pos); // minimum distance vector
          Point f(0,0,0); // zero-initialized; todo: enable pairpot.force( p[i], p[j], r.squaredNorm(), r )
          forces[i] += f;
          forces[j] -= f;
        }
    }

    double energy(Change &change) override {
      using namespace ranges;
      double u=0;
      if (!change.empty()) {
        if (change.dV) {
#pragma omp parallel for reduction (+:u) schedule (dynamic)
          for ( auto i = spc.groups.begin(); i < spc.groups.end(); ++i ) {
            for ( auto j=i; ++j != spc.groups.end(); )
              u += g2g( *i, *j );
            if (i->atomic)
              u += g_internal(*i);
          }
          return u;
        }

        // did everything change?
        if (change.all) {
#pragma omp parallel for reduction (+:u) schedule (dynamic)
          for ( auto i = spc.groups.begin(); i < spc.groups.end(); ++i ) {
            for ( auto j=i; ++j != spc.groups.end(); )
              u += g2g( *i, *j );
            u += g_internal(*i);
          }
          // more todo here...
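          /*
           * The pattern above sums each unordered group pair exactly once:
           * the inner iterator starts one step past the outer. With OpenMP,
           * reduction(+:u) gives every thread a private accumulator that is
           * summed at the end, and schedule(dynamic) balances the shrinking
           * inner ranges across threads.
           */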
          return u;
        }

        // if exactly ONE molecule is changed
        if (change.groups.size()==1 && !change.dNpart) {
          auto& d = change.groups[0];
          auto gindex = spc.groups.at(d.index).to_index(spc.p.begin()).first;
          if (d.atoms.size()==1) // exactly one atom has moved
            return i2all(spc.p.at(gindex+d.atoms[0]));
          auto& g1 = spc.groups.at(d.index);
          for (auto &g2 : spc.groups)
            if (&g1 != &g2)
              u += g2g(g1, g2, d.atoms);
          if (d.internal)
            u += g_internal(g1, d.atoms);
          return u;
        }

        if (change.dNpart) {
          auto moved = change.touchedGroupIndex(); // index of moved groups
          std::vector<int> Moved;
          for (auto i: moved)
            Moved.push_back(i);
          std::sort( Moved.begin(), Moved.end() );
          auto fixed = view::ints( 0, int(spc.groups.size()) )
            | view::remove_if( [&Moved](int i){return std::binary_search(Moved.begin(), Moved.end(), i);} ); // index of static groups
          for ( auto cg1 = change.groups.begin(); cg1 < change.groups.end() ; ++cg1 ) { // Loop over all changed groups
            std::vector<int> ifiltered, jfiltered;
            for (auto i: cg1->atoms) {
              if ( i < spc.groups.at(cg1->index).size() )
                ifiltered.push_back(i);
            }
            if ( !( cg1->dNpart && ifiltered.empty() ) ) // Skip if particles are removed
              for ( auto j : fixed) {
                u += g2g( spc.groups.at(cg1->index), spc.groups[j], ifiltered, jfiltered );
              }
            for ( auto cg2 = cg1; ++cg2 != change.groups.end(); ) {
              for (auto i: cg2->atoms)
                if ( i < spc.groups.at(cg2->index).size() )
                  jfiltered.push_back(i);
              if ( !( (cg1->dNpart && ifiltered.empty()) && (cg2->dNpart && jfiltered.empty()) ) ) // Skip if particles are removed from both
                u += g2g( spc.groups.at(cg1->index), spc.groups.at(cg2->index), ifiltered, jfiltered );
              jfiltered.clear();
            }
            if ( ifiltered.size() != 0 )
              u += g_internal( spc.groups.at( cg1->index ), ifiltered );
          }
          return u;
        }

        auto moved = change.touchedGroupIndex(); // index of moved groups
        auto fixed = view::ints( 0, int(spc.groups.size()) )
          | view::remove_if( [&moved](int i){return std::binary_search(moved.begin(), moved.end(), i);} ); // index of static groups

        // moved<->moved
        for ( auto i = moved.begin(); i != moved.end(); ++i ) {
          for ( auto j=i; ++j != moved.end(); )
            u += g2g( spc.groups[*i], spc.groups[*j] );
        }
        // moved<->static
        for ( auto i : moved)
          for ( auto j : fixed)
            u += g2g(spc.groups[i], spc.groups[j]);
        // more todo!
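        /*
         * Note that static<->static group pairs are never summed here: those
         * contributions are identical in the trial and old configurations and
         * therefore cancel in the Metropolis acceptance ratio.
         */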
} return u; } }; //!< Nonbonded, pair-wise additive energy term template<typename Tspace, typename Tpairpot> class NonbondedCached : public Nonbonded<Tspace,Tpairpot> { private: typedef Nonbonded<Tspace,Tpairpot> base; typedef typename Tspace::Tgroup Tgroup; Eigen::MatrixXf cache; Tspace &spc; double g2g(const Tgroup &g1, const Tgroup &g2, const std::vector<int> &index=std::vector<int>(), const std::vector<int> &jndex=std::vector<int>()) override { int i = &g1 - &base::spc.groups.front(); int j = &g2 - &base::spc.groups.front(); if (j<i) std::swap(i,j); if (base::key==Energybase::NEW) { // if this is from the trial system, double u = 0; if (!base::cut(g1,g2)) { for (auto &i : g1) for (auto &j : g2) u += base::i2i(i,j); } cache(i,j) = u; } return cache(i,j); // return (cached) value } public: NonbondedCached(const json &j, Tspace &spc) : base(j,spc), spc(spc) { base::name += "EM"; init(); } void init() override { cache.resize( spc.groups.size(), spc.groups.size() ); cache.setZero(); for ( auto i = base::spc.groups.begin(); i < base::spc.groups.end(); ++i ) { for ( auto j=i; ++j != base::spc.groups.end(); ) { int k = &(*i) - &base::spc.groups.front(); int l = &(*j) - &base::spc.groups.front(); if (l<k) std::swap(k,l); double u = 0; if (!base::cut(*i,*j)) { for (auto &k : *i) for (auto &l : *j) u += base::i2i(k,l); } cache(k,l) = u; } } } //!< Cache pair interactions in matrix double energy(Change &change) override { using namespace ranges; double u=0; if (!change.empty()) { if (change.all || change.dV) { #pragma omp parallel for reduction (+:u) schedule (dynamic) for ( auto i = base::spc.groups.begin(); i < base::spc.groups.end(); ++i ) { for ( auto j=i; ++j != base::spc.groups.end(); ) u += g2g( *i, *j ); } return u; } // if exactly ONE molecule is changed if (change.groups.size()==1) { auto& d = change.groups[0]; auto& g1 = base::spc.groups.at(d.index); for (auto &g2 : base::spc.groups) { if (&g1 != &g2) u += g2g(g1, g2, d.atoms); } return u; } auto moved = change.touchedGroupIndex(); // index of moved groups auto fixed = view::ints( 0, int(base::spc.groups.size()) ) | view::remove_if( [&moved](int i){return std::binary_search(moved.begin(), moved.end(), i);} ); // index of static groups // moved<->moved for ( auto i = moved.begin(); i != moved.end(); ++i ) for ( auto j=i; ++j != moved.end(); ) { u += g2g( base::spc.groups[*i], base::spc.groups[*j] ); } // moved<->static for ( auto i : moved) for ( auto j : fixed) u += g2g(base::spc.groups[i], base::spc.groups[j]); // more todo! } return u; } void sync(Energybase *basePtr, Change &change) override { auto other = dynamic_cast<decltype(this)>(basePtr); assert(other); if (change.all || change.dV) cache.triangularView<Eigen::StrictlyUpper>() = (other->cache).template triangularView<Eigen::StrictlyUpper>(); else for (auto &d : change.groups) { for (int i=0; i<d.index; i++) cache(i,d.index) = other->cache(i,d.index); for (size_t i=d.index+1; i<base::spc.groups.size(); i++) cache(d.index,i) = other->cache(d.index,i); } } //!< Copy energy matrix from other }; //!< Nonbonded with cached energies (Energy Matrix) /** * `udelta` is the total change of updating the energy function. If * not handled this will appear as an energy drift (which it is!). To * avoid this, this term is added to the energy but since it's the * same in both the trial and old state energies it will not affect * MC move acceptance. 
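 *
 * Update scheme (see `update()` below): every call deposits `f0` at the
 * current reaction coordinate and increments the histogram; once the
 * smallest histogram count reaches `samplings`, the surface is shifted by
 * its minimum, `f0` is multiplied by `scale`, `samplings` is rescaled, and
 * the histogram is reset.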
*/ template<typename Tspace> class Penalty : public Energybase { protected: typedef typename Tspace::Tparticle Tparticle; typedef typename Tspace::Tgroup Tgroup; typedef typename Tspace::Tpvec Tpvec; typedef typename std::shared_ptr<ReactionCoordinate::ReactionCoordinateBase> Tcoord; Tspace &spc; bool nodrift; bool quiet; size_t dim=0; size_t cnt=0; // number of calls to `sync()` size_t nupdate; // update frequency [steps] size_t samplings; size_t nconv=0; double udelta=0; // total energy change of updating penalty function double scale; // scaling factor for f0 double f0; // penalty increment std::string file, hisfile; std::vector<Tcoord> rcvec; // vector of reaction coordinate functions std::vector<double> coord; // latest reaction coordinate Table<int> histo; Table<double> penalty; public: Penalty(const json &j, Tspace &spc) : spc(spc) { using namespace ReactionCoordinate; name = "penalty"; f0 = j.value("f0", 0.5); scale = j.value("scale", 0.8); quiet = j.value("quiet", true); nupdate = j.value("update", 0); samplings = j.value("samplings", 1); nodrift = j.value("nodrift", true); file = j.at("file").get<std::string>(); hisfile = j.value("histogram", "penalty-histogram.dat"); std::vector<double> binwidth, min, max; if (scale<0 or scale>1) throw std::runtime_error("`scale` must be in the interval [0:1]"); for (auto &i : j.at("coords")) if (i.is_object()) if (i.size()==1) { std::shared_ptr<ReactionCoordinate::ReactionCoordinateBase> rc=nullptr; for (auto it=i.begin(); it!=i.end(); ++it) { if (it.key()=="atom") rc = std::make_shared<AtomProperty>(it.value(), spc); if (it.key()=="system") rc = std::make_shared<SystemProperty>(it.value(), spc); if (it.key()=="cmcm") rc = std::make_shared<MassCenterSeparation>(it.value(), spc); if (it.key()=="angle") rc = std::make_shared<PrincipalAxisAngle>(it.value(), spc); if (rc!=nullptr) { if (rc->min>=rc->max || rc->binwidth<=0) throw std::runtime_error("min<max and binwidth>0 required for '" + it.key() + "'"); rcvec.push_back(rc); binwidth.push_back( rc->binwidth ); min.push_back( rc->min ); max.push_back( rc->max ); } else throw std::runtime_error("unknown coordinate type '" + it.key() + "'"); } } dim = binwidth.size(); if (dim<1 || dim>2) throw std::runtime_error("minimum one maximum two coordinates required"); coord.resize(rcvec.size(), 0); histo.reInitializer(binwidth, min, max); penalty.reInitializer(binwidth, min, max); std::ifstream f(MPI::prefix+file); if (f) { cout << "Loading penalty function '" << MPI::prefix+file << "'" << endl; std::string hash; f >> hash >> f0 >> samplings; for (int row=0; row<penalty.rows(); row++) for (int col=0; col<penalty.cols(); col++) if (not f.eof()) f >> penalty(row,col); else throw std::runtime_error("penalty file dimension mismatch"); } } virtual ~Penalty() { std::ofstream f1(MPI::prefix + file), f2(MPI::prefix + hisfile); if (f1) f1 << "# " << f0 << " " << samplings << "\n" << penalty.array() - penalty.minCoeff() << "\n"; if (f2) f2 << histo << "\n"; // add function to save to numpy-friendly file... 
}

    void to_json(json &j) const override {
      j["file"] = file;
      j["scale"] = scale;
      j["update"] = nupdate;
      j["nodrift"] = nodrift;
      j["histogram"] = hisfile;
      j["f0_final"] = f0;
      auto& _j = j["coords"] = json::array();
      for (auto rc : rcvec) {
        json t;
        t[rc->name] = *rc;
        _j.push_back(t);
      }
    }

    double energy(Change &change) override {
      assert(rcvec.size()<=coord.size());
      double u=0;
      coord.resize( rcvec.size() );
      if (!change.empty()) {
        for (size_t i=0; i<rcvec.size(); i++) {
          coord.at(i) = rcvec[i]->operator()();
          if ( not rcvec[i]->inRange(coord[i]) )
            return pc::infty;
        }
        penalty.to_index(coord);
        u = penalty[coord];
      }
      return (nodrift) ? u - udelta : u;
    }

    /*
     * @todo: If this is called before `energy()`, the coord
     * is never calculated and causes undefined behavior
     */
    virtual void update(const std::vector<double> &c) {
      if (++cnt % nupdate == 0 and f0>0) {
        bool b = histo.minCoeff() >= (int)samplings;
        if (b) {
          double min = penalty.minCoeff();
          penalty = penalty.array() - min;
          if (not quiet)
            cout << "Barriers/kT. Penalty=" << penalty.maxCoeff()
                 << " Histogram=" << std::log(double(histo.maxCoeff())/histo.minCoeff()) << endl;
          f0 = f0 * scale; // reduce penalty energy
          samplings = std::ceil( samplings / scale );
          histo.setZero();
          udelta += -min;
        }
      }
      coord = c;
      histo[coord]++;
      penalty[coord] += f0;
      udelta += f0;
    }

    void sync(Energybase *basePtr, Change &change) override {
      auto other = dynamic_cast<decltype(this)>(basePtr);
      assert(other);
      update(other->coord); // is inside allowed range
      other->update(other->coord);
    } // @todo: this doubles the MPI communication
};

#ifdef ENABLE_MPI

template<typename Tspace, typename Base=Penalty<Tspace>>
struct PenaltyMPI : public Base {
  using Base::samplings;
  using Base::penalty;
  using Base::udelta;
  using Base::scale;
  using Base::histo;
  using Base::coord;
  using Base::cnt;
  using Base::f0;
  using Base::file;
  using Base::hisfile;
  using Base::nconv;

  Eigen::VectorXi weights; // array w. minimum histogram counts
  Eigen::VectorXd buffer;  // receive buffer for penalty functions

  PenaltyMPI(const json &j, Tspace &spc) : Base(j,spc) {
    weights.resize( MPI::mpi.nproc() );
    buffer.resize( penalty.size()*MPI::mpi.nproc() );
  }

  void update(const std::vector<double> &c) override {
    using namespace Faunus::MPI;
    double uold = penalty[c];
    if (++cnt % this->nupdate == 0 and f0>0) {
      int min = histo.minCoeff();
      MPI_Barrier(mpi.comm);
      MPI_Allgather(&min, 1, MPI_INT, weights.data(), 1, MPI_INT, mpi.comm);
      if ( weights.maxCoeff() > samplings ) {
        MPI_Gather(penalty.data(), penalty.size(), MPI_DOUBLE, buffer.data(), penalty.size(), MPI_DOUBLE, 0, mpi.comm);
        if (mpi.isMaster()) {
          penalty.setZero();
          for (int i=0; i<mpi.nproc(); i++)
            penalty += Eigen::Map<Eigen::MatrixXd>( buffer.data()+i*penalty.size(), penalty.rows(), penalty.cols() );
          penalty = ( penalty.array() - penalty.minCoeff() ) / double(mpi.nproc());
        }
        MPI_Bcast(penalty.data(), penalty.size(), MPI_DOUBLE, 0, mpi.comm);
        nconv += 1;
        std::ofstream f3(MPI::prefix + std::to_string(nconv) + file);
        if (f3)
          f3 << "# " << f0 << " " << samplings << "\n" << penalty.array() << endl;
        std::ofstream f4(MPI::prefix + std::to_string(nconv) + hisfile);
        if (f4)
          f4 << histo << endl;
        if (min>0 && !this->quiet)
          cout << "Barriers/kT.
Penalty=" << penalty.maxCoeff() << " Histogram=" << std::log(double(histo.maxCoeff())/histo.minCoeff()) << endl; histo.setZero(); f0 = f0 * scale; // reduce penalty energy samplings = std::ceil( samplings / scale ); } } coord = c; histo[coord]++; penalty[coord] += f0; udelta += penalty[coord] - uold; } //!< Average penalty function across all nodes }; //!< Penalty function with MPI exchange #endif #ifdef ENABLE_POWERSASA /* * @todo: * - can only a subset of sasa be calculated? Note that it's the * `update_coord()` function that takes up most time. * - delegate to GPU? In the PowerSasa paper this is mentioned */ template<class Tspace> class SASAEnergy : public Energybase { public: std::vector<float> sasa, radii; private: typedef typename Tspace::Tparticle Tparticle; typedef typename Tspace::Tpvec Tpvec; Tspace& spc; double probe; // sasa probe radius (angstrom) double conc=0;// co-solute concentration (mol/l) Average<double> avgArea; // average surface area std::shared_ptr<POWERSASA::PowerSasa<float,Point>> ps=nullptr; void updateSASA(const Tpvec &p) { assert(ps != nullptr); radii.resize(p.size()); std::transform(p.begin(), p.end(), radii.begin(), [this](auto &a){ return atoms<Tparticle>[a.id].sigma*0.5 + this->probe;}); ps->update_coords(spc.positions(), radii); // slowest step! for (size_t i=0; i<p.size(); i++) { auto &a = atoms<Tparticle>[p[i].id]; if (std::fabs(a.tfe)>1e-9 || std::fabs(a.tension)>1e-9) ps->calc_sasa_single(i); } sasa = ps->getSasa(); assert(sasa.size()==p.size()); } void to_json(json &j) const override { using namespace u8; j["molarity"] = conc / 1.0_molar; j["radius"] = probe / 1.0_angstrom; j[bracket("SASA")+"/"+angstrom+squared] = avgArea.avg() / 1.0_angstrom; _roundjson(j,5); } /* * @note * This is not enough as the PowerSasa object contains data * that also need syncing. It works due to the `update` (expensive!) * call in `energy`. */ void sync(Energybase *basePtr, Change &c) override { auto other = dynamic_cast<decltype(this)>(basePtr); if (other) { if (c.all || c.dV) { radii = other->radii; sasa = other->sasa; } else { for (auto &d : c.groups) { int offset = std::distance(spc.p.begin(), spc.groups.at(d.index).begin()); for (int j : d.atoms) { int i = j + offset; radii[i] = other->radii[i]; sasa[i] = other->sasa[i]; } } } } } public: SASAEnergy(const json &j, Tspace &spc) : spc(spc) { name = "sasa"; cite = "doi:10.1002/jcc.21844"; probe = j.value("radius", 1.4) * 1.0_angstrom; conc = j.at("molarity").get<double>() * 1.0_molar; init(); } void init() override { radii.resize( spc.p.size() ); std::transform( spc.p.begin(), spc.p.end(), radii.begin(), [this](auto &a){ return atoms<Tparticle>[a.id].sigma*0.5 + this->probe;} ); if (ps==nullptr) ps = std::make_shared<POWERSASA::PowerSasa<float,Point>>(spc.positions(),radii); updateSASA(spc.p); } double energy(Change &change) override { double u=0, A=0; /* * ideally we want to call `update` only if `key==NEW` but * syncronising the PowerSasa object is difficult since it's * non-copyable. */ updateSASA(spc.p); // ideally we want for (size_t i=0; i<spc.p.size(); ++i) { auto &a = atoms<Tparticle>[ spc.p[i].id ]; u += sasa[i] * (a.tension + conc * a.tfe); A += sasa[i]; } avgArea+=A; // sample average area for accepted confs. 
only return u; } }; //!< SASA energy from transfer free energies #endif struct Example2D : public Energybase { Point& i; // reference to 1st particle in the system template<typename Tspace> Example2D(const json &j, Tspace &spc): i(spc.p.at(0).pos) { name = "Example2D"; } double energy(Change &change) override; }; template<typename Tspace> class Hamiltonian : public Energybase, public BasePointerVector<Energybase> { protected: double maxenergy=pc::infty; //!< Maximum allowed energy change typedef typename Tspace::Tparticle Tparticle; void to_json(json &j) const override { for (auto i : this->vec) j.push_back(*i); } void addEwald(const json &j, Tspace &spc) { if (j.count("coulomb")==1) if (j["coulomb"].at("type")=="ewald") push_back<Energy::Ewald<Tspace>>(j["coulomb"], spc); } //!< Adds an instance of reciprocal space Ewald energies (if appropriate) public: Hamiltonian(Tspace &spc, const json &j) { using namespace Potential; typedef CombinedPairPotential<CoulombGalore,LennardJones<Tparticle>> CoulombLJ; typedef CombinedPairPotential<CoulombGalore,HardSphere<Tparticle>> CoulombHS; typedef CombinedPairPotential<CoulombGalore,WeeksChandlerAndersen<Tparticle>> CoulombWCA; typedef CombinedPairPotential<Coulomb,WeeksChandlerAndersen<Tparticle>> PrimitiveModelWCA; Energybase::name="hamiltonian"; for (auto &m : j.at("energy")) {// loop over move list size_t oldsize = vec.size(); for (auto it=m.begin(); it!=m.end(); ++it) { try { if (it.key()=="nonbonded_coulomblj") push_back<Energy::Nonbonded<Tspace,CoulombLJ>>(it.value(), spc); if (it.key()=="nonbonded") push_back<Energy::Nonbonded<Tspace,FunctorPotential<typename Tspace::Tparticle>>>(it.value(), spc); if (it.key()=="nonbonded_coulombhs") push_back<Energy::Nonbonded<Tspace,CoulombHS>>(it.value(), spc); if (it.key()=="nonbonded_coulombwca") push_back<Energy::Nonbonded<Tspace,CoulombWCA>>(it.value(), spc); if (it.key()=="nonbonded_pmwca") push_back<Energy::Nonbonded<Tspace,PrimitiveModelWCA>>(it.value(), spc); if (it.key()=="nonbonded_deserno") push_back<Energy::NonbondedCached<Tspace,DesernoMembrane<typename Tspace::Tparticle>>>(it.value(), spc); if (it.key()=="nonbonded_desernoAA") push_back<Energy::NonbondedCached<Tspace,DesernoMembraneAA<typename Tspace::Tparticle>>>(it.value(), spc); if (it.key()=="bonded") push_back<Energy::Bonded<Tspace>>(it.value(), spc); if (it.key()=="confine") push_back<Energy::Confine<Tspace>>(it.value(), spc); if (it.key()=="example2d") push_back<Energy::Example2D>(it.value(), spc); if (it.key()=="isobaric") push_back<Energy::Isobaric<Tspace>>(it.value(), spc); if (it.key()=="penalty") #ifdef ENABLE_MPI push_back<Energy::PenaltyMPI<Tspace>>(it.value(), spc); #else push_back<Energy::Penalty<Tspace>>(it.value(), spc); #endif #ifdef ENABLE_POWERSASA if (it.key()=="sasa") push_back<Energy::SASAEnergy<Tspace>>(it.value(), spc); #endif // additional energies go here... 
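            /*
             * Illustrative input for the dispatch above (values schematic,
             * not a complete configuration):
             *
             *   "energy": [
             *     { "nonbonded_coulomblj": { "cutoff_g2g": 12.0 } },
             *     { "isobaric": { "P/atm": 1.0 } }
             *   ]
             */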
addEwald(it.value(), spc); // add reciprocal Ewald terms if appropriate if (it.key()=="maxenergy") { maxenergy = it.value().get<double>(); continue; } if (vec.size()==oldsize) throw std::runtime_error("unknown term"); } catch (std::exception &e) { throw std::runtime_error("Error adding energy '" + it.key() + "': " + e.what()); } } } } double energy(Change &change) override { double du=0; for (auto i : this->vec) { i->key=key; du += i->energy(change); if (du>maxenergy) break; // stop summing energies } return du; } //!< Energy due to changes void init() override { for (auto i : this->vec) i->init(); } void sync(Energybase* basePtr, Change &change) override { auto other = dynamic_cast<decltype(this)>(basePtr); if (other) if (other->size()==size()) { for (size_t i=0; i<size(); i++) this->vec[i]->sync( other->vec[i].get(), change ); return; } throw std::runtime_error("hamiltonian mismatch"); } }; //!< Aggregates and sum energy terms }//namespace }//namespace
shear.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%                       SSSSS  H   H  EEEEE   AAA    RRRR                     %
%                       SS     H   H  E      A   A   R   R                    %
%                        SSS   HHHHH  EEE    AAAAA   RRRR                     %
%                          SS  H   H  E      A   A   R R                      %
%                       SSSSS  H   H  EEEEE  A   A   R  R                     %
%                                                                             %
%                                                                             %
%    MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle     %
%                                                                             %
%                               Software Design                               %
%                                    Cristy                                   %
%                                  July 1992                                  %
%                                                                             %
%                                                                             %
%  Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://www.imagemagick.org/script/license.php                           %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  The XShearImage() and YShearImage() methods are based on the paper "A Fast
%  Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics
%  Interface '86 (Vancouver).  ShearRotateImage() is adapted from a similar
%  method based on the Paeth paper written by Michael Halle of the Spatial
%  Imaging Group, MIT Media Lab.
%
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/shear.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C r o p   T o   F i t   I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropToFitImage() crops the sheared image as determined by the bounding box
%  as defined by width and height and shearing angles.
%
%  The format of the CropToFitImage method is:
%
%      MagickBooleanType CropToFitImage(Image **image,
%        const double x_shear,const double y_shear,
%        const double width,const double height,
%        const MagickBooleanType rotate,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
%    o exception: return any errors or warnings in this structure.
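%
%  Note: the crop rectangle is the axis-aligned bounding box of the four
%  image corners after the shears are applied (X, then Y, then a second X
%  shear for rotations), re-centered on the original image.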
%
*/
static MagickBooleanType CropToFitImage(Image **image,
  const double x_shear,const double y_shear,
  const double width,const double height,
  const MagickBooleanType rotate,ExceptionInfo *exception)
{
  Image
    *crop_image;

  PointInfo
    extent[4],
    min,
    max;

  RectangleInfo
    geometry,
    page;

  register ssize_t
    i;

  /*
    Calculate the rotated image size.
  */
  extent[0].x=(double) (-width/2.0);
  extent[0].y=(double) (-height/2.0);
  extent[1].x=(double) width/2.0;
  extent[1].y=(double) (-height/2.0);
  extent[2].x=(double) (-width/2.0);
  extent[2].y=(double) height/2.0;
  extent[3].x=(double) width/2.0;
  extent[3].y=(double) height/2.0;
  for (i=0; i < 4; i++)
  {
    extent[i].x+=x_shear*extent[i].y;
    extent[i].y+=y_shear*extent[i].x;
    if (rotate != MagickFalse)
      extent[i].x+=x_shear*extent[i].y;
    extent[i].x+=(double) (*image)->columns/2.0;
    extent[i].y+=(double) (*image)->rows/2.0;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  geometry.x=(ssize_t) ceil(min.x-0.5);
  geometry.y=(ssize_t) ceil(min.y-0.5);
  geometry.width=(size_t) floor(max.x-min.x+0.5);
  geometry.height=(size_t) floor(max.y-min.y+0.5);
  page=(*image)->page;
  (void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
  crop_image=CropImage(*image,&geometry,exception);
  if (crop_image == (Image *) NULL)
    return(MagickFalse);
  crop_image->page=page;
  *image=DestroyImage(*image);
  *image=crop_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s k e w   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeskewImage() removes skew from the image.  Skew is an artifact that
%  occurs in scanned images because of the camera being misaligned,
%  imperfections in the scanning or surface, or simply because the paper was
%  not placed completely flat when scanned.
%
%  The result will be auto-cropped if the artifact "deskew:auto-crop" is
%  defined, while the amount the image is to be deskewed, in degrees, is also
%  saved as the artifact "deskew:angle".
%
%  The format of the DeskewImage method is:
%
%      Image *DeskewImage(const Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: separate background from foreground.
%
%    o exception: return any errors or warnings in this structure.
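%
%  A minimal usage sketch (the 40% threshold is illustrative only, not a
%  recommended default):
%
%      Image *deskew_image=DeskewImage(image,0.40*QuantumRange,exception);
%      if (deskew_image != (Image *) NULL)
%        {
%          image=DestroyImage(image);
%          image=deskew_image;
%        }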
% */ static void RadonProjection(const Image *image,MatrixInfo *source_matrixs, MatrixInfo *destination_matrixs,const ssize_t sign,size_t *projection) { MatrixInfo *swap; register MatrixInfo *p, *q; register ssize_t x; size_t step; p=source_matrixs; q=destination_matrixs; for (step=1; step < GetMatrixColumns(p); step*=2) { for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step) { register ssize_t i; ssize_t y; unsigned short element, neighbor; for (i=0; i < (ssize_t) step; i++) { for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++) { if (GetMatrixElement(p,x+i,y,&element) == MagickFalse) continue; if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse) continue; neighbor+=element; if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse) continue; if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse) continue; neighbor+=element; if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse) continue; } for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++) { if (GetMatrixElement(p,x+i,y,&element) == MagickFalse) continue; if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse) continue; neighbor+=element; if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse) continue; if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse) continue; } for ( ; y < (ssize_t) GetMatrixRows(p); y++) { if (GetMatrixElement(p,x+i,y,&element) == MagickFalse) continue; if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse) continue; if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse) continue; } } } swap=p; p=q; q=swap; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_number_threads(image,image,GetMatrixColumns(p),1) #endif for (x=0; x < (ssize_t) GetMatrixColumns(p); x++) { register ssize_t y; size_t sum; sum=0; for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++) { ssize_t delta; unsigned short element, neighbor; if (GetMatrixElement(p,x,y,&element) == MagickFalse) continue; if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse) continue; delta=(ssize_t) element-(ssize_t) neighbor; sum+=delta*delta; } projection[GetMatrixColumns(p)+sign*x-1]=sum; } } static MagickBooleanType RadonTransform(const Image *image, const double threshold,size_t *projection,ExceptionInfo *exception) { CacheView *image_view; MatrixInfo *destination_matrixs, *source_matrixs; MagickBooleanType status; size_t count, width; ssize_t j, y; unsigned char c; unsigned short bits[256]; for (width=1; width < ((image->columns+7)/8); width<<=1) ; source_matrixs=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short), exception); destination_matrixs=AcquireMatrixInfo(width,image->rows, sizeof(unsigned short),exception); if ((source_matrixs == (MatrixInfo *) NULL) || (destination_matrixs == (MatrixInfo *) NULL)) { if (destination_matrixs != (MatrixInfo *) NULL) destination_matrixs=DestroyMatrixInfo(destination_matrixs); if (source_matrixs != (MatrixInfo *) NULL) source_matrixs=DestroyMatrixInfo(source_matrixs); return(MagickFalse); } if (NullMatrix(source_matrixs) == MagickFalse) { destination_matrixs=DestroyMatrixInfo(destination_matrixs); source_matrixs=DestroyMatrixInfo(source_matrixs); return(MagickFalse); } for (j=0; j < 256; j++) { c=(unsigned char) j; for (count=0; c != 0; c>>=1) count+=c & 0x01; bits[j]=(unsigned short) count; } status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for 
(y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t i, x; size_t bit, byte; unsigned short value; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } bit=0; byte=0; i=(ssize_t) (image->columns+7)/8; for (x=0; x < (ssize_t) image->columns; x++) { byte<<=1; if (((MagickRealType) GetPixelRed(image,p) < threshold) || ((MagickRealType) GetPixelGreen(image,p) < threshold) || ((MagickRealType) GetPixelBlue(image,p) < threshold)) byte|=0x01; bit++; if (bit == 8) { value=bits[byte]; (void) SetMatrixElement(source_matrixs,--i,y,&value); bit=0; byte=0; } p+=GetPixelChannels(image); } if (bit != 0) { byte<<=(8-bit); value=bits[byte]; (void) SetMatrixElement(source_matrixs,--i,y,&value); } } RadonProjection(image,source_matrixs,destination_matrixs,-1,projection); (void) NullMatrix(source_matrixs); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t i, x; size_t bit, byte; unsigned short value; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } bit=0; byte=0; i=0; for (x=0; x < (ssize_t) image->columns; x++) { byte<<=1; if (((MagickRealType) GetPixelRed(image,p) < threshold) || ((MagickRealType) GetPixelGreen(image,p) < threshold) || ((MagickRealType) GetPixelBlue(image,p) < threshold)) byte|=0x01; bit++; if (bit == 8) { value=bits[byte]; (void) SetMatrixElement(source_matrixs,i++,y,&value); bit=0; byte=0; } p+=GetPixelChannels(image); } if (bit != 0) { byte<<=(8-bit); value=bits[byte]; (void) SetMatrixElement(source_matrixs,i++,y,&value); } } RadonProjection(image,source_matrixs,destination_matrixs,1,projection); image_view=DestroyCacheView(image_view); destination_matrixs=DestroyMatrixInfo(destination_matrixs); source_matrixs=DestroyMatrixInfo(source_matrixs); return(MagickTrue); } static void GetImageBackgroundColor(Image *image,const ssize_t offset, ExceptionInfo *exception) { CacheView *image_view; PixelInfo background; double count; ssize_t y; /* Compute average background color. 
*/ if (offset <= 0) return; GetPixelInfo(image,&background); count=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if ((y >= offset) && (y < ((ssize_t) image->rows-offset))) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { if ((x >= offset) && (x < ((ssize_t) image->columns-offset))) continue; background.red+=QuantumScale*GetPixelRed(image,p); background.green+=QuantumScale*GetPixelGreen(image,p); background.blue+=QuantumScale*GetPixelBlue(image,p); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) background.alpha+=QuantumScale*GetPixelAlpha(image,p); count++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); image->background_color.red=(double) ClampToQuantum(QuantumRange* background.red/count); image->background_color.green=(double) ClampToQuantum(QuantumRange* background.green/count); image->background_color.blue=(double) ClampToQuantum(QuantumRange* background.blue/count); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->background_color.alpha=(double) ClampToQuantum(QuantumRange* background.alpha/count); } MagickExport Image *DeskewImage(const Image *image,const double threshold, ExceptionInfo *exception) { AffineMatrix affine_matrix; const char *artifact; double degrees; Image *clone_image, *crop_image, *deskew_image, *median_image; MagickBooleanType status; RectangleInfo geometry; register ssize_t i; size_t max_projection, *projection, width; ssize_t skew; /* Compute deskew angle. */ for (width=1; width < ((image->columns+7)/8); width<<=1) ; projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1), sizeof(*projection)); if (projection == (size_t *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); status=RadonTransform(image,threshold,projection,exception); if (status == MagickFalse) { projection=(size_t *) RelinquishMagickMemory(projection); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } max_projection=0; skew=0; for (i=0; i < (ssize_t) (2*width-1); i++) { if (projection[i] > max_projection) { skew=i-(ssize_t) width+1; max_projection=projection[i]; } } projection=(size_t *) RelinquishMagickMemory(projection); degrees=RadiansToDegrees(-atan((double) skew/width/8)); if (image->debug != MagickFalse) (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Deskew angle: %g",degrees); /* Deskew image. */ clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); { char angle[MagickPathExtent]; (void) FormatLocaleString(angle,MagickPathExtent,"%.20g",degrees); (void) SetImageArtifact(clone_image,"deskew:angle",angle); } (void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod, exception); affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0))); affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0))); affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0)))); affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0))); affine_matrix.tx=0.0; affine_matrix.ty=0.0; artifact=GetImageArtifact(image,"deskew:auto-crop"); if (IsStringTrue(artifact) == MagickFalse) { deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception); clone_image=DestroyImage(clone_image); return(deskew_image); } /* Auto-crop image. 
*/ GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact), exception); deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception); clone_image=DestroyImage(clone_image); if (deskew_image == (Image *) NULL) return((Image *) NULL); median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception); if (median_image == (Image *) NULL) { deskew_image=DestroyImage(deskew_image); return((Image *) NULL); } geometry=GetImageBoundingBox(median_image,exception); median_image=DestroyImage(median_image); if (image->debug != MagickFalse) (void) LogMagickEvent(TransformEvent,GetMagickModule()," Deskew geometry: " "%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double) geometry.height,(double) geometry.x,(double) geometry.y); crop_image=CropImage(deskew_image,&geometry,exception); deskew_image=DestroyImage(deskew_image); return(crop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e g r a l R o t a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IntegralRotateImage() rotates the image an integral of 90 degrees. It % allocates the memory necessary for the new Image structure and returns a % pointer to the rotated image. % % The format of the IntegralRotateImage method is: % % Image *IntegralRotateImage(const Image *image,size_t rotations, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o rotations: Specifies the number of 90 degree rotations. % */ MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations, ExceptionInfo *exception) { #define RotateImageTag "Rotate/Image" CacheView *image_view, *rotate_view; Image *rotate_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; /* Initialize rotated image attributes. */ assert(image != (Image *) NULL); page=image->page; rotations%=4; if (rotations == 0) return(CloneImage(image,0,0,MagickTrue,exception)); if ((rotations == 1) || (rotations == 3)) rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); else rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (rotate_image == (Image *) NULL) return((Image *) NULL); /* Integral rotate the image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); rotate_view=AcquireAuthenticCacheView(rotate_image,exception); switch (rotations) { case 1: { size_t tile_height, tile_width; ssize_t tile_y; /* Rotate 90 degrees. 
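      The tiled copy below sends source pixel (x,y) to destination pixel
      (rows-1-y,x), i.e. a 90 degree clockwise turn; tiling keeps both the
      read and write cache views local.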
*/ GetPixelCacheTileSize(image,&tile_width,&tile_height); tile_width=image->columns; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows/tile_height,1) #endif for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height) { register ssize_t tile_x; if (status == MagickFalse) continue; tile_x=0; for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t y; size_t height, width; width=tile_width; if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns) width=(size_t) (tile_width-(tile_x+tile_width-image->columns)); height=tile_height; if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows) height=(size_t) (tile_height-(tile_y+tile_height-image->rows)); p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (y=0; y < (ssize_t) width; y++) { register const Quantum *magick_restrict tile_pixels; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t) (rotate_image->columns-(tile_y+height)),y+tile_x,height,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } tile_pixels=p+((height-1)*width+y)*GetPixelChannels(image); for (x=0; x < (ssize_t) height; x++) { register ssize_t i; if (GetPixelWriteMask(image,tile_pixels) <= (QuantumRange/2)) { tile_pixels-=width*GetPixelChannels(image); q+=GetPixelChannels(rotate_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image, channel); if ((traits == UndefinedPixelTrait) || (rotate_traits == UndefinedPixelTrait)) continue; SetPixelChannel(rotate_image,channel,tile_pixels[i],q); } tile_pixels-=width*GetPixelChannels(image); q+=GetPixelChannels(rotate_image); } sync=SyncCacheViewAuthenticPixels(rotate_view,exception); if (sync == MagickFalse) status=MagickFalse; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_IntegralRotateImage) #endif proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType) image->rows-1,image->rows); Swap(page.width,page.height); Swap(page.x,page.y); if (page.width != 0) page.x=(ssize_t) (page.width-rotate_image->columns-page.x); break; } case 2: { register ssize_t y; /* Rotate 180 degrees. 
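      Each source pixel (x,y) is copied to destination (columns-1-x,rows-1-y):
      rows are read forward and written backward, so no tiling is required.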
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y- 1),image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } q+=GetPixelChannels(rotate_image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; q-=GetPixelChannels(rotate_image); if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image, channel); if ((traits == UndefinedPixelTrait) || (rotate_traits == UndefinedPixelTrait)) continue; SetPixelChannel(rotate_image,channel,p[i],q); } p+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(rotate_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_IntegralRotateImage) #endif proceed=SetImageProgress(image,RotateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType) image->rows-1,image->rows); if (page.width != 0) page.x=(ssize_t) (page.width-rotate_image->columns-page.x); if (page.height != 0) page.y=(ssize_t) (page.height-rotate_image->rows-page.y); break; } case 3: { size_t tile_height, tile_width; ssize_t tile_y; /* Rotate 270 degrees. 
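      The inverse of the 90 degree case: source pixel (x,y) is copied to
      destination (y,columns-1-x), again in cache-friendly tiles.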
*/ GetPixelCacheTileSize(image,&tile_width,&tile_height); tile_width=image->columns; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows/tile_height,1) #endif for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height) { register ssize_t tile_x; if (status == MagickFalse) continue; tile_x=0; for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t y; size_t height, width; width=tile_width; if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns) width=(size_t) (tile_width-(tile_x+tile_width-image->columns)); height=tile_height; if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows) height=(size_t) (tile_height-(tile_y+tile_height-image->rows)); p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (y=0; y < (ssize_t) width; y++) { register const Quantum *magick_restrict tile_pixels; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+ rotate_image->rows-(tile_x+width)),height,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } tile_pixels=p+((width-1)-y)*GetPixelChannels(image); for (x=0; x < (ssize_t) height; x++) { register ssize_t i; if (GetPixelWriteMask(image,tile_pixels) <= (QuantumRange/2)) { tile_pixels+=width*GetPixelChannels(image); q+=GetPixelChannels(rotate_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image, channel); if ((traits == UndefinedPixelTrait) || (rotate_traits == UndefinedPixelTrait)) continue; SetPixelChannel(rotate_image,channel,tile_pixels[i],q); } tile_pixels+=width*GetPixelChannels(image); q+=GetPixelChannels(rotate_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_IntegralRotateImage) #endif sync=SyncCacheViewAuthenticPixels(rotate_view,exception); if (sync == MagickFalse) status=MagickFalse; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType) image->rows-1,image->rows); Swap(page.width,page.height); Swap(page.x,page.y); if (page.height != 0) page.y=(ssize_t) (page.height-rotate_image->rows-page.y); break; } default: break; } rotate_view=DestroyCacheView(rotate_view); image_view=DestroyCacheView(image_view); rotate_image->type=image->type; rotate_image->page=page; if (status == MagickFalse) rotate_image=DestroyImage(rotate_image); return(rotate_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + X S h e a r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % XShearImage() shears the image in the X direction with a shear angle of % 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and % negative angles shear clockwise. Angles are measured relative to a vertical % Y-axis. 
X shears will widen an image creating 'empty' triangles on the left % and right sides of the source image. % % The format of the XShearImage method is: % % MagickBooleanType XShearImage(Image *image,const double degrees, % const size_t width,const size_t height, % const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o degrees: A double representing the shearing angle along the X % axis. % % o width, height, x_offset, y_offset: Defines a region of the image % to shear. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType XShearImage(Image *image,const double degrees, const size_t width,const size_t height,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define XShearImageTag "XShear/Image" typedef enum { LEFT, RIGHT } ShearDirection; CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo background; ssize_t y; /* X shear image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; background=image->background_color; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,image,height,1) #endif for (y=0; y < (ssize_t) height; y++) { PixelInfo pixel, source, destination; double area, displacement; register Quantum *magick_restrict p, *magick_restrict q; register ssize_t i; ShearDirection direction; ssize_t step; if (status == MagickFalse) continue; p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1, exception); if (p == (Quantum *) NULL) { status=MagickFalse; continue; } p+=x_offset*GetPixelChannels(image); displacement=degrees*(double) (y-height/2.0); if (displacement == 0.0) continue; if (displacement > 0.0) direction=RIGHT; else { displacement*=(-1.0); direction=LEFT; } step=(ssize_t) floor((double) displacement); area=(double) (displacement-step); step++; pixel=background; GetPixelInfo(image,&source); GetPixelInfo(image,&destination); switch (direction) { case LEFT: { /* Transfer pixels left-to-right. */ if (step > x_offset) break; q=p-step*GetPixelChannels(image); for (i=0; i < (ssize_t) width; i++) { if ((x_offset+i) < step) { p+=GetPixelChannels(image); GetPixelInfoPixel(image,p,&pixel); q+=GetPixelChannels(image); continue; } GetPixelInfoPixel(image,p,&source); CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &source,(double) GetPixelAlpha(image,p),area,&destination); SetPixelViaPixelInfo(image,&destination,q); GetPixelInfoPixel(image,p,&pixel); p+=GetPixelChannels(image); q+=GetPixelChannels(image); } CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &background,(double) background.alpha,area,&destination); SetPixelViaPixelInfo(image,&destination,q); q+=GetPixelChannels(image); for (i=0; i < (step-1); i++) { SetPixelViaPixelInfo(image,&background,q); q+=GetPixelChannels(image); } break; } case RIGHT: { /* Transfer pixels right-to-left. 
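      Each row y is displaced horizontally by degrees*(y-height/2) pixels;
      the integer part of the displacement is a whole-pixel shift while the
      fractional remainder 'area' is blended between neighboring pixels with
      CompositePixelInfoAreaBlend() to anti-alias the sheared edge.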
*/ p+=width*GetPixelChannels(image); q=p+step*GetPixelChannels(image); for (i=0; i < (ssize_t) width; i++) { p-=GetPixelChannels(image); q-=GetPixelChannels(image); if ((size_t) (x_offset+width+step-i) > image->columns) continue; GetPixelInfoPixel(image,p,&source); CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &source,(double) GetPixelAlpha(image,p),area,&destination); SetPixelViaPixelInfo(image,&destination,q); GetPixelInfoPixel(image,p,&pixel); } CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &background,(double) background.alpha,area,&destination); q-=GetPixelChannels(image); SetPixelViaPixelInfo(image,&destination,q); for (i=0; i < (step-1); i++) { q-=GetPixelChannels(image); SetPixelViaPixelInfo(image,&background,q); } break; } } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_XShearImage) #endif proceed=SetImageProgress(image,XShearImageTag,progress++,height); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Y S h e a r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % YShearImage shears the image in the Y direction with a shear angle of % 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and % negative angles shear clockwise. Angles are measured relative to a % horizontal X-axis. Y shears will increase the height of an image creating % 'empty' triangles on the top and bottom of the source image. % % The format of the YShearImage method is: % % MagickBooleanType YShearImage(Image *image,const double degrees, % const size_t width,const size_t height, % const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o degrees: A double representing the shearing angle along the Y % axis. % % o width, height, x_offset, y_offset: Defines a region of the image % to shear. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType YShearImage(Image *image,const double degrees, const size_t width,const size_t height,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define YShearImageTag "YShear/Image" typedef enum { UP, DOWN } ShearDirection; CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo background; ssize_t x; /* Y Shear image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; background=image->background_color; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,image,width,1) #endif for (x=0; x < (ssize_t) width; x++) { ssize_t step; double area, displacement; PixelInfo pixel, source, destination; register Quantum *magick_restrict p, *magick_restrict q; register ssize_t i; ShearDirection direction; if (status == MagickFalse) continue; p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows, exception); if (p == (Quantum *) NULL) { status=MagickFalse; continue; } p+=y_offset*GetPixelChannels(image); displacement=degrees*(double) (x-width/2.0); if (displacement == 0.0) continue; if (displacement > 0.0) direction=DOWN; else { displacement*=(-1.0); direction=UP; } step=(ssize_t) floor((double) displacement); area=(double) (displacement-step); step++; pixel=background; GetPixelInfo(image,&source); GetPixelInfo(image,&destination); switch (direction) { case UP: { /* Transfer pixels top-to-bottom. */ if (step > y_offset) break; q=p-step*GetPixelChannels(image); for (i=0; i < (ssize_t) height; i++) { if ((y_offset+i) < step) { p+=GetPixelChannels(image); GetPixelInfoPixel(image,p,&pixel); q+=GetPixelChannels(image); continue; } GetPixelInfoPixel(image,p,&source); CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &source,(double) GetPixelAlpha(image,p),area, &destination); SetPixelViaPixelInfo(image,&destination,q); GetPixelInfoPixel(image,p,&pixel); p+=GetPixelChannels(image); q+=GetPixelChannels(image); } CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &background,(double) background.alpha,area,&destination); SetPixelViaPixelInfo(image,&destination,q); q+=GetPixelChannels(image); for (i=0; i < (step-1); i++) { SetPixelViaPixelInfo(image,&background,q); q+=GetPixelChannels(image); } break; } case DOWN: { /* Transfer pixels bottom-to-top. 
*/ p+=height*GetPixelChannels(image); q=p+step*GetPixelChannels(image); for (i=0; i < (ssize_t) height; i++) { p-=GetPixelChannels(image); q-=GetPixelChannels(image); if ((size_t) (y_offset+height+step-i) > image->rows) continue; GetPixelInfoPixel(image,p,&source); CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &source,(double) GetPixelAlpha(image,p),area, &destination); SetPixelViaPixelInfo(image,&destination,q); GetPixelInfoPixel(image,p,&pixel); } CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &background,(double) background.alpha,area,&destination); q-=GetPixelChannels(image); SetPixelViaPixelInfo(image,&destination,q); for (i=0; i < (step-1); i++) { q-=GetPixelChannels(image); SetPixelViaPixelInfo(image,&background,q); } break; } } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_YShearImage) #endif proceed=SetImageProgress(image,YShearImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h e a r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShearImage() creates a new image that is a sheared copy of an existing % one. Shearing slides one edge of an image along the X or Y axis, creating % a parallelogram. An X direction shear slides an edge along the X axis, % while a Y direction shear slides an edge along the Y axis. The amount of % the shear is controlled by a shear angle. For X direction shears, x_shear % is measured relative to the Y axis, and similarly, for Y direction shears % y_shear is measured relative to the X axis. Empty triangles left over from % shearing the image are filled with the background color defined by member % 'background_color' of the image. ShearImage() allocates the memory % necessary for the new Image structure and returns a pointer to the new image. % % ShearImage() is based on the paper "A Fast Algorithm for General Raster % Rotation" by Alan W. Paeth. % % The format of the ShearImage method is: % % Image *ShearImage(const Image *image,const double x_shear, % const double y_shear,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o x_shear, y_shear: Specifies the number of degrees to shear the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShearImage(const Image *image,const double x_shear, const double y_shear,ExceptionInfo *exception) { Image *integral_image, *shear_image; MagickBooleanType status; PointInfo shear; RectangleInfo border_info, bounds; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0)) ThrowImageException(ImageError,"AngleIsDiscontinuous"); if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0)) ThrowImageException(ImageError,"AngleIsDiscontinuous"); /* Initialize shear angle.
*/ integral_image=CloneImage(image,0,0,MagickTrue,exception); if (integral_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0)))); shear.y=tan(DegreesToRadians(fmod(y_shear,360.0))); if ((shear.x == 0.0) && (shear.y == 0.0)) return(integral_image); if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse) { integral_image=DestroyImage(integral_image); return(integral_image); } if (integral_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception); /* Compute image size. */ bounds.width=image->columns+(ssize_t) floor(fabs(shear.x)*image->rows+0.5); bounds.x=(ssize_t) ceil((double) image->columns+((fabs(shear.x)*image->rows)- image->columns)/2.0-0.5); bounds.y=(ssize_t) ceil((double) image->rows+((fabs(shear.y)*bounds.width)- image->rows)/2.0-0.5); /* Surround image with border. */ integral_image->border_color=integral_image->background_color; integral_image->compose=CopyCompositeOp; border_info.width=(size_t) bounds.x; border_info.height=(size_t) bounds.y; shear_image=BorderImage(integral_image,&border_info,image->compose,exception); integral_image=DestroyImage(integral_image); if (shear_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); /* Shear the image. */ if (shear_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel,exception); status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x, (ssize_t) (shear_image->rows-image->rows)/2,exception); if (status == MagickFalse) { shear_image=DestroyImage(shear_image); return((Image *) NULL); } status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t) (shear_image->columns-bounds.width)/2,bounds.y,exception); if (status == MagickFalse) { shear_image=DestroyImage(shear_image); return((Image *) NULL); } status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType) image->columns,(MagickRealType) image->rows,MagickFalse,exception); shear_image->alpha_trait=image->alpha_trait; shear_image->compose=image->compose; shear_image->page.width=0; shear_image->page.height=0; if (status == MagickFalse) shear_image=DestroyImage(shear_image); return(shear_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h e a r R o t a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShearRotateImage() creates a new image that is a rotated copy of an existing % one. Positive angles rotate counter-clockwise (right-hand rule), while % negative angles rotate clockwise. Rotated images are usually larger than % the originals and have 'empty' triangular corners. Empty % triangles left over from shearing the image are filled with the background % color defined by member 'background_color' of the image. ShearRotateImage % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % ShearRotateImage() is based on the paper "A Fast Algorithm for General % Raster Rotation" by Alan W. Paeth. ShearRotateImage is adapted from a % similar method based on the Paeth paper written by Michael Halle of the % Spatial Imaging Group, MIT Media Lab.
% % The format of the ShearRotateImage method is: % % Image *ShearRotateImage(const Image *image,const double degrees, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o degrees: Specifies the number of degrees to rotate the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShearRotateImage(const Image *image,const double degrees, ExceptionInfo *exception) { Image *integral_image, *rotate_image; MagickBooleanType status; MagickRealType angle; PointInfo shear; RectangleInfo border_info, bounds; size_t height, rotations, shear_width, width; /* Adjust rotation angle. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); angle=degrees-360.0*(ssize_t) (degrees/360.0); if (angle < -45.0) angle+=360.0; for (rotations=0; angle > 45.0; rotations++) angle-=90.0; rotations%=4; /* Calculate shear equations. */ integral_image=IntegralRotateImage(image,rotations,exception); if (integral_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); shear.x=(-tan((double) DegreesToRadians(angle)/2.0)); shear.y=sin((double) DegreesToRadians(angle)); if ((shear.x == 0.0) && (shear.y == 0.0)) return(integral_image); if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse) { integral_image=DestroyImage(integral_image); return(integral_image); } if (integral_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception); /* Compute maximum bounds for 3 shear operations. */ width=integral_image->columns; height=integral_image->rows; bounds.width=(size_t) floor(fabs((double) height*shear.x)+width+0.5); bounds.height=(size_t) floor(fabs((double) bounds.width*shear.y)+height+0.5); shear_width=(size_t) floor(fabs((double) bounds.height*shear.x)+ bounds.width+0.5); bounds.x=(ssize_t) floor((double) ((shear_width > bounds.width) ? width : bounds.width-shear_width+2)/2.0+0.5); bounds.y=(ssize_t) floor(((double) bounds.height-height+2)/2.0+0.5); /* Surround image with a border. */ integral_image->border_color=integral_image->background_color; integral_image->compose=CopyCompositeOp; border_info.width=(size_t) bounds.x; border_info.height=(size_t) bounds.y; rotate_image=BorderImage(integral_image,&border_info,image->compose, exception); integral_image=DestroyImage(integral_image); if (rotate_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); /* Rotate the image. 
*/ status=XShearImage(rotate_image,shear.x,width,height,bounds.x,(ssize_t) (rotate_image->rows-height)/2,exception); if (status == MagickFalse) { rotate_image=DestroyImage(rotate_image); return((Image *) NULL); } status=YShearImage(rotate_image,shear.y,bounds.width,height,(ssize_t) (rotate_image->columns-bounds.width)/2,bounds.y,exception); if (status == MagickFalse) { rotate_image=DestroyImage(rotate_image); return((Image *) NULL); } status=XShearImage(rotate_image,shear.x,bounds.width,bounds.height,(ssize_t) (rotate_image->columns-bounds.width)/2,(ssize_t) (rotate_image->rows- bounds.height)/2,exception); if (status == MagickFalse) { rotate_image=DestroyImage(rotate_image); return((Image *) NULL); } status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width, (MagickRealType) height,MagickTrue,exception); rotate_image->alpha_trait=image->alpha_trait; rotate_image->compose=image->compose; rotate_image->page.width=0; rotate_image->page.height=0; if (status == MagickFalse) rotate_image=DestroyImage(rotate_image); return(rotate_image); }
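For reference, here is a minimal standalone sketch of the three-shear decomposition that ShearRotateImage() above implements. It is not part of the ImageMagick sources; the helper and type names below are hypothetical. The underlying identity is that a rotation by angle a factors into an X shear by -tan(a/2), a Y shear by sin(a), and a second X shear by -tan(a/2), which is exactly why ShearRotateImage() computes shear.x=(-tan(DegreesToRadians(angle)/2.0)) and shear.y=sin(DegreesToRadians(angle)).

/*
  Hypothetical demonstration (not part of the original source): apply
  Paeth's three shears to a single point and observe that their
  composition is a rotation.  Compile with: cc demo.c -lm
*/
#include <math.h>
#include <stdio.h>

typedef struct { double x, y; } DemoPoint;

static DemoPoint DemoPaethRotate(const DemoPoint point,const double radians)
{
  const double
    shear_x = -tan(radians/2.0),  /* plays the role of shear.x above */
    shear_y = sin(radians);       /* plays the role of shear.y above */

  DemoPoint
    result = point;

  result.x+=shear_x*result.y;  /* first X shear */
  result.y+=shear_y*result.x;  /* Y shear */
  result.x+=shear_x*result.y;  /* second X shear */
  return(result);
}

int main(void)
{
  DemoPoint
    point = { 1.0, 0.0 };

  /* A 90 degree turn maps (1,0) to approximately (0,1). */
  point=DemoPaethRotate(point,1.5707963267948966);
  (void) printf("%g %g\n",point.x,point.y);
  return(0);
}

Because tan(a/2) diverges as the angle approaches 180 degrees, ShearRotateImage() first strips quarter turns with IntegralRotateImage() so the residual angle lies in (-45, 45] degrees, keeping all three shear factors small.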
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache-private.h" #include "MagickCore/channel.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/list.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/resource_.h" #include "MagickCore/shear.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + C r o p T o F i t I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CropToFitImage() crops the sheared image as determined by the * bounding box % as defined by width and height and shearing angles. % % * The format of the CropToFitImage method is: % % MagickBooleanType * CropToFitImage(Image **image, % const double x_shear,const double * y_shear, % const double width,const double height, % const * MagickBooleanType rotate,ExceptionInfo *exception) % % A description of * each parameter follows. % % o image: the image. % % o x_shear, * y_shear, width, height: Defines a region of the image to crop. % % o * exception: return any errors or warnings in this structure. % */ static MagickBooleanType CropToFitImage(Image ** image, const double x_shear, const double y_shear, const double width, const double height, const MagickBooleanType rotate, ExceptionInfo * exception) { Image * crop_image; PointInfo extent[4], min, max; RectangleInfo geometry, page; register ssize_t i; /* * Calculate the rotated image size.
*/ extent[0].x = (double)(-width / 2.0); extent[0].y = (double)(-height / 2.0); extent[1].x = (double)width / 2.0; extent[1].y = (double)(-height / 2.0); extent[2].x = (double)(-width / 2.0); extent[2].y = (double)height / 2.0; extent[3].x = (double)width / 2.0; extent[3].y = (double)height / 2.0; for (i = 0; i < 4; i++) { extent[i].x += x_shear * extent[i].y; extent[i].y += y_shear * extent[i].x; if (rotate != MagickFalse) extent[i].x += x_shear * extent[i].y; extent[i].x += (double)(*image)->columns / 2.0; extent[i].y += (double)(*image)->rows / 2.0; } min = extent[0]; max = extent[0]; for (i = 1; i < 4; i++) { if (min.x > extent[i].x) min.x = extent[i].x; if (min.y > extent[i].y) min.y = extent[i].y; if (max.x < extent[i].x) max.x = extent[i].x; if (max.y < extent[i].y) max.y = extent[i].y; } geometry.x = (ssize_t) ceil(min.x - 0.5); geometry.y = (ssize_t) ceil(min.y - 0.5); geometry.width = (size_t) floor(max.x - min.x + 0.5); geometry.height = (size_t) floor(max.y - min.y + 0.5); page = (*image)->page; (void)ParseAbsoluteGeometry("0x0+0+0", &(*image)->page); crop_image = CropImage(*image, &geometry, exception); if (crop_image == (Image *) NULL) return (MagickFalse); crop_image->page = page; *image = DestroyImage(*image); *image = crop_image; return (MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D e s k e w I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DeskewImage() removes skew from the image. Skew is an artifact * that % occurs in scanned images because of the camera being misaligned, % * imperfections in the scanning or surface, or simply because the paper was * % not placed completely flat when scanned. % % The result will be * auto-cropped if the artifact "deskew:auto-crop" is % defined, while the * amount the image is to be deskewed, in degrees, is also % saved as the * artifact "deskew:angle". % % The format of the DeskewImage method is: % % * Image *DeskewImage(const Image *image,const double threshold, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o threshold: separate background from * foreground. % % o exception: return any errors or warnings in this * structure.
% */ static void RadonProjection(const Image * image, MatrixInfo * source_matrixs, MatrixInfo * destination_matrixs, const ssize_t sign, size_t * projection) { MatrixInfo * swap; register MatrixInfo * p, *q; register ssize_t x; size_t step; p = source_matrixs; q = destination_matrixs; for (step = 1; step < GetMatrixColumns(p); step *= 2) { for (x = 0; x < (ssize_t) GetMatrixColumns(p); x += 2 * (ssize_t) step) { register ssize_t i; ssize_t y; unsigned short element, neighbor; for (i = 0; i < (ssize_t) step; i++) { for (y = 0; y < (ssize_t) (GetMatrixRows(p) - i - 1); y++) { if (GetMatrixElement(p, x + i, y, &element) == MagickFalse) continue; if (GetMatrixElement(p, x + i + step, y + i, &neighbor) == MagickFalse) continue; neighbor += element; if (SetMatrixElement(q, x + 2 * i, y, &neighbor) == MagickFalse) continue; if (GetMatrixElement(p, x + i + step, y + i + 1, &neighbor) == MagickFalse) continue; neighbor += element; if (SetMatrixElement(q, x + 2 * i + 1, y, &neighbor) == MagickFalse) continue; } for (; y < (ssize_t) (GetMatrixRows(p) - i); y++) { if (GetMatrixElement(p, x + i, y, &element) == MagickFalse) continue; if (GetMatrixElement(p, x + i + step, y + i, &neighbor) == MagickFalse) continue; neighbor += element; if (SetMatrixElement(q, x + 2 * i, y, &neighbor) == MagickFalse) continue; if (SetMatrixElement(q, x + 2 * i + 1, y, &element) == MagickFalse) continue; } for (; y < (ssize_t) GetMatrixRows(p); y++) { if (GetMatrixElement(p, x + i, y, &element) == MagickFalse) continue; if (SetMatrixElement(q, x + 2 * i, y, &element) == MagickFalse) continue; if (SetMatrixElement(q, x + 2 * i + 1, y, &element) == MagickFalse) continue; } } } swap = p; p = q; q = swap; } for (x = 0; x < (ssize_t) GetMatrixColumns(p); x++) { register ssize_t y; size_t sum; sum = 0; for (y = 0; y < (ssize_t) (GetMatrixRows(p) - 1); y++) { ssize_t delta; unsigned short element, neighbor; if (GetMatrixElement(p, x, y, &element) == MagickFalse) continue; if (GetMatrixElement(p, x, y + 1, &neighbor) == MagickFalse) continue; delta = (ssize_t) element - (ssize_t) neighbor; sum += delta * delta; } projection[GetMatrixColumns(p) + sign * x - 1] = sum; } } static MagickBooleanType RadonTransform(const Image * image, const double threshold, size_t * projection, ExceptionInfo * exception) { CacheView * image_view; MatrixInfo * destination_matrixs, *source_matrixs; MagickBooleanType status; size_t count, width; ssize_t j, y; unsigned char c; unsigned short bits[256]; for (width = 1; width < ((image->columns + 7) / 8); width <<= 1); source_matrixs = AcquireMatrixInfo(width, image->rows, sizeof(unsigned short), exception); destination_matrixs = AcquireMatrixInfo(width, image->rows, sizeof(unsigned short), exception); if ((source_matrixs == (MatrixInfo *) NULL) || (destination_matrixs == (MatrixInfo *) NULL)) { if (destination_matrixs != (MatrixInfo *) NULL) destination_matrixs = DestroyMatrixInfo(destination_matrixs); if (source_matrixs != (MatrixInfo *) NULL) source_matrixs = DestroyMatrixInfo(source_matrixs); return (MagickFalse); } if (NullMatrix(source_matrixs) == MagickFalse) { destination_matrixs = DestroyMatrixInfo(destination_matrixs); source_matrixs = DestroyMatrixInfo(source_matrixs); return (MagickFalse); } for (j = 0; j < 256; j++) { c = (unsigned char)j; for (count = 0; c != 0; c >>= 1) count += c & 0x01; bits[j] = (unsigned short)count; } status = MagickTrue; image_view = AcquireVirtualCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register const Quantum * magick_restrict p; 
register ssize_t i, x; size_t bit, byte; unsigned short value; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); if (p == (const Quantum *)NULL) { status = MagickFalse; continue; } bit = 0; byte = 0; i = (ssize_t) (image->columns + 7) / 8; for (x = 0; x < (ssize_t) image->columns; x++) { byte <<= 1; if (((MagickRealType) GetPixelRed(image, p) < threshold) || ((MagickRealType) GetPixelGreen(image, p) < threshold) || ((MagickRealType) GetPixelBlue(image, p) < threshold)) byte |= 0x01; bit++; if (bit == 8) { value = bits[byte]; (void)SetMatrixElement(source_matrixs, --i, y, &value); bit = 0; byte = 0; } p += GetPixelChannels(image); } if (bit != 0) { byte <<= (8 - bit); value = bits[byte]; (void)SetMatrixElement(source_matrixs, --i, y, &value); } } RadonProjection(image, source_matrixs, destination_matrixs, -1, projection); (void)NullMatrix(source_matrixs); for (y = 0; y < (ssize_t) image->rows; y++) { register const Quantum * magick_restrict p; register ssize_t i, x; size_t bit, byte; unsigned short value; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); if (p == (const Quantum *)NULL) { status = MagickFalse; continue; } bit = 0; byte = 0; i = 0; for (x = 0; x < (ssize_t) image->columns; x++) { byte <<= 1; if (((MagickRealType) GetPixelRed(image, p) < threshold) || ((MagickRealType) GetPixelGreen(image, p) < threshold) || ((MagickRealType) GetPixelBlue(image, p) < threshold)) byte |= 0x01; bit++; if (bit == 8) { value = bits[byte]; (void)SetMatrixElement(source_matrixs, i++, y, &value); bit = 0; byte = 0; } p += GetPixelChannels(image); } if (bit != 0) { byte <<= (8 - bit); value = bits[byte]; (void)SetMatrixElement(source_matrixs, i++, y, &value); } } RadonProjection(image, source_matrixs, destination_matrixs, 1, projection); image_view = DestroyCacheView(image_view); destination_matrixs = DestroyMatrixInfo(destination_matrixs); source_matrixs = DestroyMatrixInfo(source_matrixs); return (MagickTrue); } static void GetImageBackgroundColor(Image * image, const ssize_t offset, ExceptionInfo * exception) { CacheView * image_view; PixelInfo background; double count; ssize_t y; /* * Compute average background color. 
*/ if (offset <= 0) return; GetPixelInfo(image, &background); count = 0.0; image_view = AcquireVirtualCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register const Quantum * magick_restrict p; register ssize_t x; if ((y >= offset) && (y < ((ssize_t) image->rows - offset))) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); if (p == (const Quantum *)NULL) continue; for (x = 0; x < (ssize_t) image->columns; x++) { if ((x >= offset) && (x < ((ssize_t) image->columns - offset))) continue; background.red += QuantumScale * GetPixelRed(image, p); background.green += QuantumScale * GetPixelGreen(image, p); background.blue += QuantumScale * GetPixelBlue(image, p); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) background.alpha += QuantumScale * GetPixelAlpha(image, p); count++; p += GetPixelChannels(image); } } image_view = DestroyCacheView(image_view); image->background_color.red = (double)ClampToQuantum(QuantumRange * background.red / count); image->background_color.green = (double)ClampToQuantum(QuantumRange * background.green / count); image->background_color.blue = (double)ClampToQuantum(QuantumRange * background.blue / count); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->background_color.alpha = (double)ClampToQuantum(QuantumRange * background.alpha / count); } MagickExport Image * DeskewImage(const Image * image, const double threshold, ExceptionInfo * exception) { AffineMatrix affine_matrix; const char *artifact; double degrees; Image * clone_image, *crop_image, *deskew_image, *median_image; MagickBooleanType status; RectangleInfo geometry; register ssize_t i; size_t max_projection, *projection, width; ssize_t skew; /* * Compute deskew angle. */ for (width = 1; width < ((image->columns + 7) / 8); width <<= 1); projection = (size_t *) AcquireQuantumMemory((size_t) (2 * width - 1), sizeof(*projection)); if (projection == (size_t *) NULL) ThrowImageException(ResourceLimitError, "MemoryAllocationFailed"); status = RadonTransform(image, threshold, projection, exception); if (status == MagickFalse) { projection = (size_t *) RelinquishMagickMemory(projection); ThrowImageException(ResourceLimitError, "MemoryAllocationFailed"); } max_projection = 0; skew = 0; for (i = 0; i < (ssize_t) (2 * width - 1); i++) { if (projection[i] > max_projection) { skew = i - (ssize_t) width + 1; max_projection = projection[i]; } } projection = (size_t *) RelinquishMagickMemory(projection); degrees = RadiansToDegrees(-atan((double)skew / width / 8)); if (image->debug != MagickFalse) (void)LogMagickEvent(TransformEvent, GetMagickModule(), " Deskew angle: %g", degrees); /* * Deskew image. 
*/ clone_image = CloneImage(image, 0, 0, MagickTrue, exception); if (clone_image == (Image *) NULL) return ((Image *) NULL); { char angle[MagickPathExtent]; (void)FormatLocaleString(angle, MagickPathExtent, "%.20g", degrees); (void)SetImageArtifact(clone_image, "deskew:angle", angle); } (void)SetImageVirtualPixelMethod(clone_image, BackgroundVirtualPixelMethod, exception); affine_matrix.sx = cos(DegreesToRadians(fmod((double)degrees, 360.0))); affine_matrix.rx = sin(DegreesToRadians(fmod((double)degrees, 360.0))); affine_matrix.ry = (-sin(DegreesToRadians(fmod((double)degrees, 360.0)))); affine_matrix.sy = cos(DegreesToRadians(fmod((double)degrees, 360.0))); affine_matrix.tx = 0.0; affine_matrix.ty = 0.0; artifact = GetImageArtifact(image, "deskew:auto-crop"); if (IsStringTrue(artifact) == MagickFalse) { deskew_image = AffineTransformImage(clone_image, &affine_matrix, exception); clone_image = DestroyImage(clone_image); return (deskew_image); } /* * Auto-crop image. */ GetImageBackgroundColor(clone_image, (ssize_t) StringToLong(artifact), exception); deskew_image = AffineTransformImage(clone_image, &affine_matrix, exception); clone_image = DestroyImage(clone_image); if (deskew_image == (Image *) NULL) return ((Image *) NULL); median_image = StatisticImage(deskew_image, MedianStatistic, 3, 3, exception); if (median_image == (Image *) NULL) { deskew_image = DestroyImage(deskew_image); return ((Image *) NULL); } geometry = GetImageBoundingBox(median_image, exception); median_image = DestroyImage(median_image); if (image->debug != MagickFalse) (void)LogMagickEvent(TransformEvent, GetMagickModule(), " Deskew geometry: " "%.20gx%.20g%+.20g%+.20g", (double)geometry.width, (double) geometry.height, (double)geometry.x, (double)geometry.y); crop_image = CropImage(deskew_image, &geometry, exception); deskew_image = DestroyImage(deskew_image); return (crop_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % I n t e g r a l R o t a t e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % IntegralRotateImage() rotates the image an integral of 90 * degrees. It % allocates the memory necessary for the new Image structure * and returns a % pointer to the rotated image. % % The format of the * IntegralRotateImage method is: % % Image *IntegralRotateImage(const * Image *image,size_t rotations, % ExceptionInfo *exception) % % A * description of each parameter follows. % % o image: the image. % % o * rotations: Specifies the number of 90 degree rotations. % */ MagickExport Image * IntegralRotateImage(const Image * image, size_t rotations, ExceptionInfo * exception) { #define RotateImageTag "Rotate/Image" CacheView * image_view, *rotate_view; Image * rotate_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; /* * Initialize rotated image attributes. */ assert(image != (Image *) NULL); page = image->page; rotations %= 4; if (rotations == 0) return (CloneImage(image, 0, 0, MagickTrue, exception)); if ((rotations == 1) || (rotations == 3)) rotate_image = CloneImage(image, image->rows, image->columns, MagickTrue, exception); else rotate_image = CloneImage(image, image->columns, image->rows, MagickTrue, exception); if (rotate_image == (Image *) NULL) return ((Image *) NULL); /* * Integral rotate the image. 
*/ status = MagickTrue; progress = 0; image_view = AcquireVirtualCacheView(image, exception); rotate_view = AcquireAuthenticCacheView(rotate_image, exception); switch (rotations) { case 1: { size_t tile_height, tile_width; ssize_t tile_y; /* * Rotate 90 degrees. */ GetPixelCacheTileSize(image, &tile_width, &tile_height); tile_width = image->columns; for (tile_y = 0; tile_y < (ssize_t) image->rows; tile_y += (ssize_t) tile_height) { register ssize_t tile_x; if (status == MagickFalse) continue; tile_x = 0; for (; tile_x < (ssize_t) image->columns; tile_x += (ssize_t) tile_width) { MagickBooleanType sync; register const Quantum * magick_restrict p; register Quantum * magick_restrict q; register ssize_t y; size_t height, width; width = tile_width; if ((tile_x + (ssize_t) tile_width) > (ssize_t) image->columns) width = (size_t) (tile_width - (tile_x + tile_width - image->columns)); height = tile_height; if ((tile_y + (ssize_t) tile_height) > (ssize_t) image->rows) height = (size_t) (tile_height - (tile_y + tile_height - image->rows)); p = GetCacheViewVirtualPixels(image_view, tile_x, tile_y, width, height, exception); if (p == (const Quantum *)NULL) { status = MagickFalse; break; } for (y = 0; y < (ssize_t) width; y++) { register const Quantum * magick_restrict tile_pixels; register ssize_t x; if (status == MagickFalse) continue; q = QueueCacheViewAuthenticPixels(rotate_view, (ssize_t) (rotate_image->columns - (tile_y + height)), y + tile_x, height, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } tile_pixels = p + ((height - 1) * width + y) * GetPixelChannels(image); for (x = 0; x < (ssize_t) height; x++) { register ssize_t i; if (GetPixelWriteMask(image, tile_pixels) <= (QuantumRange / 2)) { tile_pixels -= width * GetPixelChannels(image); q += GetPixelChannels(rotate_image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image, channel); if ((traits == UndefinedPixelTrait) || (rotate_traits == UndefinedPixelTrait)) continue; SetPixelChannel(rotate_image, channel, tile_pixels[i], q); } tile_pixels -= width * GetPixelChannels(image); q += GetPixelChannels(rotate_image); } sync = SyncCacheViewAuthenticPixels(rotate_view, exception); if (sync == MagickFalse) status = MagickFalse; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, RotateImageTag, progress += tile_height, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } (void)SetImageProgress(image, RotateImageTag, (MagickOffsetType) image->rows - 1, image->rows); Swap(page.width, page.height); Swap(page.x, page.y); if (page.width != 0) page.x = (ssize_t) (page.width - rotate_image->columns - page.x); break; } case 2: { register ssize_t y; /* * Rotate 180 degrees. 
*/ for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const Quantum * magick_restrict p; register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); q = QueueCacheViewAuthenticPixels(rotate_view, 0, (ssize_t) (image->rows - y - 1), image->columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (Quantum *) NULL)) { status = MagickFalse; continue; } q += GetPixelChannels(rotate_image) * image->columns; for (x = 0; x < (ssize_t) image->columns; x++) { register ssize_t i; q -= GetPixelChannels(rotate_image); if (GetPixelWriteMask(image, p) <= (QuantumRange / 2)) { p += GetPixelChannels(image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image, channel); if ((traits == UndefinedPixelTrait) || (rotate_traits == UndefinedPixelTrait)) continue; SetPixelChannel(rotate_image, channel, p[i], q); } p += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(rotate_view, exception); if (sync == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, RotateImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } (void)SetImageProgress(image, RotateImageTag, (MagickOffsetType) image->rows - 1, image->rows); if (page.width != 0) page.x = (ssize_t) (page.width - rotate_image->columns - page.x); if (page.height != 0) page.y = (ssize_t) (page.height - rotate_image->rows - page.y); break; } case 3: { size_t tile_height, tile_width; ssize_t tile_y; /* * Rotate 270 degrees. 
*/ GetPixelCacheTileSize(image, &tile_width, &tile_height); tile_width = image->columns; for (tile_y = 0; tile_y < (ssize_t) image->rows; tile_y += (ssize_t) tile_height) { register ssize_t tile_x; if (status == MagickFalse) continue; tile_x = 0; for (; tile_x < (ssize_t) image->columns; tile_x += (ssize_t) tile_width) { MagickBooleanType sync; register const Quantum * magick_restrict p; register Quantum * magick_restrict q; register ssize_t y; size_t height, width; width = tile_width; if ((tile_x + (ssize_t) tile_width) > (ssize_t) image->columns) width = (size_t) (tile_width - (tile_x + tile_width - image->columns)); height = tile_height; if ((tile_y + (ssize_t) tile_height) > (ssize_t) image->rows) height = (size_t) (tile_height - (tile_y + tile_height - image->rows)); p = GetCacheViewVirtualPixels(image_view, tile_x, tile_y, width, height, exception); if (p == (const Quantum *)NULL) { status = MagickFalse; break; } for (y = 0; y < (ssize_t) width; y++) { register const Quantum * magick_restrict tile_pixels; register ssize_t x; if (status == MagickFalse) continue; q = QueueCacheViewAuthenticPixels(rotate_view, tile_y, (ssize_t) (y + rotate_image->rows - (tile_x + width)), height, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } tile_pixels = p + ((width - 1) - y) * GetPixelChannels(image); for (x = 0; x < (ssize_t) height; x++) { register ssize_t i; if (GetPixelWriteMask(image, tile_pixels) <= (QuantumRange / 2)) { tile_pixels += width * GetPixelChannels(image); q += GetPixelChannels(rotate_image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image, channel); if ((traits == UndefinedPixelTrait) || (rotate_traits == UndefinedPixelTrait)) continue; SetPixelChannel(rotate_image, channel, tile_pixels[i], q); } tile_pixels += width * GetPixelChannels(image); q += GetPixelChannels(rotate_image); } sync = SyncCacheViewAuthenticPixels(rotate_view, exception); if (sync == MagickFalse) status = MagickFalse; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, RotateImageTag, progress += tile_height, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } (void)SetImageProgress(image, RotateImageTag, (MagickOffsetType) image->rows - 1, image->rows); Swap(page.width, page.height); Swap(page.x, page.y); if (page.height != 0) page.y = (ssize_t) (page.height - rotate_image->rows - page.y); break; } default: break; } rotate_view = DestroyCacheView(rotate_view); image_view = DestroyCacheView(image_view); rotate_image->type = image->type; rotate_image->page = page; if (status == MagickFalse) rotate_image = DestroyImage(rotate_image); return (rotate_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + X S h e a r I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % XShearImage() shears the image in the X direction with a shear * angle of % 'degrees'. Positive angles shear counter-clockwise * (right-hand rule), and % negative angles shear clockwise. Angles are * measured relative to a vertical % Y-axis. X shears will widen an image * creating 'empty' triangles on the left % and right sides of the source * image. 
% % The format of the XShearImage method is: % % * MagickBooleanType XShearImage(Image *image,const double degrees, % * const size_t width,const size_t height, % const ssize_t * x_offset,const ssize_t y_offset,ExceptionInfo *exception) % % A * description of each parameter follows. % % o image: the image. % % o * degrees: A double representing the shearing angle along the X % axis. * % % o width, height, x_offset, y_offset: Defines a region of the image * % to shear. % % o exception: return any errors or warnings in this * structure. % */ static MagickBooleanType XShearImage(Image * image, const double degrees, const size_t width, const size_t height, const ssize_t x_offset, const ssize_t y_offset, ExceptionInfo * exception) { #define XShearImageTag "XShear/Image" typedef enum { LEFT, RIGHT } ShearDirection; CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo background; ssize_t y; /* * X shear image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); status = MagickTrue; background = image->background_color; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) height; y++) { PixelInfo pixel, source, destination; double area, displacement; register Quantum * magick_restrict p, *magick_restrict q; register ssize_t i; ShearDirection direction; ssize_t step; if (status == MagickFalse) continue; p = GetCacheViewAuthenticPixels(image_view, 0, y_offset + y, image->columns, 1, exception); if (p == (Quantum *) NULL) { status = MagickFalse; continue; } p += x_offset * GetPixelChannels(image); displacement = degrees * (double)(y - height / 2.0); if (displacement == 0.0) continue; if (displacement > 0.0) direction = RIGHT; else { displacement *= (-1.0); direction = LEFT; } step = (ssize_t) floor((double)displacement); area = (double)(displacement - step); step++; pixel = background; GetPixelInfo(image, &source); GetPixelInfo(image, &destination); switch (direction) { case LEFT: { /* * Transfer pixels left-to-right. */ if (step > x_offset) break; q = p - step * GetPixelChannels(image); for (i = 0; i < (ssize_t) width; i++) { if ((x_offset + i) < step) { p += GetPixelChannels(image); GetPixelInfoPixel(image, p, &pixel); q += GetPixelChannels(image); continue; } GetPixelInfoPixel(image, p, &source); CompositePixelInfoAreaBlend(&pixel, (double)pixel.alpha, &source, (double)GetPixelAlpha(image, p), area, &destination); SetPixelViaPixelInfo(image, &destination, q); GetPixelInfoPixel(image, p, &pixel); p += GetPixelChannels(image); q += GetPixelChannels(image); } CompositePixelInfoAreaBlend(&pixel, (double)pixel.alpha, &background, (double)background.alpha, area, &destination); SetPixelViaPixelInfo(image, &destination, q); q += GetPixelChannels(image); for (i = 0; i < (step - 1); i++) { SetPixelViaPixelInfo(image, &background, q); q += GetPixelChannels(image); } break; } case RIGHT: { /* * Transfer pixels right-to-left. 
*/ p += width * GetPixelChannels(image); q = p + step * GetPixelChannels(image); for (i = 0; i < (ssize_t) width; i++) { p -= GetPixelChannels(image); q -= GetPixelChannels(image); if ((size_t) (x_offset + width + step - i) > image->columns) continue; GetPixelInfoPixel(image, p, &source); CompositePixelInfoAreaBlend(&pixel, (double)pixel.alpha, &source, (double)GetPixelAlpha(image, p), area, &destination); SetPixelViaPixelInfo(image, &destination, q); GetPixelInfoPixel(image, p, &pixel); } CompositePixelInfoAreaBlend(&pixel, (double)pixel.alpha, &background, (double)background.alpha, area, &destination); q -= GetPixelChannels(image); SetPixelViaPixelInfo(image, &destination, q); for (i = 0; i < (step - 1); i++) { q -= GetPixelChannels(image); SetPixelViaPixelInfo(image, &background, q); } break; } } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, XShearImageTag, progress++, height); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + Y S h e a r I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % YShearImage shears the image in the Y direction with a shear * angle of % 'degrees'. Positive angles shear counter-clockwise * (right-hand rule), and % negative angles shear clockwise. Angles are * measured relative to a % horizontal X-axis. Y shears will increase the * height of an image creating % 'empty' triangles on the top and bottom of * the source image. % % The format of the YShearImage method is: % % * MagickBooleanType YShearImage(Image *image,const double degrees, % * const size_t width,const size_t height, % const ssize_t * x_offset,const ssize_t y_offset,ExceptionInfo *exception) % % A * description of each parameter follows. % % o image: the image. % % o * degrees: A double representing the shearing angle along the Y % axis. * % % o width, height, x_offset, y_offset: Defines a region of the image * % to shear. % % o exception: return any errors or warnings in this * structure. % */ static MagickBooleanType YShearImage(Image * image, const double degrees, const size_t width, const size_t height, const ssize_t x_offset, const ssize_t y_offset, ExceptionInfo * exception) { #define YShearImageTag "YShear/Image" typedef enum { UP, DOWN } ShearDirection; CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo background; ssize_t x; /* * Y Shear image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); status = MagickTrue; progress = 0; background = image->background_color; image_view = AcquireAuthenticCacheView(image, exception); for (x = 0; x < (ssize_t) width; x++) { ssize_t step; double area, displacement; PixelInfo pixel, source, destination; register Quantum * magick_restrict p, *magick_restrict q; register ssize_t i; ShearDirection direction; if (status == MagickFalse) continue; p = GetCacheViewAuthenticPixels(image_view, x_offset + x, 0, 1, image->rows, exception); if (p == (Quantum *) NULL) { status = MagickFalse; continue; } p += y_offset * GetPixelChannels(image); displacement = degrees * (double)(x - width / 2.0); if (displacement == 0.0) continue; if (displacement > 0.0) direction = DOWN; else { displacement *= (-1.0); direction = UP; } step = (ssize_t) floor((double)displacement); area = (double)(displacement - step); step++; pixel = background; GetPixelInfo(image, &source); GetPixelInfo(image, &destination); switch (direction) { case UP: { /* * Transfer pixels top-to-bottom. */ if (step > y_offset) break; q = p - step * GetPixelChannels(image); for (i = 0; i < (ssize_t) height; i++) { if ((y_offset + i) < step) { p += GetPixelChannels(image); GetPixelInfoPixel(image, p, &pixel); q += GetPixelChannels(image); continue; } GetPixelInfoPixel(image, p, &source); CompositePixelInfoAreaBlend(&pixel, (double)pixel.alpha, &source, (double)GetPixelAlpha(image, p), area, &destination); SetPixelViaPixelInfo(image, &destination, q); GetPixelInfoPixel(image, p, &pixel); p += GetPixelChannels(image); q += GetPixelChannels(image); } CompositePixelInfoAreaBlend(&pixel, (double)pixel.alpha, &background, (double)background.alpha, area, &destination); SetPixelViaPixelInfo(image, &destination, q); q += GetPixelChannels(image); for (i = 0; i < (step - 1); i++) { SetPixelViaPixelInfo(image, &background, q); q += GetPixelChannels(image); } break; } case DOWN: { /* * Transfer pixels bottom-to-top. 
*/ p += height * GetPixelChannels(image); q = p + step * GetPixelChannels(image); for (i = 0; i < (ssize_t) height; i++) { p -= GetPixelChannels(image); q -= GetPixelChannels(image); if ((size_t) (y_offset + height + step - i) > image->rows) continue; GetPixelInfoPixel(image, p, &source); CompositePixelInfoAreaBlend(&pixel, (double)pixel.alpha, &source, (double)GetPixelAlpha(image, p), area, &destination); SetPixelViaPixelInfo(image, &destination, q); GetPixelInfoPixel(image, p, &pixel); } CompositePixelInfoAreaBlend(&pixel, (double)pixel.alpha, &background, (double)background.alpha, area, &destination); q -= GetPixelChannels(image); SetPixelViaPixelInfo(image, &destination, q); for (i = 0; i < (step - 1); i++) { q -= GetPixelChannels(image); SetPixelViaPixelInfo(image, &background, q); } break; } } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, YShearImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S h e a r I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ShearImage() creates a new image that is a sheared copy of * an existing % one. Shearing slides one edge of an image along the X or Y * axis, creating % a parallelogram. An X direction shear slides an edge * along the X axis, % while a Y direction shear slides an edge along the Y * axis. The amount of % the shear is controlled by a shear angle. For X * direction shears, x_shear % is measured relative to the Y axis, and * similarly, for Y direction shears % y_shear is measured relative to the X * axis. Empty triangles left over from % shearing the image are filled * with the background color defined by member % 'background_color' of the * image. ShearImage() allocates the memory % necessary for the new Image * structure and returns a pointer to the new image. % % ShearImage() is * based on the paper "A Fast Algorithm for General Raster % Rotation" by * Alan W. Paeth. % % The format of the ShearImage method is: % % Image * *ShearImage(const Image *image,const double x_shear, % const double * y_shear,ExceptionInfo *exception) % % A description of each parameter * follows. % % o image: the image. % % o x_shear, y_shear: Specifies * the number of degrees to shear the image. % % o exception: return any * errors or warnings in this structure. % */ MagickExport Image * ShearImage(const Image * image, const double x_shear, const double y_shear, ExceptionInfo * exception) { Image * integral_image, *shear_image; MagickBooleanType status; PointInfo shear; RectangleInfo border_info, bounds; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((x_shear != 0.0) && (fmod(x_shear, 90.0) == 0.0)) ThrowImageException(ImageError, "AngleIsDiscontinuous"); if ((y_shear != 0.0) && (fmod(y_shear, 90.0) == 0.0)) ThrowImageException(ImageError, "AngleIsDiscontinuous"); /* * Initialize shear angle.
*/ integral_image = CloneImage(image, 0, 0, MagickTrue, exception); if (integral_image == (Image *) NULL) ThrowImageException(ResourceLimitError, "MemoryAllocationFailed"); shear.x = (-tan(DegreesToRadians(fmod(x_shear, 360.0)))); shear.y = tan(DegreesToRadians(fmod(y_shear, 360.0))); if ((shear.x == 0.0) && (shear.y == 0.0)) return (integral_image); if (SetImageStorageClass(integral_image, DirectClass, exception) == MagickFalse) { integral_image = DestroyImage(integral_image); return (integral_image); } if (integral_image->alpha_trait == UndefinedPixelTrait) (void)SetImageAlphaChannel(integral_image, OpaqueAlphaChannel, exception); /* * Compute image size. */ bounds.width = image->columns + (ssize_t) floor(fabs(shear.x) * image->rows + 0.5); bounds.x = (ssize_t) ceil((double)image->columns + ((fabs(shear.x) * image->rows) - image->columns) / 2.0 - 0.5); bounds.y = (ssize_t) ceil((double)image->rows + ((fabs(shear.y) * bounds.width) - image->rows) / 2.0 - 0.5); /* * Surround image with border. */ integral_image->border_color = integral_image->background_color; integral_image->compose = CopyCompositeOp; border_info.width = (size_t) bounds.x; border_info.height = (size_t) bounds.y; shear_image = BorderImage(integral_image, &border_info, image->compose, exception); integral_image = DestroyImage(integral_image); if (shear_image == (Image *) NULL) ThrowImageException(ResourceLimitError, "MemoryAllocationFailed"); /* * Shear the image. */ if (shear_image->alpha_trait == UndefinedPixelTrait) (void)SetImageAlphaChannel(shear_image, OpaqueAlphaChannel, exception); status = XShearImage(shear_image, shear.x, image->columns, image->rows, bounds.x, (ssize_t) (shear_image->rows - image->rows) / 2, exception); if (status == MagickFalse) { shear_image = DestroyImage(shear_image); return ((Image *) NULL); } status = YShearImage(shear_image, shear.y, bounds.width, image->rows, (ssize_t) (shear_image->columns - bounds.width) / 2, bounds.y, exception); if (status == MagickFalse) { shear_image = DestroyImage(shear_image); return ((Image *) NULL); } status = CropToFitImage(&shear_image, shear.x, shear.y, (MagickRealType) image->columns, (MagickRealType) image->rows, MagickFalse, exception); shear_image->alpha_trait = image->alpha_trait; shear_image->compose = image->compose; shear_image->page.width = 0; shear_image->page.height = 0; if (status == MagickFalse) shear_image = DestroyImage(shear_image); return (shear_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S h e a r R o t a t e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ShearRotateImage() creates a new image that is a rotated copy of * an existing % one. Positive angles rotate counter-clockwise (right-hand * rule), while % negative angles rotate clockwise. Rotated images are * usually larger than % the originals and have 'empty' triangular corners. * Empty % triangles left over from shearing the image are filled * with the background % color defined by member 'background_color' of the * image. ShearRotateImage % allocates the memory necessary for the new * Image structure and returns a % pointer to the new image. % % * ShearRotateImage() is based on the paper "A Fast Algorithm for General % * Raster Rotation" by Alan W. Paeth. ShearRotateImage is adapted from a % * similar method based on the Paeth paper written by Michael Halle of the % * Spatial Imaging Group, MIT Media Lab.
% % The format of the * ShearRotateImage method is: % % Image *ShearRotateImage(const Image * *image,const double degrees, % ExceptionInfo *exception) % % A * description of each parameter follows. % % o image: the image. % % o * degrees: Specifies the number of degrees to rotate the image. % % o * exception: return any errors or warnings in this structure. % */ MagickExport Image * ShearRotateImage(const Image * image, const double degrees, ExceptionInfo * exception) { Image * integral_image, *rotate_image; MagickBooleanType status; MagickRealType angle; PointInfo shear; RectangleInfo border_info, bounds; size_t height, rotations, shear_width, width; /* * Adjust rotation angle. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); angle = degrees - 360.0 * (ssize_t) (degrees / 360.0); if (angle < -45.0) angle += 360.0; for (rotations = 0; angle > 45.0; rotations++) angle -= 90.0; rotations %= 4; /* * Calculate shear equations. */ integral_image = IntegralRotateImage(image, rotations, exception); if (integral_image == (Image *) NULL) ThrowImageException(ResourceLimitError, "MemoryAllocationFailed"); shear.x = (-tan((double)DegreesToRadians(angle) / 2.0)); shear.y = sin((double)DegreesToRadians(angle)); if ((shear.x == 0.0) && (shear.y == 0.0)) return (integral_image); if (SetImageStorageClass(integral_image, DirectClass, exception) == MagickFalse) { integral_image = DestroyImage(integral_image); return (integral_image); } if (integral_image->alpha_trait == UndefinedPixelTrait) (void)SetImageAlphaChannel(integral_image, OpaqueAlphaChannel, exception); /* * Compute maximum bounds for 3 shear operations. */ width = integral_image->columns; height = integral_image->rows; bounds.width = (size_t) floor(fabs((double)height * shear.x) + width + 0.5); bounds.height = (size_t) floor(fabs((double)bounds.width * shear.y) + height + 0.5); shear_width = (size_t) floor(fabs((double)bounds.height * shear.x) + bounds.width + 0.5); bounds.x = (ssize_t) floor((double)((shear_width > bounds.width) ? width : bounds.width - shear_width + 2) / 2.0 + 0.5); bounds.y = (ssize_t) floor(((double)bounds.height - height + 2) / 2.0 + 0.5); /* * Surround image with a border. */ integral_image->border_color = integral_image->background_color; integral_image->compose = CopyCompositeOp; border_info.width = (size_t) bounds.x; border_info.height = (size_t) bounds.y; rotate_image = BorderImage(integral_image, &border_info, image->compose, exception); integral_image = DestroyImage(integral_image); if (rotate_image == (Image *) NULL) ThrowImageException(ResourceLimitError, "MemoryAllocationFailed"); /* * Rotate the image. 
*/ status = XShearImage(rotate_image, shear.x, width, height, bounds.x, (ssize_t) (rotate_image->rows - height) / 2, exception); if (status == MagickFalse) { rotate_image = DestroyImage(rotate_image); return ((Image *) NULL); } status = YShearImage(rotate_image, shear.y, bounds.width, height, (ssize_t) (rotate_image->columns - bounds.width) / 2, bounds.y, exception); if (status == MagickFalse) { rotate_image = DestroyImage(rotate_image); return ((Image *) NULL); } status = XShearImage(rotate_image, shear.x, bounds.width, bounds.height, (ssize_t) (rotate_image->columns - bounds.width) / 2, (ssize_t) (rotate_image->rows - bounds.height) / 2, exception); if (status == MagickFalse) { rotate_image = DestroyImage(rotate_image); return ((Image *) NULL); } status = CropToFitImage(&rotate_image, shear.x, shear.y, (MagickRealType) width, (MagickRealType) height, MagickTrue, exception); rotate_image->alpha_trait = image->alpha_trait; rotate_image->compose = image->compose; rotate_image->page.width = 0; rotate_image->page.height = 0; if (status == MagickFalse) rotate_image = DestroyImage(rotate_image); return (rotate_image); }
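The three-shear factorization used above can be sanity-checked in isolation. The following is a minimal standalone sketch, not ImageMagick code (every identifier in it is invented for the example): it applies the same shear factors to one test point and compares the result against the direct rotation matrix.

#include <cmath>
#include <cstdio>

int main(void)
{
  const double theta = 30.0 * 3.14159265358979323846 / 180.0;
  const double a = -tan(theta / 2.0); /* plays the role of shear.x */
  const double b = sin(theta);        /* plays the role of shear.y */
  double x = 10.0, y = 4.0;           /* arbitrary test point */
  x += a * y;                         /* first X shear */
  y += b * x;                         /* Y shear */
  x += a * y;                         /* second X shear */
  printf("three shears: (%.6f, %.6f)\n", x, y);
  printf("rotation:     (%.6f, %.6f)\n",
    10.0 * cos(theta) - 4.0 * sin(theta),
    10.0 * sin(theta) + 4.0 * cos(theta));
  return 0; /* both lines print (6.660254, 8.464102) */
}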
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache-private.h" #include "MagickCore/channel.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/list.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/resource_.h" #include "MagickCore/shear.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + C r o p T o F i t I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CropToFitImage() crops the sheared image as determined by the * bounding box % defined by the width, height, and shearing angles. % % * The format of the CropToFitImage method is: % % MagickBooleanType * CropToFitImage(Image **image, % const double x_shear,const double * y_shear, % const double width,const double height, % const * MagickBooleanType rotate,ExceptionInfo *exception) % % A description of * each parameter follows. % % o image: the image. % % o x_shear, * y_shear, width, height: Defines a region of the image to crop. % % o * exception: return any errors or warnings in this structure. % */ static MagickBooleanType CropToFitImage(Image ** image, const double x_shear, const double y_shear, const double width, const double height, const MagickBooleanType rotate, ExceptionInfo * exception) { Image * crop_image; PointInfo extent[4], min, max; RectangleInfo geometry, page; register ssize_t i; /* * Calculate the rotated image size. 
*/ extent[0].x = (double)(-width / 2.0); extent[0].y = (double)(-height / 2.0); extent[1].x = (double)width / 2.0; extent[1].y = (double)(-height / 2.0); extent[2].x = (double)(-width / 2.0); extent[2].y = (double)height / 2.0; extent[3].x = (double)width / 2.0; extent[3].y = (double)height / 2.0; for (i = 0; i < 4; i++) { extent[i].x += x_shear * extent[i].y; extent[i].y += y_shear * extent[i].x; if (rotate != MagickFalse) extent[i].x += x_shear * extent[i].y; extent[i].x += (double)(*image)->columns / 2.0; extent[i].y += (double)(*image)->rows / 2.0; } min = extent[0]; max = extent[0]; for (i = 1; i < 4; i++) { if (min.x > extent[i].x) min.x = extent[i].x; if (min.y > extent[i].y) min.y = extent[i].y; if (max.x < extent[i].x) max.x = extent[i].x; if (max.y < extent[i].y) max.y = extent[i].y; } geometry.x = (ssize_t) ceil(min.x - 0.5); geometry.y = (ssize_t) ceil(min.y - 0.5); geometry.width = (size_t) floor(max.x - min.x + 0.5); geometry.height = (size_t) floor(max.y - min.y + 0.5); page = (*image)->page; (void)ParseAbsoluteGeometry("0x0+0+0", &(*image)->page); crop_image = CropImage(*image, &geometry, exception); if (crop_image == (Image *) NULL) return (MagickFalse); crop_image->page = page; *image = DestroyImage(*image); *image = crop_image; return (MagickTrue); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % D e s k e w I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % DeskewImage() removes skew from the image. Skew is an artifact * that % occurs in scanned images because of the camera being misaligned, % * imperfections in the scanning or surface, or simply because the paper was * % not placed completely flat when scanned. % % The result will be * auto-cropped if the artifact "deskew:auto-crop" is % defined, and the angle * by which the image was deskewed, in degrees, is % saved as the artifact * "deskew:angle". % % The format of the DeskewImage method is: % % * Image *DeskewImage(const Image *image,const double threshold, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o threshold: separate background from * foreground. % % o exception: return any errors or warnings in this * structure. 
% */ static void RadonProjection(const Image * image, MatrixInfo * source_matrixs, MatrixInfo * destination_matrixs, const ssize_t sign, size_t * projection) { MatrixInfo * swap; register MatrixInfo * p, *q; register ssize_t x; size_t step; p = source_matrixs; q = destination_matrixs; for (step = 1; step < GetMatrixColumns(p); step *= 2) { for (x = 0; x < (ssize_t) GetMatrixColumns(p); x += 2 * (ssize_t) step) { register ssize_t i; ssize_t y; unsigned short element, neighbor; for (i = 0; i < (ssize_t) step; i++) { for (y = 0; y < (ssize_t) (GetMatrixRows(p) - i - 1); y++) { if (GetMatrixElement(p, x + i, y, &element) == MagickFalse) continue; if (GetMatrixElement(p, x + i + step, y + i, &neighbor) == MagickFalse) continue; neighbor += element; if (SetMatrixElement(q, x + 2 * i, y, &neighbor) == MagickFalse) continue; if (GetMatrixElement(p, x + i + step, y + i + 1, &neighbor) == MagickFalse) continue; neighbor += element; if (SetMatrixElement(q, x + 2 * i + 1, y, &neighbor) == MagickFalse) continue; } for (; y < (ssize_t) (GetMatrixRows(p) - i); y++) { if (GetMatrixElement(p, x + i, y, &element) == MagickFalse) continue; if (GetMatrixElement(p, x + i + step, y + i, &neighbor) == MagickFalse) continue; neighbor += element; if (SetMatrixElement(q, x + 2 * i, y, &neighbor) == MagickFalse) continue; if (SetMatrixElement(q, x + 2 * i + 1, y, &element) == MagickFalse) continue; } for (; y < (ssize_t) GetMatrixRows(p); y++) { if (GetMatrixElement(p, x + i, y, &element) == MagickFalse) continue; if (SetMatrixElement(q, x + 2 * i, y, &element) == MagickFalse) continue; if (SetMatrixElement(q, x + 2 * i + 1, y, &element) == MagickFalse) continue; } } } swap = p; p = q; q = swap; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_number_threads(image,image,GetMatrixColumns(p),1) #endif for (x = 0; x < (ssize_t) GetMatrixColumns(p); x++) { register ssize_t y; size_t sum; sum = 0; for (y = 0; y < (ssize_t) (GetMatrixRows(p) - 1); y++) { ssize_t delta; unsigned short element, neighbor; if (GetMatrixElement(p, x, y, &element) == MagickFalse) continue; if (GetMatrixElement(p, x, y + 1, &neighbor) == MagickFalse) continue; delta = (ssize_t) element - (ssize_t) neighbor; sum += delta * delta; } projection[GetMatrixColumns(p) + sign * x - 1] = sum; } } static MagickBooleanType RadonTransform(const Image * image, const double threshold, size_t * projection, ExceptionInfo * exception) { CacheView * image_view; MatrixInfo * destination_matrixs, *source_matrixs; MagickBooleanType status; size_t count, width; ssize_t j, y; unsigned char c; unsigned short bits[256]; for (width = 1; width < ((image->columns + 7) / 8); width <<= 1); source_matrixs = AcquireMatrixInfo(width, image->rows, sizeof(unsigned short), exception); destination_matrixs = AcquireMatrixInfo(width, image->rows, sizeof(unsigned short), exception); if ((source_matrixs == (MatrixInfo *) NULL) || (destination_matrixs == (MatrixInfo *) NULL)) { if (destination_matrixs != (MatrixInfo *) NULL) destination_matrixs = DestroyMatrixInfo(destination_matrixs); if (source_matrixs != (MatrixInfo *) NULL) source_matrixs = DestroyMatrixInfo(source_matrixs); return (MagickFalse); } if (NullMatrix(source_matrixs) == MagickFalse) { destination_matrixs = DestroyMatrixInfo(destination_matrixs); source_matrixs = DestroyMatrixInfo(source_matrixs); return (MagickFalse); } for (j = 0; j < 256; j++) { c = (unsigned char)j; for (count = 0; c != 0; c >>= 1) count += c & 0x01; bits[j] = (unsigned short)count; } status = MagickTrue; 
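/*
 * The two scanline passes below threshold each row into a bitmask, eight
 * pixels per byte, and store per-byte population counts (precomputed in
 * bits[]) into source_matrixs: mirrored horizontally for the sign = -1
 * projection and in forward order for sign = +1. RadonProjection() then
 * accumulates the counts along progressively doubled diagonal steps.
 */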
image_view = AcquireVirtualCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register const Quantum * magick_restrict p; register ssize_t i, x; size_t bit, byte; unsigned short value; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); if (p == (const Quantum *)NULL) { status = MagickFalse; continue; } bit = 0; byte = 0; i = (ssize_t) (image->columns + 7) / 8; for (x = 0; x < (ssize_t) image->columns; x++) { byte <<= 1; if (((MagickRealType) GetPixelRed(image, p) < threshold) || ((MagickRealType) GetPixelGreen(image, p) < threshold) || ((MagickRealType) GetPixelBlue(image, p) < threshold)) byte |= 0x01; bit++; if (bit == 8) { value = bits[byte]; (void)SetMatrixElement(source_matrixs, --i, y, &value); bit = 0; byte = 0; } p += GetPixelChannels(image); } if (bit != 0) { byte <<= (8 - bit); value = bits[byte]; (void)SetMatrixElement(source_matrixs, --i, y, &value); } } RadonProjection(image, source_matrixs, destination_matrixs, -1, projection); (void)NullMatrix(source_matrixs); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register const Quantum * magick_restrict p; register ssize_t i, x; size_t bit, byte; unsigned short value; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); if (p == (const Quantum *)NULL) { status = MagickFalse; continue; } bit = 0; byte = 0; i = 0; for (x = 0; x < (ssize_t) image->columns; x++) { byte <<= 1; if (((MagickRealType) GetPixelRed(image, p) < threshold) || ((MagickRealType) GetPixelGreen(image, p) < threshold) || ((MagickRealType) GetPixelBlue(image, p) < threshold)) byte |= 0x01; bit++; if (bit == 8) { value = bits[byte]; (void)SetMatrixElement(source_matrixs, i++, y, &value); bit = 0; byte = 0; } p += GetPixelChannels(image); } if (bit != 0) { byte <<= (8 - bit); value = bits[byte]; (void)SetMatrixElement(source_matrixs, i++, y, &value); } } RadonProjection(image, source_matrixs, destination_matrixs, 1, projection); image_view = DestroyCacheView(image_view); destination_matrixs = DestroyMatrixInfo(destination_matrixs); source_matrixs = DestroyMatrixInfo(source_matrixs); return (MagickTrue); } static void GetImageBackgroundColor(Image * image, const ssize_t offset, ExceptionInfo * exception) { CacheView * image_view; PixelInfo background; double count; ssize_t y; /* * Compute average background color. 
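* Only pixels within 'offset' of the image border are sampled; interior
* pixels are skipped, so the average reflects the page margin rather than
* the scanned content.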
*/ if (offset <= 0) return; GetPixelInfo(image, &background); count = 0.0; image_view = AcquireVirtualCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register const Quantum * magick_restrict p; register ssize_t x; if ((y >= offset) && (y < ((ssize_t) image->rows - offset))) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); if (p == (const Quantum *)NULL) continue; for (x = 0; x < (ssize_t) image->columns; x++) { if ((x >= offset) && (x < ((ssize_t) image->columns - offset))) continue; background.red += QuantumScale * GetPixelRed(image, p); background.green += QuantumScale * GetPixelGreen(image, p); background.blue += QuantumScale * GetPixelBlue(image, p); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) background.alpha += QuantumScale * GetPixelAlpha(image, p); count++; p += GetPixelChannels(image); } } image_view = DestroyCacheView(image_view); image->background_color.red = (double)ClampToQuantum(QuantumRange * background.red / count); image->background_color.green = (double)ClampToQuantum(QuantumRange * background.green / count); image->background_color.blue = (double)ClampToQuantum(QuantumRange * background.blue / count); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->background_color.alpha = (double)ClampToQuantum(QuantumRange * background.alpha / count); } MagickExport Image * DeskewImage(const Image * image, const double threshold, ExceptionInfo * exception) { AffineMatrix affine_matrix; const char *artifact; double degrees; Image * clone_image, *crop_image, *deskew_image, *median_image; MagickBooleanType status; RectangleInfo geometry; register ssize_t i; size_t max_projection, *projection, width; ssize_t skew; /* * Compute deskew angle. */ for (width = 1; width < ((image->columns + 7) / 8); width <<= 1); projection = (size_t *) AcquireQuantumMemory((size_t) (2 * width - 1), sizeof(*projection)); if (projection == (size_t *) NULL) ThrowImageException(ResourceLimitError, "MemoryAllocationFailed"); status = RadonTransform(image, threshold, projection, exception); if (status == MagickFalse) { projection = (size_t *) RelinquishMagickMemory(projection); ThrowImageException(ResourceLimitError, "MemoryAllocationFailed"); } max_projection = 0; skew = 0; for (i = 0; i < (ssize_t) (2 * width - 1); i++) { if (projection[i] > max_projection) { skew = i - (ssize_t) width + 1; max_projection = projection[i]; } } projection = (size_t *) RelinquishMagickMemory(projection); degrees = RadiansToDegrees(-atan((double)skew / width / 8)); if (image->debug != MagickFalse) (void)LogMagickEvent(TransformEvent, GetMagickModule(), " Deskew angle: %g", degrees); /* * Deskew image. 
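* The skew found above maximizes the contrast of the Radon projection;
* because each matrix column packs eight pixels, the deskew angle is
* -atan(skew / (8 * width)), converted to degrees and applied below as an
* affine rotation.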
*/ clone_image = CloneImage(image, 0, 0, MagickTrue, exception); if (clone_image == (Image *) NULL) return ((Image *) NULL); { char angle[MagickPathExtent]; (void)FormatLocaleString(angle, MagickPathExtent, "%.20g", degrees); (void)SetImageArtifact(clone_image, "deskew:angle", angle); } (void)SetImageVirtualPixelMethod(clone_image, BackgroundVirtualPixelMethod, exception); affine_matrix.sx = cos(DegreesToRadians(fmod((double)degrees, 360.0))); affine_matrix.rx = sin(DegreesToRadians(fmod((double)degrees, 360.0))); affine_matrix.ry = (-sin(DegreesToRadians(fmod((double)degrees, 360.0)))); affine_matrix.sy = cos(DegreesToRadians(fmod((double)degrees, 360.0))); affine_matrix.tx = 0.0; affine_matrix.ty = 0.0; artifact = GetImageArtifact(image, "deskew:auto-crop"); if (IsStringTrue(artifact) == MagickFalse) { deskew_image = AffineTransformImage(clone_image, &affine_matrix, exception); clone_image = DestroyImage(clone_image); return (deskew_image); } /* * Auto-crop image. */ GetImageBackgroundColor(clone_image, (ssize_t) StringToLong(artifact), exception); deskew_image = AffineTransformImage(clone_image, &affine_matrix, exception); clone_image = DestroyImage(clone_image); if (deskew_image == (Image *) NULL) return ((Image *) NULL); median_image = StatisticImage(deskew_image, MedianStatistic, 3, 3, exception); if (median_image == (Image *) NULL) { deskew_image = DestroyImage(deskew_image); return ((Image *) NULL); } geometry = GetImageBoundingBox(median_image, exception); median_image = DestroyImage(median_image); if (image->debug != MagickFalse) (void)LogMagickEvent(TransformEvent, GetMagickModule(), " Deskew geometry: " "%.20gx%.20g%+.20g%+.20g", (double)geometry.width, (double) geometry.height, (double)geometry.x, (double)geometry.y); crop_image = CropImage(deskew_image, &geometry, exception); deskew_image = DestroyImage(deskew_image); return (crop_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % I n t e g r a l R o t a t e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % IntegralRotateImage() rotates the image an integral of 90 * degrees. It % allocates the memory necessary for the new Image structure * and returns a % pointer to the rotated image. % % The format of the * IntegralRotateImage method is: % % Image *IntegralRotateImage(const * Image *image,size_t rotations, % ExceptionInfo *exception) % % A * description of each parameter follows. % % o image: the image. % % o * rotations: Specifies the number of 90 degree rotations. % */ MagickExport Image * IntegralRotateImage(const Image * image, size_t rotations, ExceptionInfo * exception) { #define RotateImageTag "Rotate/Image" CacheView * image_view, *rotate_view; Image * rotate_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; /* * Initialize rotated image attributes. */ assert(image != (Image *) NULL); page = image->page; rotations %= 4; if (rotations == 0) return (CloneImage(image, 0, 0, MagickTrue, exception)); if ((rotations == 1) || (rotations == 3)) rotate_image = CloneImage(image, image->rows, image->columns, MagickTrue, exception); else rotate_image = CloneImage(image, image->columns, image->rows, MagickTrue, exception); if (rotate_image == (Image *) NULL) return ((Image *) NULL); /* * Integral rotate the image. 
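* Rotations by multiples of 90 degrees are lossless pixel permutations;
* the 90 and 270 degree cases below copy cache-sized tiles and transpose
* them into the destination to keep pixel-cache access local.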
*/ status = MagickTrue; progress = 0; image_view = AcquireVirtualCacheView(image, exception); rotate_view = AcquireAuthenticCacheView(rotate_image, exception); switch (rotations) { case 1: { size_t tile_height, tile_width; ssize_t tile_y; /* * Rotate 90 degrees. */ GetPixelCacheTileSize(image, &tile_width, &tile_height); tile_width = image->columns; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows/tile_height,1) #endif for (tile_y = 0; tile_y < (ssize_t) image->rows; tile_y += (ssize_t) tile_height) { register ssize_t tile_x; if (status == MagickFalse) continue; tile_x = 0; for (; tile_x < (ssize_t) image->columns; tile_x += (ssize_t) tile_width) { MagickBooleanType sync; register const Quantum * magick_restrict p; register Quantum * magick_restrict q; register ssize_t y; size_t height, width; width = tile_width; if ((tile_x + (ssize_t) tile_width) > (ssize_t) image->columns) width = (size_t) (tile_width - (tile_x + tile_width - image->columns)); height = tile_height; if ((tile_y + (ssize_t) tile_height) > (ssize_t) image->rows) height = (size_t) (tile_height - (tile_y + tile_height - image->rows)); p = GetCacheViewVirtualPixels(image_view, tile_x, tile_y, width, height, exception); if (p == (const Quantum *)NULL) { status = MagickFalse; break; } for (y = 0; y < (ssize_t) width; y++) { register const Quantum * magick_restrict tile_pixels; register ssize_t x; if (status == MagickFalse) continue; q = QueueCacheViewAuthenticPixels(rotate_view, (ssize_t) (rotate_image->columns - (tile_y + height)), y + tile_x, height, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } tile_pixels = p + ((height - 1) * width + y) * GetPixelChannels(image); for (x = 0; x < (ssize_t) height; x++) { register ssize_t i; if (GetPixelWriteMask(image, tile_pixels) <= (QuantumRange / 2)) { tile_pixels -= width * GetPixelChannels(image); q += GetPixelChannels(rotate_image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image, channel); if ((traits == UndefinedPixelTrait) || (rotate_traits == UndefinedPixelTrait)) continue; SetPixelChannel(rotate_image, channel, tile_pixels[i], q); } tile_pixels -= width * GetPixelChannels(image); q += GetPixelChannels(rotate_image); } sync = SyncCacheViewAuthenticPixels(rotate_view, exception); if (sync == MagickFalse) status = MagickFalse; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_IntegralRotateImage) #endif proceed = SetImageProgress(image, RotateImageTag, progress += tile_height, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } (void)SetImageProgress(image, RotateImageTag, (MagickOffsetType) image->rows - 1, image->rows); Swap(page.width, page.height); Swap(page.x, page.y); if (page.width != 0) page.x = (ssize_t) (page.width - rotate_image->columns - page.x); break; } case 2: { register ssize_t y; /* * Rotate 180 degrees. 
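* A 180 degree rotation maps (x, y) to (columns - 1 - x, rows - 1 - y):
* each source row is written, reversed, into the mirrored destination row.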
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const Quantum * magick_restrict p; register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); q = QueueCacheViewAuthenticPixels(rotate_view, 0, (ssize_t) (image->rows - y - 1), image->columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (Quantum *) NULL)) { status = MagickFalse; continue; } q += GetPixelChannels(rotate_image) * image->columns; for (x = 0; x < (ssize_t) image->columns; x++) { register ssize_t i; q -= GetPixelChannels(rotate_image); if (GetPixelWriteMask(image, p) <= (QuantumRange / 2)) { p += GetPixelChannels(image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image, channel); if ((traits == UndefinedPixelTrait) || (rotate_traits == UndefinedPixelTrait)) continue; SetPixelChannel(rotate_image, channel, p[i], q); } p += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(rotate_view, exception); if (sync == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_IntegralRotateImage) #endif proceed = SetImageProgress(image, RotateImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } (void)SetImageProgress(image, RotateImageTag, (MagickOffsetType) image->rows - 1, image->rows); if (page.width != 0) page.x = (ssize_t) (page.width - rotate_image->columns - page.x); if (page.height != 0) page.y = (ssize_t) (page.height - rotate_image->rows - page.y); break; } case 3: { size_t tile_height, tile_width; ssize_t tile_y; /* * Rotate 270 degrees. 
*/ GetPixelCacheTileSize(image, &tile_width, &tile_height); tile_width = image->columns; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows/tile_height,1) #endif for (tile_y = 0; tile_y < (ssize_t) image->rows; tile_y += (ssize_t) tile_height) { register ssize_t tile_x; if (status == MagickFalse) continue; tile_x = 0; for (; tile_x < (ssize_t) image->columns; tile_x += (ssize_t) tile_width) { MagickBooleanType sync; register const Quantum * magick_restrict p; register Quantum * magick_restrict q; register ssize_t y; size_t height, width; width = tile_width; if ((tile_x + (ssize_t) tile_width) > (ssize_t) image->columns) width = (size_t) (tile_width - (tile_x + tile_width - image->columns)); height = tile_height; if ((tile_y + (ssize_t) tile_height) > (ssize_t) image->rows) height = (size_t) (tile_height - (tile_y + tile_height - image->rows)); p = GetCacheViewVirtualPixels(image_view, tile_x, tile_y, width, height, exception); if (p == (const Quantum *)NULL) { status = MagickFalse; break; } for (y = 0; y < (ssize_t) width; y++) { register const Quantum * magick_restrict tile_pixels; register ssize_t x; if (status == MagickFalse) continue; q = QueueCacheViewAuthenticPixels(rotate_view, tile_y, (ssize_t) (y + rotate_image->rows - (tile_x + width)), height, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } tile_pixels = p + ((width - 1) - y) * GetPixelChannels(image); for (x = 0; x < (ssize_t) height; x++) { register ssize_t i; if (GetPixelWriteMask(image, tile_pixels) <= (QuantumRange / 2)) { tile_pixels += width * GetPixelChannels(image); q += GetPixelChannels(rotate_image); continue; } for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image, channel); if ((traits == UndefinedPixelTrait) || (rotate_traits == UndefinedPixelTrait)) continue; SetPixelChannel(rotate_image, channel, tile_pixels[i], q); } tile_pixels += width * GetPixelChannels(image); q += GetPixelChannels(rotate_image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_IntegralRotateImage) #endif sync = SyncCacheViewAuthenticPixels(rotate_view, exception); if (sync == MagickFalse) status = MagickFalse; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, RotateImageTag, progress += tile_height, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } (void)SetImageProgress(image, RotateImageTag, (MagickOffsetType) image->rows - 1, image->rows); Swap(page.width, page.height); Swap(page.x, page.y); if (page.height != 0) page.y = (ssize_t) (page.height - rotate_image->rows - page.y); break; } default: break; } rotate_view = DestroyCacheView(rotate_view); image_view = DestroyCacheView(image_view); rotate_image->type = image->type; rotate_image->page = page; if (status == MagickFalse) rotate_image = DestroyImage(rotate_image); return (rotate_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + X S h e a r I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % XShearImage() shears the image in the X direction with a shear * angle of % 'degrees'. 
Positive angles shear counter-clockwise * (right-hand rule), and % negative angles shear clockwise. Angles are * measured relative to a vertical % Y-axis. X shears will widen an image * creating 'empty' triangles on the left % and right sides of the source * image. % % The format of the XShearImage method is: % % * MagickBooleanType XShearImage(Image *image,const double degrees, % * const size_t width,const size_t height, % const ssize_t * x_offset,const ssize_t y_offset,ExceptionInfo *exception) % % A * description of each parameter follows. % % o image: the image. % % o * degrees: A double representing the shearing angle along the X % axis. * % % o width, height, x_offset, y_offset: Defines a region of the image * % to shear. % % o exception: return any errors or warnings in this * structure. % */ static MagickBooleanType XShearImage(Image * image, const double degrees, const size_t width, const size_t height, const ssize_t x_offset, const ssize_t y_offset, ExceptionInfo * exception) { #define XShearImageTag "XShear/Image" typedef enum { LEFT, RIGHT } ShearDirection; CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo background; ssize_t y; /* * X shear image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); status = MagickTrue; background = image->background_color; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,image,height,1) #endif for (y = 0; y < (ssize_t) height; y++) { PixelInfo pixel, source, destination; double area, displacement; register Quantum * magick_restrict p, *magick_restrict q; register ssize_t i; ShearDirection direction; ssize_t step; if (status == MagickFalse) continue; p = GetCacheViewAuthenticPixels(image_view, 0, y_offset + y, image->columns, 1, exception); if (p == (Quantum *) NULL) { status = MagickFalse; continue; } p += x_offset * GetPixelChannels(image); displacement = degrees * (double)(y - height / 2.0); if (displacement == 0.0) continue; if (displacement > 0.0) direction = RIGHT; else { displacement *= (-1.0); direction = LEFT; } step = (ssize_t) floor((double)displacement); area = (double)(displacement - step); step++; pixel = background; GetPixelInfo(image, &source); GetPixelInfo(image, &destination); switch (direction) { case LEFT: { /* * Transfer pixels left-to-right. */ if (step > x_offset) break; q = p - step * GetPixelChannels(image); for (i = 0; i < (ssize_t) width; i++) { if ((x_offset + i) < step) { p += GetPixelChannels(image); GetPixelInfoPixel(image, p, &pixel); q += GetPixelChannels(image); continue; } GetPixelInfoPixel(image, p, &source); CompositePixelInfoAreaBlend(&pixel, (double)pixel.alpha, &source, (double)GetPixelAlpha(image, p), area, &destination); SetPixelViaPixelInfo(image, &destination, q); GetPixelInfoPixel(image, p, &pixel); p += GetPixelChannels(image); q += GetPixelChannels(image); } CompositePixelInfoAreaBlend(&pixel, (double)pixel.alpha, &background, (double)background.alpha, area, &destination); SetPixelViaPixelInfo(image, &destination, q); q += GetPixelChannels(image); for (i = 0; i < (step - 1); i++) { SetPixelViaPixelInfo(image, &background, q); q += GetPixelChannels(image); } break; } case RIGHT: { /* * Transfer pixels right-to-left. 
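* Source and destination share the same scanline and q runs 'step' pixels
* to the right of p, so copying right-to-left never overwrites a source
* pixel before it has been read.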
*/ p += width * GetPixelChannels(image); q = p + step * GetPixelChannels(image); for (i = 0; i < (ssize_t) width; i++) { p -= GetPixelChannels(image); q -= GetPixelChannels(image); if ((size_t) (x_offset + width + step - i) > image->columns) continue; GetPixelInfoPixel(image, p, &source); CompositePixelInfoAreaBlend(&pixel, (double)pixel.alpha, &source, (double)GetPixelAlpha(image, p), area, &destination); SetPixelViaPixelInfo(image, &destination, q); GetPixelInfoPixel(image, p, &pixel); } CompositePixelInfoAreaBlend(&pixel, (double)pixel.alpha, &background, (double)background.alpha, area, &destination); q -= GetPixelChannels(image); SetPixelViaPixelInfo(image, &destination, q); for (i = 0; i < (step - 1); i++) { q -= GetPixelChannels(image); SetPixelViaPixelInfo(image, &background, q); } break; } } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_XShearImage) #endif proceed = SetImageProgress(image, XShearImageTag, progress++, height); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % + Y S h e a r I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % YShearImage shears the image in the Y direction with a shear * angle of % 'degrees'. Positive angles shear counter-clockwise * (right-hand rule), and % negative angles shear clockwise. Angles are * measured relative to a % horizontal X-axis. Y shears will increase the * height of an image creating % 'empty' triangles on the top and bottom of * the source image. % % The format of the YShearImage method is: % % * MagickBooleanType YShearImage(Image *image,const double degrees, % * const size_t width,const size_t height, % const ssize_t * x_offset,const ssize_t y_offset,ExceptionInfo *exception) % % A * description of each parameter follows. % % o image: the image. % % o * degrees: A double representing the shearing angle along the Y % axis. * % % o width, height, x_offset, y_offset: Defines a region of the image * % to shear. % % o exception: return any errors or warnings in this * structure. % */ static MagickBooleanType YShearImage(Image * image, const double degrees, const size_t width, const size_t height, const ssize_t x_offset, const ssize_t y_offset, ExceptionInfo * exception) { #define YShearImageTag "YShear/Image" typedef enum { UP, DOWN } ShearDirection; CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo background; ssize_t x; /* * Y Shear image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); status = MagickTrue; progress = 0; background = image->background_color; image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,image,width,1) #endif for (x = 0; x < (ssize_t) width; x++) { ssize_t step; double area, displacement; PixelInfo pixel, source, destination; register Quantum * magick_restrict p, *magick_restrict q; register ssize_t i; ShearDirection direction; if (status == MagickFalse) continue; p = GetCacheViewAuthenticPixels(image_view, x_offset + x, 0, 1, image->rows, exception); if (p == (Quantum *) NULL) { status = MagickFalse; continue; } p += y_offset * GetPixelChannels(image); displacement = degrees * (double)(x - width / 2.0); if (displacement == 0.0) continue; if (displacement > 0.0) direction = DOWN; else { displacement *= (-1.0); direction = UP; } step = (ssize_t) floor((double)displacement); area = (double)(displacement - step); step++; pixel = background; GetPixelInfo(image, &source); GetPixelInfo(image, &destination); switch (direction) { case UP: { /* * Transfer pixels top-to-bottom. */ if (step > y_offset) break; q = p - step * GetPixelChannels(image); for (i = 0; i < (ssize_t) height; i++) { if ((y_offset + i) < step) { p += GetPixelChannels(image); GetPixelInfoPixel(image, p, &pixel); q += GetPixelChannels(image); continue; } GetPixelInfoPixel(image, p, &source); CompositePixelInfoAreaBlend(&pixel, (double)pixel.alpha, &source, (double)GetPixelAlpha(image, p), area, &destination); SetPixelViaPixelInfo(image, &destination, q); GetPixelInfoPixel(image, p, &pixel); p += GetPixelChannels(image); q += GetPixelChannels(image); } CompositePixelInfoAreaBlend(&pixel, (double)pixel.alpha, &background, (double)background.alpha, area, &destination); SetPixelViaPixelInfo(image, &destination, q); q += GetPixelChannels(image); for (i = 0; i < (step - 1); i++) { SetPixelViaPixelInfo(image, &background, q); q += GetPixelChannels(image); } break; } case DOWN: { /* * Transfer pixels bottom-to-top. 
*/ p += height * GetPixelChannels(image); q = p + step * GetPixelChannels(image); for (i = 0; i < (ssize_t) height; i++) { p -= GetPixelChannels(image); q -= GetPixelChannels(image); if ((size_t) (y_offset + height + step - i) > image->rows) continue; GetPixelInfoPixel(image, p, &source); CompositePixelInfoAreaBlend(&pixel, (double)pixel.alpha, &source, (double)GetPixelAlpha(image, p), area, &destination); SetPixelViaPixelInfo(image, &destination, q); GetPixelInfoPixel(image, p, &pixel); } CompositePixelInfoAreaBlend(&pixel, (double)pixel.alpha, &background, (double)background.alpha, area, &destination); q -= GetPixelChannels(image); SetPixelViaPixelInfo(image, &destination, q); for (i = 0; i < (step - 1); i++) { q -= GetPixelChannels(image); SetPixelViaPixelInfo(image, &background, q); } break; } } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_YShearImage) #endif proceed = SetImageProgress(image, YShearImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S h e a r I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ShearImage() creates a new image that is a shear_image copy of * an existing % one. Shearing slides one edge of an image along the X or Y * axis, creating % a parallelogram. An X direction shear slides an edge * along the X axis, % while a Y direction shear slides an edge along the Y * axis. The amount of % the shear is controlled by a shear angle. For X * direction shears, x_shear % is measured relative to the Y axis, and * similarly, for Y direction shears % y_shear is measured relative to the X * axis. Empty triangles left over from % shearing the image are filled * with the background color defined by member % 'background_color' of the * image.. ShearImage() allocates the memory % necessary for the new Image * structure and returns a pointer to the new image. % % ShearImage() is * based on the paper "A Fast Algorithm for General Raster % Rotatation" by * Alan W. Paeth. % % The format of the ShearImage method is: % % Image * *ShearImage(const Image *image,const double x_shear, % const double * y_shear,ExceptionInfo *exception) % % A description of each parameter * follows. % % o image: the image. % % o x_shear, y_shear: Specifies * the number of degrees to shear the image. % % o exception: return any * errors or warnings in this structure. 
% */ MagickExport Image * ShearImage(const Image * image, const double x_shear, const double y_shear, ExceptionInfo * exception) { Image * integral_image, *shear_image; MagickBooleanType status; PointInfo shear; RectangleInfo border_info, bounds; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((x_shear != 0.0) && (fmod(x_shear, 90.0) == 0.0)) ThrowImageException(ImageError, "AngleIsDiscontinuous"); if ((y_shear != 0.0) && (fmod(y_shear, 90.0) == 0.0)) ThrowImageException(ImageError, "AngleIsDiscontinuous"); /* * Initialize shear angle. */ integral_image = CloneImage(image, 0, 0, MagickTrue, exception); if (integral_image == (Image *) NULL) ThrowImageException(ResourceLimitError, "MemoryAllocationFailed"); shear.x = (-tan(DegreesToRadians(fmod(x_shear, 360.0)))); shear.y = tan(DegreesToRadians(fmod(y_shear, 360.0))); if ((shear.x == 0.0) && (shear.y == 0.0)) return (integral_image); if (SetImageStorageClass(integral_image, DirectClass, exception) == MagickFalse) { integral_image = DestroyImage(integral_image); return (integral_image); } if (integral_image->alpha_trait == UndefinedPixelTrait) (void)SetImageAlphaChannel(integral_image, OpaqueAlphaChannel, exception); /* * Compute image size. */ bounds.width = image->columns + (ssize_t) floor(fabs(shear.x) * image->rows + 0.5); bounds.x = (ssize_t) ceil((double)image->columns + ((fabs(shear.x) * image->rows) - image->columns) / 2.0 - 0.5); bounds.y = (ssize_t) ceil((double)image->rows + ((fabs(shear.y) * bounds.width) - image->rows) / 2.0 - 0.5); /* * Surround image with border. */ integral_image->border_color = integral_image->background_color; integral_image->compose = CopyCompositeOp; border_info.width = (size_t) bounds.x; border_info.height = (size_t) bounds.y; shear_image = BorderImage(integral_image, &border_info, image->compose, exception); integral_image = DestroyImage(integral_image); if (shear_image == (Image *) NULL) ThrowImageException(ResourceLimitError, "MemoryAllocationFailed"); /* * Shear the image. 
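* The X pass runs first over the original rows, then the Y pass over the
* widened canvas; both passes antialias the sheared edge by blending the
* fractional displacement 'area' via CompositePixelInfoAreaBlend().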
*/ if (shear_image->alpha_trait == UndefinedPixelTrait) (void)SetImageAlphaChannel(shear_image, OpaqueAlphaChannel, exception); status = XShearImage(shear_image, shear.x, image->columns, image->rows, bounds.x, (ssize_t) (shear_image->rows - image->rows) / 2, exception); if (status == MagickFalse) { shear_image = DestroyImage(shear_image); return ((Image *) NULL); } status = YShearImage(shear_image, shear.y, bounds.width, image->rows, (ssize_t) (shear_image->columns - bounds.width) / 2, bounds.y, exception); if (status == MagickFalse) { shear_image = DestroyImage(shear_image); return ((Image *) NULL); } status = CropToFitImage(&shear_image, shear.x, shear.y, (MagickRealType) image->columns, (MagickRealType) image->rows, MagickFalse, exception); shear_image->alpha_trait = image->alpha_trait; shear_image->compose = image->compose; shear_image->page.width = 0; shear_image->page.height = 0; if (status == MagickFalse) shear_image = DestroyImage(shear_image); return (shear_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S h e a r R o t a t e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ShearRotateImage() creates a new image that is a rotated copy of * an existing % one. Positive angles rotate counter-clockwise (right-hand * rule), while % negative angles rotate clockwise. Rotated images are * usually larger than % the originals and have 'empty' triangular corners. * X axis. Empty % triangles left over from shearing the image are filled * with the background % color defined by member 'background_color' of the * image. ShearRotateImage % allocates the memory necessary for the new * Image structure and returns a % pointer to the new image. % % * ShearRotateImage() is based on the paper "A Fast Algorithm for General % * Raster Rotatation" by Alan W. Paeth. ShearRotateImage is adapted from a % * similar method based on the Paeth paper written by Michael Halle of the % * Spatial Imaging Group, MIT Media Lab. % % The format of the * ShearRotateImage method is: % % Image *ShearRotateImage(const Image * *image,const double degrees, % ExceptionInfo *exception) % % A * description of each parameter follows. % % o image: the image. % % o * degrees: Specifies the number of degrees to rotate the image. % % o * exception: return any errors or warnings in this structure. % */ MagickExport Image * ShearRotateImage(const Image * image, const double degrees, ExceptionInfo * exception) { Image * integral_image, *rotate_image; MagickBooleanType status; MagickRealType angle; PointInfo shear; RectangleInfo border_info, bounds; size_t height, rotations, shear_width, width; /* * Adjust rotation angle. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); angle = degrees - 360.0 * (ssize_t) (degrees / 360.0); if (angle < -45.0) angle += 360.0; for (rotations = 0; angle > 45.0; rotations++) angle -= 90.0; rotations %= 4; /* * Calculate shear equations. 
*/ integral_image = IntegralRotateImage(image, rotations, exception); if (integral_image == (Image *) NULL) ThrowImageException(ResourceLimitError, "MemoryAllocationFailed"); shear.x = (-tan((double)DegreesToRadians(angle) / 2.0)); shear.y = sin((double)DegreesToRadians(angle)); if ((shear.x == 0.0) && (shear.y == 0.0)) return (integral_image); if (SetImageStorageClass(integral_image, DirectClass, exception) == MagickFalse) { integral_image = DestroyImage(integral_image); return (integral_image); } if (integral_image->alpha_trait == UndefinedPixelTrait) (void)SetImageAlphaChannel(integral_image, OpaqueAlphaChannel, exception); /* * Compute maximum bounds for 3 shear operations. */ width = integral_image->columns; height = integral_image->rows; bounds.width = (size_t) floor(fabs((double)height * shear.x) + width + 0.5); bounds.height = (size_t) floor(fabs((double)bounds.width * shear.y) + height + 0.5); shear_width = (size_t) floor(fabs((double)bounds.height * shear.x) + bounds.width + 0.5); bounds.x = (ssize_t) floor((double)((shear_width > bounds.width) ? width : bounds.width - shear_width + 2) / 2.0 + 0.5); bounds.y = (ssize_t) floor(((double)bounds.height - height + 2) / 2.0 + 0.5); /* * Surround image with a border. */ integral_image->border_color = integral_image->background_color; integral_image->compose = CopyCompositeOp; border_info.width = (size_t) bounds.x; border_info.height = (size_t) bounds.y; rotate_image = BorderImage(integral_image, &border_info, image->compose, exception); integral_image = DestroyImage(integral_image); if (rotate_image == (Image *) NULL) ThrowImageException(ResourceLimitError, "MemoryAllocationFailed"); /* * Rotate the image. */ status = XShearImage(rotate_image, shear.x, width, height, bounds.x, (ssize_t) (rotate_image->rows - height) / 2, exception); if (status == MagickFalse) { rotate_image = DestroyImage(rotate_image); return ((Image *) NULL); } status = YShearImage(rotate_image, shear.y, bounds.width, height, (ssize_t) (rotate_image->columns - bounds.width) / 2, bounds.y, exception); if (status == MagickFalse) { rotate_image = DestroyImage(rotate_image); return ((Image *) NULL); } status = XShearImage(rotate_image, shear.x, bounds.width, bounds.height, (ssize_t) (rotate_image->columns - bounds.width) / 2, (ssize_t) (rotate_image->rows - bounds.height) / 2, exception); if (status == MagickFalse) { rotate_image = DestroyImage(rotate_image); return ((Image *) NULL); } status = CropToFitImage(&rotate_image, shear.x, shear.y, (MagickRealType) width, (MagickRealType) height, MagickTrue, exception); rotate_image->alpha_trait = image->alpha_trait; rotate_image->compose = image->compose; rotate_image->page.width = 0; rotate_image->page.height = 0; if (status == MagickFalse) rotate_image = DestroyImage(rotate_image); return (rotate_image); }
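For reference, the crop that CropToFitImage() computes after a full rotation reduces to the familiar rotated-rectangle bound, since the shear sequence composes to a pure rotation. A minimal standalone sketch, not ImageMagick code (rotated_bounds is an invented name):

#include <cmath>
#include <cstdio>

static void rotated_bounds(double w, double h, double theta,
  double *bw, double *bh)
{
  /* Bounding box of a w x h rectangle rotated by theta radians; this is
     what the corner transform in CropToFitImage() evaluates to in the
     rotate case. */
  const double c = fabs(cos(theta)), s = fabs(sin(theta));
  *bw = w * c + h * s;
  *bh = w * s + h * c;
}

int main(void)
{
  double bw, bh;
  rotated_bounds(640.0, 480.0, 30.0 * 3.14159265358979323846 / 180.0, &bw, &bh);
  printf("crop to %.1f x %.1f\n", bw, bh); /* ~794.3 x 735.7 */
  return 0;
}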
LinkedCells.h
/** * @file LinkedCells.h * * @author tchipevn * @date 17.02.2018 */ #pragma once #include "autopas/cells/FullParticleCell.h" #include "autopas/containers/CellBasedParticleContainer.h" #include "autopas/containers/CellBlock3D.h" #include "autopas/containers/CompatibleTraversals.h" #include "autopas/containers/LoadEstimators.h" #include "autopas/containers/cellPairTraversals/BalancedTraversal.h" #include "autopas/containers/linkedCells/traversals/LCTraversalInterface.h" #include "autopas/iterators/ParticleIterator.h" #include "autopas/iterators/RegionParticleIterator.h" #include "autopas/options/DataLayoutOption.h" #include "autopas/options/LoadEstimatorOption.h" #include "autopas/particles/OwnershipState.h" #include "autopas/utils/ArrayMath.h" #include "autopas/utils/ParticleCellHelpers.h" #include "autopas/utils/StringUtils.h" #include "autopas/utils/WrapOpenMP.h" #include "autopas/utils/inBox.h" namespace autopas { /** * LinkedCells class. * This class uses a list of neighboring cells to store the particles. * These cells dimensions are at least as large as the given cutoff radius, * therefore short-range interactions only need to be calculated between * particles in neighboring cells. * @tparam Particle type of the Particle */ template <class Particle> class LinkedCells : public CellBasedParticleContainer<FullParticleCell<Particle>> { public: /** * Type of the ParticleCell. */ using ParticleCell = FullParticleCell<Particle>; /** * Type of the Particle. */ using ParticleType = typename ParticleCell::ParticleType; /** * Constructor of the LinkedCells class * @param boxMin * @param boxMax * @param cutoff * @param skin * @param cellSizeFactor cell size factor relative to cutoff * @param loadEstimator the load estimation algorithm for balanced traversals. * By default all applicable traversals are allowed. 
*/ LinkedCells(const std::array<double, 3> boxMin, const std::array<double, 3> boxMax, const double cutoff, const double skin, const double cellSizeFactor = 1.0, LoadEstimatorOption loadEstimator = LoadEstimatorOption::squaredParticlesPerCell) : CellBasedParticleContainer<ParticleCell>(boxMin, boxMax, cutoff, skin), _cellBlock(this->_cells, boxMin, boxMax, cutoff + skin, cellSizeFactor), _loadEstimator(loadEstimator) {} /** * @copydoc ParticleContainerInterface::getContainerType() */ [[nodiscard]] ContainerOption getContainerType() const override { return ContainerOption::linkedCells; } /** * @copydoc ParticleContainerInterface::getParticleCellTypeEnum() */ [[nodiscard]] CellType getParticleCellTypeEnum() override { return CellType::FullParticleCell; } /** * @copydoc ParticleContainerInterface::addParticleImpl() */ void addParticleImpl(const ParticleType &p) override { ParticleCell &cell = _cellBlock.getContainingCell(p.getR()); cell.addParticle(p); } /** * @copydoc ParticleContainerInterface::addHaloParticleImpl() */ void addHaloParticleImpl(const ParticleType &haloParticle) override { ParticleType pCopy = haloParticle; pCopy.setOwnershipState(OwnershipState::halo); ParticleCell &cell = _cellBlock.getContainingCell(pCopy.getR()); cell.addParticle(pCopy); } /** * @copydoc ParticleContainerInterface::updateHaloParticle() */ bool updateHaloParticle(const ParticleType &haloParticle) override { ParticleType pCopy = haloParticle; pCopy.setOwnershipState(OwnershipState::halo); auto cells = _cellBlock.getNearbyHaloCells(pCopy.getR(), this->getSkin()); for (auto cellptr : cells) { bool updated = internal::checkParticleInCellAndUpdateByID(*cellptr, pCopy); if (updated) { return true; } } AutoPasLog(trace, "UpdateHaloParticle was not able to update particle: {}", pCopy.toString()); return false; } void deleteHaloParticles() override { _cellBlock.clearHaloCells(); } void rebuildNeighborLists(TraversalInterface *traversal) override { // nothing to do. } /** * Generates the load estimation function depending on _loadEstimator. * @return load estimator function object. */ BalancedTraversal::EstimatorFunction getLoadEstimatorFunction() { switch (this->_loadEstimator) { case LoadEstimatorOption::squaredParticlesPerCell: { return [&](const std::array<unsigned long, 3> &cellsPerDimension, const std::array<unsigned long, 3> &lowerCorner, const std::array<unsigned long, 3> &upperCorner) { return loadEstimators::squaredParticlesPerCell(this->_cells, cellsPerDimension, lowerCorner, upperCorner); }; } case LoadEstimatorOption::none: [[fallthrough]]; default: { return [&](const std::array<unsigned long, 3> &cellsPerDimension, const std::array<unsigned long, 3> &lowerCorner, const std::array<unsigned long, 3> &upperCorner) { return 1; }; } } } void iteratePairwise(TraversalInterface *traversal) override { // Check if traversal is allowed for this container and give it the data it needs. auto *traversalInterface = dynamic_cast<LCTraversalInterface<ParticleCell> *>(traversal); auto *cellPairTraversal = dynamic_cast<CellPairTraversal<ParticleCell> *>(traversal); if (auto *balancedTraversal = dynamic_cast<BalancedTraversal *>(traversal)) { balancedTraversal->setLoadEstimator(getLoadEstimatorFunction()); } if (traversalInterface && cellPairTraversal) { cellPairTraversal->setCellsToTraverse(this->_cells); } else { autopas::utils::ExceptionHandler::exception( "Trying to use a traversal of wrong type in LinkedCells::iteratePairwise. 
TraversalID: {}", traversal->getTraversalType()); } traversal->initTraversal(); traversal->traverseParticlePairs(); traversal->endTraversal(); } [[nodiscard]] std::vector<ParticleType> updateContainer() override { this->deleteHaloParticles(); std::vector<ParticleType> invalidParticles; #ifdef AUTOPAS_OPENMP #pragma omp parallel #endif // AUTOPAS_OPENMP { // private for each thread! std::vector<ParticleType> myInvalidParticles, myInvalidNotOwnedParticles; #ifdef AUTOPAS_OPENMP #pragma omp for #endif // AUTOPAS_OPENMP for (size_t cellId = 0; cellId < this->getCells().size(); ++cellId) { // Delete dummy particles of each cell. this->getCells()[cellId].deleteDummyParticles(); // if empty if (not this->getCells()[cellId].isNotEmpty()) continue; auto [cellLowerCorner, cellUpperCorner] = this->getCellBlock().getCellBoundingBox(cellId); for (auto &&pIter = this->getCells()[cellId].begin(); pIter.isValid(); ++pIter) { // if not in cell if (utils::notInBox(pIter->getR(), cellLowerCorner, cellUpperCorner)) { myInvalidParticles.push_back(*pIter); internal::deleteParticle(pIter); } } } // implicit barrier here // the barrier is needed because iterators are not threadsafe w.r.t. addParticle() // this loop is executed for every thread and thus parallel. Don't use #pragma omp for here! for (auto &&p : myInvalidParticles) { // if not in halo if (utils::inBox(p.getR(), this->getBoxMin(), this->getBoxMax())) { this->template addParticle<false>(p); } else { myInvalidNotOwnedParticles.push_back(p); } } #ifdef AUTOPAS_OPENMP #pragma omp critical #endif { // merge private vectors to global one. invalidParticles.insert(invalidParticles.end(), myInvalidNotOwnedParticles.begin(), myInvalidNotOwnedParticles.end()); } } return invalidParticles; } /** * @copydoc ParticleContainerInterface::getTraversalSelectorInfo() */ [[nodiscard]] TraversalSelectorInfo getTraversalSelectorInfo() const override { return TraversalSelectorInfo(this->getCellBlock().getCellsPerDimensionWithHalo(), this->getInteractionLength(), this->getCellBlock().getCellLength(), 0); } [[nodiscard]] ParticleIteratorWrapper<ParticleType, true> begin( IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override { return ParticleIteratorWrapper<ParticleType, true>( new internal::ParticleIterator<ParticleType, ParticleCell, true>(&this->_cells, 0, &_cellBlock, behavior)); } [[nodiscard]] ParticleIteratorWrapper<ParticleType, false> begin( IteratorBehavior behavior = IteratorBehavior::haloAndOwned) const override { return ParticleIteratorWrapper<ParticleType, false>( new internal::ParticleIterator<ParticleType, ParticleCell, false>(&this->_cells, 0, &_cellBlock, behavior)); } [[nodiscard]] ParticleIteratorWrapper<ParticleType, true> getRegionIterator( const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner, IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override { // We increase the search region by skin, as particles can move over cell borders. 
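// The stop indices are inclusive, hence the "+ 1" per dimension in
// numCellsOfInterest below; the z-y-x loops then flatten each cell
// coordinate through ThreeDimensionalMapping::threeToOneD().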
auto startIndex3D = this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::subScalar(lowerCorner, this->getSkin())); auto stopIndex3D = this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::addScalar(higherCorner, this->getSkin())); size_t numCellsOfInterest = (stopIndex3D[0] - startIndex3D[0] + 1) * (stopIndex3D[1] - startIndex3D[1] + 1) * (stopIndex3D[2] - startIndex3D[2] + 1); std::vector<size_t> cellsOfInterest(numCellsOfInterest); int i = 0; for (size_t z = startIndex3D[2]; z <= stopIndex3D[2]; ++z) { for (size_t y = startIndex3D[1]; y <= stopIndex3D[1]; ++y) { for (size_t x = startIndex3D[0]; x <= stopIndex3D[0]; ++x) { cellsOfInterest[i++] = utils::ThreeDimensionalMapping::threeToOneD({x, y, z}, this->_cellBlock.getCellsPerDimensionWithHalo()); } } } return ParticleIteratorWrapper<ParticleType, true>( new internal::RegionParticleIterator<ParticleType, ParticleCell, true>(&this->_cells, lowerCorner, higherCorner, cellsOfInterest, &_cellBlock, behavior)); } [[nodiscard]] ParticleIteratorWrapper<ParticleType, false> getRegionIterator( const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner, IteratorBehavior behavior = IteratorBehavior::haloAndOwned) const override { // We increase the search region by skin, as particles can move over cell borders. auto startIndex3D = this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::subScalar(lowerCorner, this->getSkin())); auto stopIndex3D = this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::addScalar(higherCorner, this->getSkin())); size_t numCellsOfInterest = (stopIndex3D[0] - startIndex3D[0] + 1) * (stopIndex3D[1] - startIndex3D[1] + 1) * (stopIndex3D[2] - startIndex3D[2] + 1); std::vector<size_t> cellsOfInterest(numCellsOfInterest); int i = 0; for (size_t z = startIndex3D[2]; z <= stopIndex3D[2]; ++z) { for (size_t y = startIndex3D[1]; y <= stopIndex3D[1]; ++y) { for (size_t x = startIndex3D[0]; x <= stopIndex3D[0]; ++x) { cellsOfInterest[i++] = utils::ThreeDimensionalMapping::threeToOneD({x, y, z}, this->_cellBlock.getCellsPerDimensionWithHalo()); } } } return ParticleIteratorWrapper<ParticleType, false>( new internal::RegionParticleIterator<ParticleType, ParticleCell, false>( &this->_cells, lowerCorner, higherCorner, cellsOfInterest, &_cellBlock, behavior)); } /** * Get the cell block, not supposed to be used except by verlet lists * @return the cell block */ internal::CellBlock3D<ParticleCell> &getCellBlock() { return _cellBlock; } /** * @copydoc getCellBlock() * @note const version */ const internal::CellBlock3D<ParticleCell> &getCellBlock() const { return _cellBlock; } /** * Returns reference to the data of LinkedCells * @return the data */ std::vector<ParticleCell> &getCells() { return this->_cells; } protected: /** * object to manage the block of cells. */ internal::CellBlock3D<ParticleCell> _cellBlock; /** * load estimation algorithm for balanced traversals. */ autopas::LoadEstimatorOption _loadEstimator; // ThreeDimensionalCellHandler }; } // namespace autopas
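A minimal usage sketch for the container defined above; it is illustrative only. 'MyParticle' is a placeholder template parameter rather than a type from this header, and the include path is an assumption based on the project layout.

#include <array>
#include "autopas/containers/linkedCells/LinkedCells.h"

template <class MyParticle>  // placeholder particle type, not from this header
void buildAndUpdate() {
  const std::array<double, 3> boxMin{0.0, 0.0, 0.0};
  const std::array<double, 3> boxMax{10.0, 10.0, 10.0};
  // Constructor parameters as declared above: boxMin, boxMax, cutoff, skin,
  // cellSizeFactor (loadEstimator left at its default).
  autopas::LinkedCells<MyParticle> cells(boxMin, boxMax, /*cutoff*/ 1.0,
                                         /*skin*/ 0.2, /*cellSizeFactor*/ 1.0);
  // Owned particles enter via addParticle(), halo copies via
  // addHaloParticle(). After particles move, updateContainer() re-bins the
  // ones that crossed cell borders and returns those that left the box
  // entirely, e.g. for a halo exchange.
  auto leavers = cells.updateContainer();
  (void)leavers;
}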
/** * @file LinkedCells.h * * @author tchipevn * @date 17.02.2018 */ #pragma once #include "autopas/cells/FullParticleCell.h" #include "autopas/containers/CellBasedParticleContainer.h" #include "autopas/containers/CellBlock3D.h" #include "autopas/containers/CompatibleTraversals.h" #include "autopas/containers/LoadEstimators.h" #include "autopas/containers/cellPairTraversals/BalancedTraversal.h" #include "autopas/containers/linkedCells/traversals/LCTraversalInterface.h" #include "autopas/iterators/ParticleIterator.h" #include "autopas/iterators/RegionParticleIterator.h" #include "autopas/options/DataLayoutOption.h" #include "autopas/options/LoadEstimatorOption.h" #include "autopas/particles/OwnershipState.h" #include "autopas/utils/ArrayMath.h" #include "autopas/utils/ParticleCellHelpers.h" #include "autopas/utils/StringUtils.h" #include "autopas/utils/WrapOpenMP.h" #include "autopas/utils/inBox.h" namespace autopas { /** * LinkedCells class. * This class uses a list of neighboring cells to store the particles. * These cells dimensions are at least as large as the given cutoff radius, * therefore short-range interactions only need to be calculated between * particles in neighboring cells. * @tparam Particle type of the Particle */ template <class Particle> class LinkedCells : public CellBasedParticleContainer<FullParticleCell<Particle>> { public: /** * Type of the ParticleCell. */ using ParticleCell = FullParticleCell<Particle>; /** * Type of the Particle. */ using ParticleType = typename ParticleCell::ParticleType; /** * Constructor of the LinkedCells class * @param boxMin * @param boxMax * @param cutoff * @param skin * @param cellSizeFactor cell size factor relative to cutoff * @param loadEstimator the load estimation algorithm for balanced traversals. * By default all applicable traversals are allowed. 
*/ LinkedCells(const std::array<double, 3> boxMin, const std::array<double, 3> boxMax, const double cutoff, const double skin, const double cellSizeFactor = 1.0, LoadEstimatorOption loadEstimator = LoadEstimatorOption::squaredParticlesPerCell) : CellBasedParticleContainer<ParticleCell>(boxMin, boxMax, cutoff, skin), _cellBlock(this->_cells, boxMin, boxMax, cutoff + skin, cellSizeFactor), _loadEstimator(loadEstimator) {} /** * @copydoc ParticleContainerInterface::getContainerType() */ [[nodiscard]] ContainerOption getContainerType() const override { return ContainerOption::linkedCells; } /** * @copydoc ParticleContainerInterface::getParticleCellTypeEnum() */ [[nodiscard]] CellType getParticleCellTypeEnum() override { return CellType::FullParticleCell; } /** * @copydoc ParticleContainerInterface::addParticleImpl() */ void addParticleImpl(const ParticleType &p) override { ParticleCell &cell = _cellBlock.getContainingCell(p.getR()); cell.addParticle(p); } /** * @copydoc ParticleContainerInterface::addHaloParticleImpl() */ void addHaloParticleImpl(const ParticleType &haloParticle) override { ParticleType pCopy = haloParticle; pCopy.setOwnershipState(OwnershipState::halo); ParticleCell &cell = _cellBlock.getContainingCell(pCopy.getR()); cell.addParticle(pCopy); } /** * @copydoc ParticleContainerInterface::updateHaloParticle() */ bool updateHaloParticle(const ParticleType &haloParticle) override { ParticleType pCopy = haloParticle; pCopy.setOwnershipState(OwnershipState::halo); auto cells = _cellBlock.getNearbyHaloCells(pCopy.getR(), this->getSkin()); for (auto cellptr : cells) { bool updated = internal::checkParticleInCellAndUpdateByID(*cellptr, pCopy); if (updated) { return true; } } AutoPasLog(trace, "UpdateHaloParticle was not able to update particle: {}", pCopy.toString()); return false; } void deleteHaloParticles() override { _cellBlock.clearHaloCells(); } void rebuildNeighborLists(TraversalInterface *traversal) override { // nothing to do. } /** * Generates the load estimation function depending on _loadEstimator. * @return load estimator function object. */ BalancedTraversal::EstimatorFunction getLoadEstimatorFunction() { switch (this->_loadEstimator) { case LoadEstimatorOption::squaredParticlesPerCell: { return [&](const std::array<unsigned long, 3> &cellsPerDimension, const std::array<unsigned long, 3> &lowerCorner, const std::array<unsigned long, 3> &upperCorner) { return loadEstimators::squaredParticlesPerCell(this->_cells, cellsPerDimension, lowerCorner, upperCorner); }; } case LoadEstimatorOption::none: [[fallthrough]]; default: { return [&](const std::array<unsigned long, 3> &cellsPerDimension, const std::array<unsigned long, 3> &lowerCorner, const std::array<unsigned long, 3> &upperCorner) { return 1; }; } } } void iteratePairwise(TraversalInterface *traversal) override { // Check if traversal is allowed for this container and give it the data it needs. auto *traversalInterface = dynamic_cast<LCTraversalInterface<ParticleCell> *>(traversal); auto *cellPairTraversal = dynamic_cast<CellPairTraversal<ParticleCell> *>(traversal); if (auto *balancedTraversal = dynamic_cast<BalancedTraversal *>(traversal)) { balancedTraversal->setLoadEstimator(getLoadEstimatorFunction()); } if (traversalInterface && cellPairTraversal) { cellPairTraversal->setCellsToTraverse(this->_cells); } else { autopas::utils::ExceptionHandler::exception( "Trying to use a traversal of wrong type in LinkedCells::iteratePairwise. 
TraversalID: {}", traversal->getTraversalType()); } traversal->initTraversal(); traversal->traverseParticlePairs(); traversal->endTraversal(); } [[nodiscard]] std::vector<ParticleType> updateContainer() override { this->deleteHaloParticles(); std::vector<ParticleType> invalidParticles; { // private for each thread! std::vector<ParticleType> myInvalidParticles, myInvalidNotOwnedParticles; for (size_t cellId = 0; cellId < this->getCells().size(); ++cellId) { // Delete dummy particles of each cell. this->getCells()[cellId].deleteDummyParticles(); // if empty if (not this->getCells()[cellId].isNotEmpty()) continue; auto [cellLowerCorner, cellUpperCorner] = this->getCellBlock().getCellBoundingBox(cellId); for (auto &&pIter = this->getCells()[cellId].begin(); pIter.isValid(); ++pIter) { // if not in cell if (utils::notInBox(pIter->getR(), cellLowerCorner, cellUpperCorner)) { myInvalidParticles.push_back(*pIter); internal::deleteParticle(pIter); } } } // implicit barrier here // the barrier is needed because iterators are not threadsafe w.r.t. addParticle() // this loop is executed for every thread and thus parallel. Don't use for (auto &&p : myInvalidParticles) { // if not in halo if (utils::inBox(p.getR(), this->getBoxMin(), this->getBoxMax())) { this->template addParticle<false>(p); } else { myInvalidNotOwnedParticles.push_back(p); } } { // merge private vectors to global one. invalidParticles.insert(invalidParticles.end(), myInvalidNotOwnedParticles.begin(), myInvalidNotOwnedParticles.end()); } } return invalidParticles; } /** * @copydoc ParticleContainerInterface::getTraversalSelectorInfo() */ [[nodiscard]] TraversalSelectorInfo getTraversalSelectorInfo() const override { return TraversalSelectorInfo(this->getCellBlock().getCellsPerDimensionWithHalo(), this->getInteractionLength(), this->getCellBlock().getCellLength(), 0); } [[nodiscard]] ParticleIteratorWrapper<ParticleType, true> begin( IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override { return ParticleIteratorWrapper<ParticleType, true>( new internal::ParticleIterator<ParticleType, ParticleCell, true>(&this->_cells, 0, &_cellBlock, behavior)); } [[nodiscard]] ParticleIteratorWrapper<ParticleType, false> begin( IteratorBehavior behavior = IteratorBehavior::haloAndOwned) const override { return ParticleIteratorWrapper<ParticleType, false>( new internal::ParticleIterator<ParticleType, ParticleCell, false>(&this->_cells, 0, &_cellBlock, behavior)); } [[nodiscard]] ParticleIteratorWrapper<ParticleType, true> getRegionIterator( const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner, IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override { // We increase the search region by skin, as particles can move over cell borders. 
auto startIndex3D = this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::subScalar(lowerCorner, this->getSkin())); auto stopIndex3D = this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::addScalar(higherCorner, this->getSkin())); size_t numCellsOfInterest = (stopIndex3D[0] - startIndex3D[0] + 1) * (stopIndex3D[1] - startIndex3D[1] + 1) * (stopIndex3D[2] - startIndex3D[2] + 1); std::vector<size_t> cellsOfInterest(numCellsOfInterest); int i = 0; for (size_t z = startIndex3D[2]; z <= stopIndex3D[2]; ++z) { for (size_t y = startIndex3D[1]; y <= stopIndex3D[1]; ++y) { for (size_t x = startIndex3D[0]; x <= stopIndex3D[0]; ++x) { cellsOfInterest[i++] = utils::ThreeDimensionalMapping::threeToOneD({x, y, z}, this->_cellBlock.getCellsPerDimensionWithHalo()); } } } return ParticleIteratorWrapper<ParticleType, true>( new internal::RegionParticleIterator<ParticleType, ParticleCell, true>(&this->_cells, lowerCorner, higherCorner, cellsOfInterest, &_cellBlock, behavior)); } [[nodiscard]] ParticleIteratorWrapper<ParticleType, false> getRegionIterator( const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner, IteratorBehavior behavior = IteratorBehavior::haloAndOwned) const override { // We increase the search region by skin, as particles can move over cell borders. auto startIndex3D = this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::subScalar(lowerCorner, this->getSkin())); auto stopIndex3D = this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::addScalar(higherCorner, this->getSkin())); size_t numCellsOfInterest = (stopIndex3D[0] - startIndex3D[0] + 1) * (stopIndex3D[1] - startIndex3D[1] + 1) * (stopIndex3D[2] - startIndex3D[2] + 1); std::vector<size_t> cellsOfInterest(numCellsOfInterest); int i = 0; for (size_t z = startIndex3D[2]; z <= stopIndex3D[2]; ++z) { for (size_t y = startIndex3D[1]; y <= stopIndex3D[1]; ++y) { for (size_t x = startIndex3D[0]; x <= stopIndex3D[0]; ++x) { cellsOfInterest[i++] = utils::ThreeDimensionalMapping::threeToOneD({x, y, z}, this->_cellBlock.getCellsPerDimensionWithHalo()); } } } return ParticleIteratorWrapper<ParticleType, false>( new internal::RegionParticleIterator<ParticleType, ParticleCell, false>( &this->_cells, lowerCorner, higherCorner, cellsOfInterest, &_cellBlock, behavior)); } /** * Get the cell block, not supposed to be used except by verlet lists * @return the cell block */ internal::CellBlock3D<ParticleCell> &getCellBlock() { return _cellBlock; } /** * @copydoc getCellBlock() * @note const version */ const internal::CellBlock3D<ParticleCell> &getCellBlock() const { return _cellBlock; } /** * Returns reference to the data of LinkedCells * @return the data */ std::vector<ParticleCell> &getCells() { return this->_cells; } protected: /** * object to manage the block of cells. */ internal::CellBlock3D<ParticleCell> _cellBlock; /** * load estimation algorithm for balanced traversals. */ autopas::LoadEstimatorOption _loadEstimator; // ThreeDimensionalCellHandler }; } // namespace autopas
/** * @file LinkedCells.h * * @author tchipevn * @date 17.02.2018 */ #pragma once #include "autopas/cells/FullParticleCell.h" #include "autopas/containers/CellBasedParticleContainer.h" #include "autopas/containers/CellBlock3D.h" #include "autopas/containers/CompatibleTraversals.h" #include "autopas/containers/LoadEstimators.h" #include "autopas/containers/cellPairTraversals/BalancedTraversal.h" #include "autopas/containers/linkedCells/traversals/LCTraversalInterface.h" #include "autopas/iterators/ParticleIterator.h" #include "autopas/iterators/RegionParticleIterator.h" #include "autopas/options/DataLayoutOption.h" #include "autopas/options/LoadEstimatorOption.h" #include "autopas/particles/OwnershipState.h" #include "autopas/utils/ArrayMath.h" #include "autopas/utils/ParticleCellHelpers.h" #include "autopas/utils/StringUtils.h" #include "autopas/utils/WrapOpenMP.h" #include "autopas/utils/inBox.h" namespace autopas { /** * LinkedCells class. * This class uses a list of neighboring cells to store the particles. * These cells dimensions are at least as large as the given cutoff radius, * therefore short-range interactions only need to be calculated between * particles in neighboring cells. * @tparam Particle type of the Particle */ template <class Particle> class LinkedCells : public CellBasedParticleContainer<FullParticleCell<Particle>> { public: /** * Type of the ParticleCell. */ using ParticleCell = FullParticleCell<Particle>; /** * Type of the Particle. */ using ParticleType = typename ParticleCell::ParticleType; /** * Constructor of the LinkedCells class * @param boxMin * @param boxMax * @param cutoff * @param skin * @param cellSizeFactor cell size factor relative to cutoff * @param loadEstimator the load estimation algorithm for balanced traversals. * By default all applicable traversals are allowed. 
*/ LinkedCells(const std::array<double, 3> boxMin, const std::array<double, 3> boxMax, const double cutoff, const double skin, const double cellSizeFactor = 1.0, LoadEstimatorOption loadEstimator = LoadEstimatorOption::squaredParticlesPerCell) : CellBasedParticleContainer<ParticleCell>(boxMin, boxMax, cutoff, skin), _cellBlock(this->_cells, boxMin, boxMax, cutoff + skin, cellSizeFactor), _loadEstimator(loadEstimator) {} /** * @copydoc ParticleContainerInterface::getContainerType() */ [[nodiscard]] ContainerOption getContainerType() const override { return ContainerOption::linkedCells; } /** * @copydoc ParticleContainerInterface::getParticleCellTypeEnum() */ [[nodiscard]] CellType getParticleCellTypeEnum() override { return CellType::FullParticleCell; } /** * @copydoc ParticleContainerInterface::addParticleImpl() */ void addParticleImpl(const ParticleType &p) override { ParticleCell &cell = _cellBlock.getContainingCell(p.getR()); cell.addParticle(p); } /** * @copydoc ParticleContainerInterface::addHaloParticleImpl() */ void addHaloParticleImpl(const ParticleType &haloParticle) override { ParticleType pCopy = haloParticle; pCopy.setOwnershipState(OwnershipState::halo); ParticleCell &cell = _cellBlock.getContainingCell(pCopy.getR()); cell.addParticle(pCopy); } /** * @copydoc ParticleContainerInterface::updateHaloParticle() */ bool updateHaloParticle(const ParticleType &haloParticle) override { ParticleType pCopy = haloParticle; pCopy.setOwnershipState(OwnershipState::halo); auto cells = _cellBlock.getNearbyHaloCells(pCopy.getR(), this->getSkin()); for (auto cellptr : cells) { bool updated = internal::checkParticleInCellAndUpdateByID(*cellptr, pCopy); if (updated) { return true; } } AutoPasLog(trace, "UpdateHaloParticle was not able to update particle: {}", pCopy.toString()); return false; } void deleteHaloParticles() override { _cellBlock.clearHaloCells(); } void rebuildNeighborLists(TraversalInterface *traversal) override { // nothing to do. } /** * Generates the load estimation function depending on _loadEstimator. * @return load estimator function object. */ BalancedTraversal::EstimatorFunction getLoadEstimatorFunction() { switch (this->_loadEstimator) { case LoadEstimatorOption::squaredParticlesPerCell: { return [&](const std::array<unsigned long, 3> &cellsPerDimension, const std::array<unsigned long, 3> &lowerCorner, const std::array<unsigned long, 3> &upperCorner) { return loadEstimators::squaredParticlesPerCell(this->_cells, cellsPerDimension, lowerCorner, upperCorner); }; } case LoadEstimatorOption::none: [[fallthrough]]; default: { return [&](const std::array<unsigned long, 3> &cellsPerDimension, const std::array<unsigned long, 3> &lowerCorner, const std::array<unsigned long, 3> &upperCorner) { return 1; }; } } } void iteratePairwise(TraversalInterface *traversal) override { // Check if traversal is allowed for this container and give it the data it needs. auto *traversalInterface = dynamic_cast<LCTraversalInterface<ParticleCell> *>(traversal); auto *cellPairTraversal = dynamic_cast<CellPairTraversal<ParticleCell> *>(traversal); if (auto *balancedTraversal = dynamic_cast<BalancedTraversal *>(traversal)) { balancedTraversal->setLoadEstimator(getLoadEstimatorFunction()); } if (traversalInterface && cellPairTraversal) { cellPairTraversal->setCellsToTraverse(this->_cells); } else { autopas::utils::ExceptionHandler::exception( "Trying to use a traversal of wrong type in LinkedCells::iteratePairwise. 
TraversalID: {}", traversal->getTraversalType()); } traversal->initTraversal(); traversal->traverseParticlePairs(); traversal->endTraversal(); } [[nodiscard]] std::vector<ParticleType> updateContainer() override { this->deleteHaloParticles(); std::vector<ParticleType> invalidParticles; #ifdef AUTOPAS_OPENMP #pragma omp parallel #endif // AUTOPAS_OPENMP { // private for each thread! std::vector<ParticleType> myInvalidParticles, myInvalidNotOwnedParticles; #ifdef AUTOPAS_OPENMP #pragma omp for #endif // AUTOPAS_OPENMP for (size_t cellId = 0; cellId < this->getCells().size(); ++cellId) { // Delete dummy particles of each cell. this->getCells()[cellId].deleteDummyParticles(); // if empty if (not this->getCells()[cellId].isNotEmpty()) continue; auto [cellLowerCorner, cellUpperCorner] = this->getCellBlock().getCellBoundingBox(cellId); for (auto &&pIter = this->getCells()[cellId].begin(); pIter.isValid(); ++pIter) { // if not in cell if (utils::notInBox(pIter->getR(), cellLowerCorner, cellUpperCorner)) { myInvalidParticles.push_back(*pIter); internal::deleteParticle(pIter); } } } // implicit barrier here // the barrier is needed because iterators are not threadsafe w.r.t. addParticle() // this loop is executed for every thread and thus parallel. Don't use #pragma omp for here! for (auto &&p : myInvalidParticles) { // if not in halo if (utils::inBox(p.getR(), this->getBoxMin(), this->getBoxMax())) { this->template addParticle<false>(p); } else { myInvalidNotOwnedParticles.push_back(p); } } #ifdef AUTOPAS_OPENMP #pragma omp critical #endif { // merge private vectors to global one. invalidParticles.insert(invalidParticles.end(), myInvalidNotOwnedParticles.begin(), myInvalidNotOwnedParticles.end()); } } return invalidParticles; } /** * @copydoc ParticleContainerInterface::getTraversalSelectorInfo() */ [[nodiscard]] TraversalSelectorInfo getTraversalSelectorInfo() const override { return TraversalSelectorInfo(this->getCellBlock().getCellsPerDimensionWithHalo(), this->getInteractionLength(), this->getCellBlock().getCellLength(), 0); } [[nodiscard]] ParticleIteratorWrapper<ParticleType, true> begin( IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override { return ParticleIteratorWrapper<ParticleType, true>( new internal::ParticleIterator<ParticleType, ParticleCell, true>(&this->_cells, 0, &_cellBlock, behavior)); } [[nodiscard]] ParticleIteratorWrapper<ParticleType, false> begin( IteratorBehavior behavior = IteratorBehavior::haloAndOwned) const override { return ParticleIteratorWrapper<ParticleType, false>( new internal::ParticleIterator<ParticleType, ParticleCell, false>(&this->_cells, 0, &_cellBlock, behavior)); } [[nodiscard]] ParticleIteratorWrapper<ParticleType, true> getRegionIterator( const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner, IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override { // We increase the search region by skin, as particles can move over cell borders. 
auto startIndex3D = this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::subScalar(lowerCorner, this->getSkin())); auto stopIndex3D = this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::addScalar(higherCorner, this->getSkin())); size_t numCellsOfInterest = (stopIndex3D[0] - startIndex3D[0] + 1) * (stopIndex3D[1] - startIndex3D[1] + 1) * (stopIndex3D[2] - startIndex3D[2] + 1); std::vector<size_t> cellsOfInterest(numCellsOfInterest); int i = 0; for (size_t z = startIndex3D[2]; z <= stopIndex3D[2]; ++z) { for (size_t y = startIndex3D[1]; y <= stopIndex3D[1]; ++y) { for (size_t x = startIndex3D[0]; x <= stopIndex3D[0]; ++x) { cellsOfInterest[i++] = utils::ThreeDimensionalMapping::threeToOneD({x, y, z}, this->_cellBlock.getCellsPerDimensionWithHalo()); } } } return ParticleIteratorWrapper<ParticleType, true>( new internal::RegionParticleIterator<ParticleType, ParticleCell, true>(&this->_cells, lowerCorner, higherCorner, cellsOfInterest, &_cellBlock, behavior)); } [[nodiscard]] ParticleIteratorWrapper<ParticleType, false> getRegionIterator( const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner, IteratorBehavior behavior = IteratorBehavior::haloAndOwned) const override { // We increase the search region by skin, as particles can move over cell borders. auto startIndex3D = this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::subScalar(lowerCorner, this->getSkin())); auto stopIndex3D = this->_cellBlock.get3DIndexOfPosition(utils::ArrayMath::addScalar(higherCorner, this->getSkin())); size_t numCellsOfInterest = (stopIndex3D[0] - startIndex3D[0] + 1) * (stopIndex3D[1] - startIndex3D[1] + 1) * (stopIndex3D[2] - startIndex3D[2] + 1); std::vector<size_t> cellsOfInterest(numCellsOfInterest); int i = 0; for (size_t z = startIndex3D[2]; z <= stopIndex3D[2]; ++z) { for (size_t y = startIndex3D[1]; y <= stopIndex3D[1]; ++y) { for (size_t x = startIndex3D[0]; x <= stopIndex3D[0]; ++x) { cellsOfInterest[i++] = utils::ThreeDimensionalMapping::threeToOneD({x, y, z}, this->_cellBlock.getCellsPerDimensionWithHalo()); } } } return ParticleIteratorWrapper<ParticleType, false>( new internal::RegionParticleIterator<ParticleType, ParticleCell, false>( &this->_cells, lowerCorner, higherCorner, cellsOfInterest, &_cellBlock, behavior)); } /** * Get the cell block, not supposed to be used except by verlet lists * @return the cell block */ internal::CellBlock3D<ParticleCell> &getCellBlock() { return _cellBlock; } /** * @copydoc getCellBlock() * @note const version */ const internal::CellBlock3D<ParticleCell> &getCellBlock() const { return _cellBlock; } /** * Returns reference to the data of LinkedCells * @return the data */ std::vector<ParticleCell> &getCells() { return this->_cells; } protected: /** * object to manage the block of cells. */ internal::CellBlock3D<ParticleCell> _cellBlock; /** * load estimation algorithm for balanced traversals. */ autopas::LoadEstimatorOption _loadEstimator; // ThreeDimensionalCellHandler }; } // namespace autopas
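Editor's note: getRegionIterator() above flattens a 3D range of cell indices via utils::ThreeDimensionalMapping::threeToOneD. A sketch of the arithmetic it relies on (the row-major ordering with x fastest is an assumption chosen to match the loop nest above, which iterates z outermost and x innermost):

#include <array>
#include <cstddef>

// index = x + Nx * (y + Ny * z), i.e. x varies fastest
std::size_t threeToOneD(const std::array<std::size_t, 3> &idx,
                        const std::array<std::size_t, 3> &cellsPerDim) {
  return idx[0] + cellsPerDim[0] * (idx[1] + cellsPerDim[1] * idx[2]);
}

With this mapping, the triple loop in getRegionIterator() fills cellsOfInterest in ascending 1D order, so the region iterator can walk the cells sequentially.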
openmp_wrapper.h
/*! * Copyright (c) 2017 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_OPENMP_WRAPPER_H_ #define LIGHTGBM_OPENMP_WRAPPER_H_ #ifdef _OPENMP #include <omp.h> #include <LightGBM/utils/log.h> #include <exception> #include <memory> #include <mutex> #include <stdexcept> #include <vector> inline int OMP_NUM_THREADS() { int ret = 1; #pragma omp parallel #pragma omp master { ret = omp_get_num_threads(); } return ret; } class ThreadExceptionHelper { public: ThreadExceptionHelper() { ex_ptr_ = nullptr; } ~ThreadExceptionHelper() { ReThrow(); } void ReThrow() { if (ex_ptr_ != nullptr) { std::rethrow_exception(ex_ptr_); } } void CaptureException() { // only catch first exception. if (ex_ptr_ != nullptr) { return; } std::unique_lock<std::mutex> guard(lock_); if (ex_ptr_ != nullptr) { return; } ex_ptr_ = std::current_exception(); } private: std::exception_ptr ex_ptr_; std::mutex lock_; }; #define OMP_INIT_EX() ThreadExceptionHelper omp_except_helper #define OMP_LOOP_EX_BEGIN() try { #define OMP_LOOP_EX_END() \ } \ catch (std::exception & ex) { \ Log::Warning(ex.what()); \ omp_except_helper.CaptureException(); \ } \ catch (...) { \ omp_except_helper.CaptureException(); \ } #define OMP_THROW_EX() omp_except_helper.ReThrow() #else #ifdef _MSC_VER #pragma warning(disable : 4068) // disable unknown pragma warning #endif #ifdef __cplusplus extern "C" { #endif /** Fall here if no OPENMP support, so just simulate a single thread running. All #pragma omp should be ignored by the compiler **/ inline void omp_set_num_threads(int) {} inline int omp_get_num_threads() {return 1;} inline int omp_get_thread_num() {return 0;} inline int OMP_NUM_THREADS() { return 1; } #ifdef __cplusplus }; // extern "C" #endif #define OMP_INIT_EX() #define OMP_LOOP_EX_BEGIN() #define OMP_LOOP_EX_END() #define OMP_THROW_EX() #endif #endif /* LIGHTGBM_OPENMP_WRAPPER_H_ */
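Editor's note: the OMP_* macros defined in this header exist because C++ exceptions must not escape an OpenMP parallel region. A minimal usage sketch (assumes this header is included; do_work is a hypothetical user function that may throw):

#include "openmp_wrapper.h"  // the header above

void do_work(int i);  // assumed user function, may throw

void process_all(int n) {
  OMP_INIT_EX();  // declares the ThreadExceptionHelper
#pragma omp parallel for
  for (int i = 0; i < n; ++i) {
    OMP_LOOP_EX_BEGIN();  // opens a try block around the iteration body
    do_work(i);
    OMP_LOOP_EX_END();    // catches and captures the first exception thrown
  }
  OMP_THROW_EX();  // rethrows the captured exception outside the parallel region
}

The destructor of ThreadExceptionHelper also calls ReThrow(), so a forgotten OMP_THROW_EX() still surfaces the exception when omp_except_helper goes out of scope.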
#ifndef LIGHTGBM_OPENMP_WRAPPER_H_ #define LIGHTGBM_OPENMP_WRAPPER_H_ #endif /* LIGHTGBM_OPENMP_WRAPPER_H_ */
#ifndef LIGHTGBM_OPENMP_WRAPPER_H_ #define LIGHTGBM_OPENMP_WRAPPER_H_ #ifdef _OPENMP #include <omp.h> #include <LightGBM/utils/log.h> #include <exception> #include <memory> #include <mutex> #include <stdexcept> #include <vector> inline int OMP_NUM_THREADS() { int ret = 1; #pragma omp parallel #pragma omp master { ret = omp_get_num_threads(); } return ret; } class ThreadExceptionHelper { public: ThreadExceptionHelper() { ex_ptr_ = nullptr; } ~ThreadExceptionHelper() { ReThrow(); } void ReThrow() { if (ex_ptr_ != nullptr) { std::rethrow_exception(ex_ptr_); } } void CaptureException() { // only catch first exception. if (ex_ptr_ != nullptr) { return; } std::unique_lock<std::mutex> guard(lock_); if (ex_ptr_ != nullptr) { return; } ex_ptr_ = std::current_exception(); } private: std::exception_ptr ex_ptr_; std::mutex lock_; }; #define OMP_INIT_EX() ThreadExceptionHelper omp_except_helper #define OMP_LOOP_EX_BEGIN() try { #define OMP_LOOP_EX_END() \ } \ catch (std::exception & ex) { \ Log::Warning(ex.what()); \ omp_except_helper.CaptureException(); \ } \ catch (...) { \ omp_except_helper.CaptureException(); \ } #define OMP_THROW_EX() omp_except_helper.ReThrow() #else #ifdef _MSC_VER #pragma warning(disable : 4068) // disable unknown pragma warning #endif #ifdef __cplusplus extern "C" { #endif /** Fall here if no OPENMP support, so just simulate a single thread running. All #pragma omp should be ignored by the compiler **/ inline void omp_set_num_threads(int) {} inline int omp_get_num_threads() { return 1; } inline int omp_get_thread_num() { return 0; } inline int OMP_NUM_THREADS() { return 1; } #ifdef __cplusplus }; // extern "C" #endif #define OMP_INIT_EX() #define OMP_LOOP_EX_BEGIN() #define OMP_LOOP_EX_END() #define OMP_THROW_EX() #endif #endif /* LIGHTGBM_OPENMP_WRAPPER_H_ */
omp_for_schedule_static_3.c
<ompts:test> <ompts:testdescription>Test which checks the static option of the omp for schedule directive, verifying that the chunk distribution across several loop regions is the same as specified in the OpenMP standard version 3.0.</ompts:testdescription> <ompts:ompversion>3.0</ompts:ompversion> <ompts:directive>omp for schedule(static)</ompts:directive> <ompts:dependences>omp for nowait,omp flush,omp critical,omp single</ompts:dependences> <ompts:testcode> #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include "omp_testsuite.h" #include "omp_my_sleep.h" #define NUMBER_OF_THREADS 10 #define CFSMAX_SIZE 1000 #define MAX_TIME 0.01 #ifdef SLEEPTIME #undef SLEEPTIME #define SLEEPTIME 0.0005 #endif int <ompts:testcode:functionname>omp_for_schedule_static_3</ompts:testcode:functionname> (FILE * logFile) { int threads; int i,lasttid; <ompts:orphan:vars> int * tids; int * tids2; int notout; int maxiter; int chunk_size; </ompts:orphan:vars> int counter = 0; int tmp_count=1; int lastthreadsstarttid = -1; int result = 1; chunk_size = 7; tids = (int *) malloc (sizeof (int) * (CFSMAX_SIZE + 1)); notout = 1; maxiter = 0; #pragma omp parallel shared(tids,counter) { /* begin of parallel*/ #pragma omp single { threads = omp_get_num_threads (); } /* end of single */ } /* end of parallel */ if (threads < 2) { printf ("This test only works with at least two threads"); fprintf (logFile,"This test only works with at least two threads"); return 0; } else { fprintf (logFile,"Using an internal count of %d\nUsing a specified chunksize of %d\n", CFSMAX_SIZE, chunk_size); tids[CFSMAX_SIZE] = -1; /* setting endflag */ #pragma omp parallel shared(tids) { /* begin of parallel */ <ompts:orphan> double count; int tid; int j; tid = omp_get_thread_num (); #pragma omp for nowait <ompts:check>schedule(static,chunk_size)</ompts:check> for(j = 0; j < CFSMAX_SIZE; ++j) { count = 0.; #pragma omp flush(maxiter) if (j > maxiter) { #pragma omp critical { maxiter = j; } /* end of critical */ } /*printf ("thread %d sleeping\n", tid);*/ while (notout && (count < MAX_TIME) && (maxiter == j)) { #pragma omp flush(maxiter,notout) my_sleep (SLEEPTIME); count += SLEEPTIME; printf("."); } #ifdef VERBOSE if (count > 0.) printf(" waited %lf s\n", count); #endif /*printf ("thread %d awake\n", tid);*/ tids[j] = tid; #ifdef VERBOSE printf("%d finished by %d\n",j,tid); #endif } /* end of for */ notout = 0; #pragma omp flush(maxiter,notout) </ompts:orphan> } /* end of parallel */ /**** analysing the data in array tids ****/ lasttid = tids[0]; tmp_count = 0; for (i = 0; i < CFSMAX_SIZE + 1; ++i) { /* If the work was done by the same thread increase tmp_count by one. */ if (tids[i] == lasttid) { tmp_count++; #ifdef VERBOSE fprintf (logFile, "%d: %d \n", i, tids[i]); #endif continue; } /* Check if the next thread has the right thread number. When finding * threadnumber -1 the end should be reached. */ if (tids[i] == (lasttid + 1) % threads || tids[i] == -1) { /* checking for the right chunk size */ if (tmp_count == chunk_size) { tmp_count = 1; lasttid = tids[i]; #ifdef VERBOSE fprintf (logFile, "OK\n"); #endif } /* If the chunk size was wrong, check if the end was reached */ else { if (tids[i] == -1) { if (i == CFSMAX_SIZE) { fprintf (logFile, "Last thread had chunk size %d\n", tmp_count); break; } else { fprintf (logFile, "ERROR: Last thread (thread with number -1) was found before the end.\n"); result = 0; } } else { fprintf (logFile, "ERROR: chunk size was %d. 
(assigned was %d)\n", tmp_count, chunk_size); result = 0; } } } else { fprintf(logFile, "ERROR: Found thread with number %d (should be between 0 and %d).", tids[i], threads - 1); result = 0; } #ifdef VERBOSE fprintf (logFile, "%d: %d \n", i, tids[i]); #endif } } /* Now we check if several loop regions in one parallel region have the same * logical assignment of chunks to threads. * We use the nowait clause to increase the probability to get an error. */ /* First we allocate some more memory */ free (tids); tids = (int *) malloc (sizeof (int) * LOOPCOUNT); tids2 = (int *) malloc (sizeof (int) * LOOPCOUNT); #pragma omp parallel { <ompts:orphan> { int n; #pragma omp for <ompts:check>schedule(static)</ompts:check> nowait for (n = 0; n < LOOPCOUNT; n++) { if (LOOPCOUNT == n + 1 ) my_sleep(SLEEPTIME); tids[n] = omp_get_thread_num(); } } </ompts:orphan> <ompts:orphan> { int m; #pragma omp for <ompts:check>schedule(static)</ompts:check> nowait for (m = 1; m <= LOOPCOUNT; m++) { tids2[m-1] = omp_get_thread_num(); } } </ompts:orphan> } for (i = 0; i < LOOPCOUNT; i++) if (tids[i] != tids2[i]) { fprintf (logFile, "Chunk no. %d was assigned once to thread %d and later to thread %d.\n", i, tids[i],tids2[i]); result = 0; } free (tids); free (tids2); return result; } </ompts:testcode> </ompts:test>
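Editor's note: the ownership rule this test verifies is that schedule(static, chunk_size) deals chunks to threads round-robin in thread-number order, and that two loops with identical schedule and iteration count get an identical assignment. A small sketch of the expected-owner arithmetic (illustrative helper, not part of the test suite):

// Iterations [k*chunk_size, (k+1)*chunk_size) form chunk k, which
// schedule(static, chunk_size) assigns to thread k % nthreads.
int expected_owner(int j, int chunk_size, int nthreads) {
  return (j / chunk_size) % nthreads;
}
// e.g. chunk_size = 7, 4 threads: iterations 0..6 -> thread 0,
// 7..13 -> thread 1, ..., 28..34 -> thread 0 again; only the very
// last chunk may be shorter than chunk_size.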
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_ #define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_ #include <rabit/rabit.h> #include <xgboost/base.h> #include <xgboost/tree_updater.h> #include <vector> #include <algorithm> #include <string> #include <limits> #include <utility> #include "./param.h" #include "../common/io.h" #include "../common/random.h" #include "../common/quantile.h" namespace xgboost { namespace tree { /*! \brief base tree maker class that defines common operations needed in tree making */ class BaseMaker : public TreeUpdater { public: void Init(const std::vector<std::pair<std::string, std::string>> &args) override { param_.InitAllowUnknown(args); } protected: // helper to collect and query feature meta information struct FMetaHelper { public: /*! \brief find type of each feature, use column format */ inline void InitByCol(DMatrix *p_fmat, const RegTree &tree) { fminmax_.resize(tree.param.num_feature * 2); std::fill(fminmax_.begin(), fminmax_.end(), -std::numeric_limits<bst_float>::max()); // start accumulating statistics for (const auto &batch : p_fmat->GetSortedColumnBatches()) { for (bst_uint fid = 0; fid < batch.Size(); ++fid) { auto c = batch[fid]; if (c.size() != 0) { CHECK_LT(fid * 2, fminmax_.size()); fminmax_[fid * 2 + 0] = std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]); fminmax_[fid * 2 + 1] = std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]); } } } } /*! \brief synchronize the information */ inline void SyncInfo() { rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size()); } // get feature type, 0: empty 1: binary 2: real inline int Type(bst_uint fid) const { CHECK_LT(fid * 2 + 1, fminmax_.size()) << "FeatHelper fid exceed query bound "; bst_float a = fminmax_[fid * 2]; bst_float b = fminmax_[fid * 2 + 1]; if (a == -std::numeric_limits<bst_float>::max()) return 0; if (-a == b) { return 1; } else { return 2; } } inline bst_float MaxValue(bst_uint fid) const { return fminmax_[fid * 2 + 1]; } inline void SampleCol(float p, std::vector<bst_uint> *p_findex) const { std::vector<bst_uint> &findex = *p_findex; findex.clear(); for (size_t i = 0; i < fminmax_.size(); i += 2) { const auto fid = static_cast<bst_uint>(i / 2); if (this->Type(fid) != 0) findex.push_back(fid); } auto n = static_cast<unsigned>(p * findex.size()); std::shuffle(findex.begin(), findex.end(), common::GlobalRandom()); findex.resize(n); // sync the findex if it is subsample std::string s_cache; common::MemoryBufferStream fc(&s_cache); dmlc::Stream &fs = fc; if (rabit::GetRank() == 0) { fs.Write(findex); } rabit::Broadcast(&s_cache, 0); fs.Read(&findex); } private: std::vector<bst_float> fminmax_; }; // ------ static helper functions ------ // helper function to get to next level of the tree /*! \brief this is a helper function for row based data */ inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) { const RegTree::Node &n = tree[nid]; bst_uint findex = n.SplitIndex(); for (const auto &ins : inst) { if (findex == ins.index) { if (ins.fvalue < n.SplitCond()) { return n.LeftChild(); } else { return n.RightChild(); } } } return n.DefaultChild(); } // ------ class member helpers ------ /*! \brief initialize temp data structure */ inline void InitData(const std::vector<GradientPair> &gpair, const DMatrix &fmat, const RegTree &tree) { CHECK_EQ(tree.param.num_nodes, tree.param.num_roots) << "TreeMaker: can only grow new tree"; const std::vector<unsigned> &root_index = fmat.Info().root_index_; { // setup position position_.resize(gpair.size()); if (root_index.size() == 0) { std::fill(position_.begin(), position_.end(), 0); } else { for (size_t i = 0; i < position_.size(); ++i) { position_[i] = root_index[i]; CHECK_LT(root_index[i], (unsigned)tree.param.num_roots) << "root index exceed setting"; } } // mark delete for the deleted data for (size_t i = 0; i < position_.size(); ++i) { if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i]; } // mark subsample if (param_.subsample < 1.0f) { std::bernoulli_distribution coin_flip(param_.subsample); auto &rnd = common::GlobalRandom(); for (size_t i = 0; i < position_.size(); ++i) { if (gpair[i].GetHess() < 0.0f) continue; if (!coin_flip(rnd)) position_[i] = ~position_[i]; } } } { // expand query qexpand_.reserve(256); qexpand_.clear(); for (int i = 0; i < tree.param.num_roots; ++i) { qexpand_.push_back(i); } this->UpdateNode2WorkIndex(tree); } } /*! \brief update queue expand add in new leaves */ inline void UpdateQueueExpand(const RegTree &tree) { std::vector<int> newnodes; for (int nid : qexpand_) { if (!tree[nid].IsLeaf()) { newnodes.push_back(tree[nid].LeftChild()); newnodes.push_back(tree[nid].RightChild()); } } // use new nodes for qexpand qexpand_ = newnodes; this->UpdateNode2WorkIndex(tree); } // return decoded position inline int DecodePosition(bst_uint ridx) const { const int pid = position_[ridx]; return pid < 0 ? ~pid : pid; } // encode the encoded position value for ridx inline void SetEncodePosition(bst_uint ridx, int nid) { if (position_[ridx] < 0) { position_[ridx] = ~nid; } else { position_[ridx] = nid; } } /*! \brief this is a helper function that uses the column based data structure; reset the positions to the latest one \param nodes the set of nodes that contains the split to be used \param p_fmat feature matrix needed for tree construction \param tree the regression tree structure */ inline void ResetPositionCol(const std::vector<int> &nodes, DMatrix *p_fmat, const RegTree &tree) { // set the positions in the nondefault this->SetNonDefaultPositionCol(nodes, p_fmat, tree); this->SetDefaultPostion(p_fmat, tree); } /*! \brief helper function to set the non-leaf positions to default direction. This function can be applied multiple times and will get the same result. \param p_fmat feature matrix needed for tree construction \param tree the regression tree structure */ inline void SetDefaultPostion(DMatrix *p_fmat, const RegTree &tree) { // set default direct nodes to default // for leaf nodes that are not fresh, mark them to ~nid, // so that they are ignored in future statistics collection const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_); for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) { const int nid = this->DecodePosition(ridx); if (tree[nid].IsLeaf()) { // mark finish when it is not a fresh leaf if (tree[nid].RightChild() == -1) { position_[ridx] = ~nid; } } else { // push to default branch if (tree[nid].DefaultLeft()) { this->SetEncodePosition(ridx, tree[nid].LeftChild()); } else { this->SetEncodePosition(ridx, tree[nid].RightChild()); } } } } /*! \brief this is a helper function that uses the column based data structure, to CORRECT the positions of non-default directions that WERE set to default before calling this function. \param batch The column batch \param sorted_split_set The set of index that contains split solutions. \param tree the regression tree structure */ inline void CorrectNonDefaultPositionByBatch( const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set, const RegTree &tree) { for (size_t fid = 0; fid < batch.Size(); ++fid) { auto col = batch[fid]; auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid); if (it != sorted_split_set.end() && *it == fid) { const auto ndata = static_cast<bst_omp_uint>(col.size()); for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_uint ridx = col[j].index; const bst_float fvalue = col[j].fvalue; const int nid = this->DecodePosition(ridx); CHECK(tree[nid].IsLeaf()); int pid = tree[nid].Parent(); // go back to parent, correct those who are not default if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) { if (fvalue < tree[pid].SplitCond()) { this->SetEncodePosition(ridx, tree[pid].LeftChild()); } else { this->SetEncodePosition(ridx, tree[pid].RightChild()); } } } } } } /*! \brief this is a helper function that uses the column based data structure \param nodes the set of nodes that contains the split to be used \param tree the regression tree structure \param out_split_set The split index set */ inline void GetSplitSet(const std::vector<int> &nodes, const RegTree &tree, std::vector<unsigned> *out_split_set) { std::vector<unsigned> &fsplits = *out_split_set; fsplits.clear(); // step 1, classify the non-default data into right places for (int nid : nodes) { if (!tree[nid].IsLeaf()) { fsplits.push_back(tree[nid].SplitIndex()); } } std::sort(fsplits.begin(), fsplits.end()); fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin()); } /*! \brief this is a helper function that uses the column based data structure; update all positions into nondefault branch, if any, ignore the default branch \param nodes the set of nodes that contains the split to be used \param p_fmat feature matrix needed for tree construction \param tree the regression tree structure */ virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes, DMatrix *p_fmat, const RegTree &tree) { std::vector<unsigned> fsplits; this->GetSplitSet(nodes, tree, &fsplits); for (const auto &batch : p_fmat->GetSortedColumnBatches()) { for (auto fid : fsplits) { auto col = batch[fid]; const auto ndata = static_cast<bst_omp_uint>(col.size()); for (bst_omp_uint j = 0; j < ndata; ++j) { const bst_uint ridx = col[j].index; const bst_float fvalue = col[j].fvalue; const int nid = this->DecodePosition(ridx); // go back to parent, correct those who are not default if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) { if (fvalue < tree[nid].SplitCond()) { this->SetEncodePosition(ridx, tree[nid].LeftChild()); } else { this->SetEncodePosition(ridx, tree[nid].RightChild()); } } } } } } /*! \brief helper function to get statistics from a tree */ template <typename TStats> inline void GetNodeStats(const std::vector<GradientPair> &gpair, const DMatrix &fmat, const RegTree &tree, std::vector<std::vector<TStats>> *p_thread_temp, std::vector<TStats> *p_node_stats) { std::vector<std::vector<TStats>> &thread_temp = *p_thread_temp; const MetaInfo &info = fmat.Info(); thread_temp.resize(omp_get_max_threads()); p_node_stats->resize(tree.param.num_nodes); const int tid = omp_get_thread_num(); thread_temp[tid].resize(tree.param.num_nodes, TStats()); for (unsigned int nid : qexpand_) { thread_temp[tid][nid] = TStats(); } // setup position const auto ndata = static_cast<bst_omp_uint>(fmat.Info().num_row_); for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) { const int nid = position_[ridx]; const int tid = omp_get_thread_num(); if (nid >= 0) { thread_temp[tid][nid].Add(gpair[ridx]); } } // sum the per thread statistics together for (int nid : qexpand_) { TStats &s = (*p_node_stats)[nid]; s = TStats(); for (size_t tid = 0; tid < thread_temp.size(); ++tid) { s.Add(thread_temp[tid][nid]); } } } /*! \brief common helper data structure to build sketch */ struct SketchEntry { /*! \brief total sum of amount to be met */ double sum_total; /*! \brief statistics used in the sketch */ double rmin, wmin; /*! \brief last seen feature value */ bst_float last_fvalue; /*! \brief current size of sketch */ double next_goal; // pointer to the sketch to put things in common::WXQuantileSketch<bst_float, bst_float> *sketch; // initialize the space inline void Init(unsigned max_size) { next_goal = -1.0f; rmin = wmin = 0.0f; sketch->temp.Reserve(max_size + 1); sketch->temp.size = 0; } /*! \brief push a new element to sketch \param fvalue feature value, comes in sorted ascending order \param w weight \param max_size */ inline void Push(bst_float fvalue, bst_float w, unsigned max_size) { if (next_goal == -1.0f) { next_goal = 0.0f; last_fvalue = fvalue; wmin = w; return; } if (last_fvalue != fvalue) { double rmax = rmin + wmin; if (rmax >= next_goal && sketch->temp.size != max_size) { if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size - 1].value) { // push to sketch sketch->temp.data[sketch->temp.size] = common::WXQuantileSketch<bst_float, bst_float>::Entry(static_cast<bst_float>(rmin), static_cast<bst_float>(rmax), static_cast<bst_float>(wmin), last_fvalue); CHECK_LT(sketch->temp.size, max_size) << "invalid maximum size max_size=" << max_size << ", stemp.size=" << sketch->temp.size; ++sketch->temp.size; } if (sketch->temp.size == max_size) { next_goal = sum_total * 2.0f + 1e-5f; } else { next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size); } } else { if (rmax >= next_goal) { LOG(TRACKER) << "INFO: rmax=" << rmax << ", sum_total=" << sum_total << ", next_goal=" << next_goal << ", size=" << sketch->temp.size; } } rmin = rmax; wmin = w; last_fvalue = fvalue; } else { wmin += w; } } /*! \brief push final unfinished value to the sketch */ inline void Finalize(unsigned max_size) { double rmax = rmin + wmin; if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size - 1].value) { CHECK_LE(sketch->temp.size, max_size) << "Finalize: invalid maximum size, max_size=" << max_size << ", stemp.size=" << sketch->temp.size; // push to sketch sketch->temp.data[sketch->temp.size] = common::WXQuantileSketch<bst_float, bst_float>::Entry(static_cast<bst_float>(rmin), static_cast<bst_float>(rmax), static_cast<bst_float>(wmin), last_fvalue); ++sketch->temp.size; } sketch->PushTemp(); } }; /*! \brief training parameter of tree grower */ TrainParam param_; /*! \brief queue of nodes to be expanded */ std::vector<int> qexpand_; /*! \brief map active node to its working index offset in qexpand, can be -1, which means the node is not actively expanding */ std::vector<int> node2workindex_; /*! \brief position of each instance in the tree, can be negative, which means this position is no longer expanding; see also Decode/EncodePosition */ std::vector<int> position_; private: inline void UpdateNode2WorkIndex(const RegTree &tree) { // update the node2workindex std::fill(node2workindex_.begin(), node2workindex_.end(), -1); node2workindex_.resize(tree.param.num_nodes); for (size_t i = 0; i < qexpand_.size(); ++i) { node2workindex_[qexpand_[i]] = static_cast<int>(i); } } }; } // namespace tree } // namespace xgboost #endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
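Editor's note: BaseMaker marks inactive rows by storing the bitwise complement of the node id, so the sign bit doubles as the "ignore this row" flag while the node id stays recoverable. A tiny round-trip sketch of the Decode/EncodePosition trick used above:

#include <cassert>

int encode_inactive(int nid) { return ~nid; }           // nid >= 0 maps to a negative value
int decode_position(int pid) { return pid < 0 ? ~pid : pid; }

int main() {
  int nid = 5;
  int pid = encode_inactive(nid);   // pid == -6
  assert(pid < 0);                  // negativity signals "inactive"
  assert(decode_position(pid) == 5);
  assert(decode_position(nid) == 5);  // active positions decode to themselves
  return 0;
}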
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_ #define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_ #include <rabit/rabit.h> #include <xgboost/base.h> #include <xgboost/tree_updater.h> #include <vector> #include <algorithm> #include <string> #include <limits> #include <utility> #include "./param.h" #include "../common/io.h" #include "../common/random.h" #include "../common/quantile.h" namespace xgboost { namespace tree { /* * ! \brief base tree maker class that defines common operation * needed in tree making */ class BaseMaker:public TreeUpdater { public: void Init(const std::vector < std::pair < std::string, std::string > >&args)override { param_.InitAllowUnknown(args); } protected: //helper to collect and query feature meta information struct FMetaHelper { public: /* ! \brief find type of each feature, use column format */ inline void InitByCol(DMatrix * p_fmat, const RegTree & tree) { fminmax_.resize(tree.param.num_feature * 2); std: : fill(fminmax_.begin(), fminmax_.end(), -std: : numeric_limits < bst_float >: :max()); //start accumulating statistics for (const auto & batch:p_fmat->GetSortedColumnBatches()) { for (bst_uint fid = 0; fid < batch.Size(); ++fid) { auto c = batch[fid]; if (c.size() != 0) { CHECK_LT(fid * 2, fminmax_.size()); fminmax_[fid * 2 + 0] = std: : max(-c[0].fvalue, fminmax_[fid * 2 + 0]); fminmax_[fid * 2 + 1] = std: : max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]); } } } } /* ! \brief synchronize the information */ inline void SyncInfo() { rabit::Allreduce < rabit::op::Max > (dmlc::BeginPtr(fminmax_), fminmax_.size()); } //get feature type, 0: empty 1: binary 2: real inline int Type(bst_uint fid) const { CHECK_LT(fid * 2 + 1, fminmax_.size()) << "FeatHelper fid exceed query bound "; bst_float a = fminmax_[fid * 2]; bst_float b = fminmax_[fid * 2 + 1]; if (a == -std: : numeric_limits < bst_float >: : max()) return 0; if (-a == b) { return 1; } else { return 2; } } inline bst_float MaxValue(bst_uint fid) const { return fminmax_[fid * 2 + 1]; } inline void SampleCol(float p, std::vector < bst_uint > *p_findex)const { std::vector < bst_uint > &findex = *p_findex; findex.clear(); for (size_t i = 0; i < fminmax_.size(); i += 2) { const auto fid = static_cast < bst_uint > (i / 2); if (this->Type(fid) != 0) findex.push_back(fid); } auto n = static_cast < unsigned >(p * findex.size()); std: : shuffle(findex.begin(), findex.end(), common: :GlobalRandom()); findex.resize(n); //sync the findex if it is subsample std: : string s_cache; common: :MemoryBufferStream fc(&s_cache); dmlc: :Stream & fs = fc; if (rabit: :GetRank() == 0) { fs.Write(findex); } rabit: :Broadcast(&s_cache, 0); fs.Read(&findex); } private: std: : vector < bst_float > fminmax_; }; //------static helper functions-- -- -- //helper function to get to next level of the tree /* ! \brief this is helper function for row based data */ inline static int NextLevel(const SparsePage::Inst & inst, const RegTree & tree, int nid) { const RegTree::Node & n = tree[nid]; bst_uint findex = n.SplitIndex(); for (const auto & ins: inst) { if (findex == ins.index) { if (ins.fvalue < n.SplitCond()) { return n.LeftChild(); } else { return n.RightChild(); } } } return n.DefaultChild(); } //------class member helpers-- -- ----- /* ! 
\brief initialize temp data structure */ inline void InitData(const std::vector < GradientPair > &gpair, const DMatrix & fmat, const RegTree & tree) { CHECK_EQ(tree.param.num_nodes, tree.param.num_roots) << "TreeMaker: can only grow new tree"; const std::vector < unsigned >&root_index = fmat.Info().root_index_; { //setup position position_.resize(gpair.size()); if (root_index.size() == 0) { std: : fill(position_.begin(), position_.end(), 0); } else { for (size_t i = 0; i < position_.size(); ++i) { position_[i] = root_index[i]; CHECK_LT(root_index[i], (unsigned)tree.param.num_roots) << "root index exceed setting"; } } //mark delete for the deleted datas for (size_t i = 0; i < position_.size(); ++i) { if (gpair[i].GetHess() < 0.0 f) position_[i] = ~position_[i]; } //mark subsample if (param_.subsample < 1.0 f) { std: : bernoulli_distribution coin_flip(param_.subsample); auto & rnd = common: :GlobalRandom(); for (size_t i = 0; i < position_.size(); ++i) { if (gpair[i].GetHess() < 0.0 f) continue; if (!coin_flip(rnd)) position_[i] = ~position_[i]; } } } { //expand query qexpand_.reserve(256); qexpand_.clear(); for (int i = 0; i < tree.param.num_roots; ++i) { qexpand_.push_back(i); } this->UpdateNode2WorkIndex(tree); } } /* ! \brief update queue expand add in new leaves */ inline void UpdateQueueExpand(const RegTree & tree) { std::vector < int >newnodes; for (int nid: qexpand_) { if (!tree[nid].IsLeaf()) { newnodes.push_back(tree[nid].LeftChild()); newnodes.push_back(tree[nid].RightChild()); } } //use new nodes for qexpand qexpand_ = newnodes; this ->UpdateNode2WorkIndex(tree); } //return decoded position inline int DecodePosition(bst_uint ridx) const { const int pid = position_[ridx]; return pid < 0 ? ~pid : pid; } //encode the encoded position value for ridx inline void SetEncodePosition(bst_uint ridx, int nid) { if (position_[ridx] < 0) { position_[ridx] = ~nid; } else { position_[ridx] = nid; } } /* * ! \brief this is helper function uses column based data * structure, reset the positions to the lastest one \param nodes * the set of nodes that contains the split to be used \param * p_fmat feature matrix needed for tree construction \param tree * the regression tree structure */ inline void ResetPositionCol(const std::vector < int >&nodes, DMatrix * p_fmat, const RegTree & tree) { //set the positions in the nondefault this->SetNonDefaultPositionCol(nodes, p_fmat, tree); this->SetDefaultPostion(p_fmat, tree); } /* * ! \brief helper function to set the non-leaf positions to * default direction. This function can be applied multiple times * and will get the same result. \param p_fmat feature matrix * needed for tree construction \param tree the regression tree * structure */ inline void SetDefaultPostion(DMatrix * p_fmat, const RegTree & tree) { //set default direct nodes to default //for leaf nodes that are not fresh, mark then to ~ nid, //so that they are ignored in future statistics collection const auto ndata = static_cast < bst_omp_uint > (p_fmat->Info().num_row_); #pragma omp parallel for schedule(static) for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) { const int nid = this->DecodePosition(ridx); if (tree[nid].IsLeaf()) { //mark finish when it is not a fresh leaf if (tree[nid].RightChild() == -1) { position_[ridx] = ~nid; } } else { //push to default branch if (tree[nid].DefaultLeft()) { this->SetEncodePosition(ridx, tree[nid].LeftChild()); } else { this->SetEncodePosition(ridx, tree[nid].RightChild()); } } } } /* * ! 
\brief helper that uses the column-based data structure to CORRECT the
 *  positions of non-default directions that WERE set to default before
 *  calling this function.
 * \param batch The column batch
 * \param sorted_split_set The set of indices that contains split solutions
 * \param tree the regression tree structure
 */
inline void CorrectNonDefaultPositionByBatch(
    const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set,
    const RegTree &tree) {
  for (size_t fid = 0; fid < batch.Size(); ++fid) {
    auto col = batch[fid];
    auto it = std::lower_bound(sorted_split_set.begin(),
                               sorted_split_set.end(), fid);
    if (it != sorted_split_set.end() && *it == fid) {
      const auto ndata = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static)
      for (bst_omp_uint j = 0; j < ndata; ++j) {
        const bst_uint ridx = col[j].index;
        const bst_float fvalue = col[j].fvalue;
        const int nid = this->DecodePosition(ridx);
        CHECK(tree[nid].IsLeaf());
        int pid = tree[nid].Parent();
        // go back to the parent and correct those that are not default
        if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) {
          if (fvalue < tree[pid].SplitCond()) {
            this->SetEncodePosition(ridx, tree[pid].LeftChild());
          } else {
            this->SetEncodePosition(ridx, tree[pid].RightChild());
          }
        }
      }
    }
  }
}

/*!
 * \brief helper that uses the column-based data structure to collect the
 *  split indices used by a set of nodes.
 * \param nodes the set of nodes that contains the splits to be used
 * \param tree the regression tree structure
 * \param out_split_set The split index set
 */
inline void GetSplitSet(const std::vector<int> &nodes, const RegTree &tree,
                        std::vector<unsigned> *out_split_set) {
  std::vector<unsigned> &fsplits = *out_split_set;
  fsplits.clear();
  // step 1: classify the non-default data into the right places
  for (int nid : nodes) {
    if (!tree[nid].IsLeaf()) {
      fsplits.push_back(tree[nid].SplitIndex());
    }
  }
  std::sort(fsplits.begin(), fsplits.end());
  fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) -
                 fsplits.begin());
}

/*!
 * \brief helper that uses the column-based data structure to update all
 *  positions into the non-default branch, if any, ignoring the default
 *  branch.
 * \param nodes the set of nodes that contains the splits to be used
 * \param p_fmat feature matrix needed for tree construction
 * \param tree the regression tree structure
 */
virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
                                      DMatrix *p_fmat, const RegTree &tree) {
  std::vector<unsigned> fsplits;
  this->GetSplitSet(nodes, tree, &fsplits);
  for (const auto &batch : p_fmat->GetSortedColumnBatches()) {
    for (auto fid : fsplits) {
      auto col = batch[fid];
      const auto ndata = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static)
      for (bst_omp_uint j = 0; j < ndata; ++j) {
        const bst_uint ridx = col[j].index;
        const bst_float fvalue = col[j].fvalue;
        const int nid = this->DecodePosition(ridx);
        // go back to the parent and correct those that are not default
        if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
          if (fvalue < tree[nid].SplitCond()) {
            this->SetEncodePosition(ridx, tree[nid].LeftChild());
          } else {
            this->SetEncodePosition(ridx, tree[nid].RightChild());
          }
        }
      }
    }
  }
}

/*! \brief helper function to get statistics from a tree */
template <typename TStats>
inline void GetNodeStats(const std::vector<GradientPair> &gpair,
                         const DMatrix &fmat, const RegTree &tree,
                         std::vector<std::vector<TStats>> *p_thread_temp,
                         std::vector<TStats> *p_node_stats) {
  std::vector<std::vector<TStats>> &thread_temp = *p_thread_temp;
  const MetaInfo &info = fmat.Info();
  thread_temp.resize(omp_get_max_threads());
  p_node_stats->resize(tree.param.num_nodes);
#pragma omp parallel
  {
    const int tid = omp_get_thread_num();
    thread_temp[tid].resize(tree.param.num_nodes, TStats());
    for (unsigned int nid : qexpand_) {
      thread_temp[tid][nid] = TStats();
    }
  }
  // set up position
  const auto ndata = static_cast<bst_omp_uint>(info.num_row_);
#pragma omp parallel for schedule(static)
  for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
    const int nid = position_[ridx];
    const int tid = omp_get_thread_num();
    if (nid >= 0) {
      thread_temp[tid][nid].Add(gpair[ridx]);
    }
  }
  // sum the per-thread statistics together
  for (int nid : qexpand_) {
    TStats &s = (*p_node_stats)[nid];
    s = TStats();
    for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
      s.Add(thread_temp[tid][nid]);
    }
  }
}

/*! \brief common helper data structure to build sketch */
struct SketchEntry {
  /*! \brief total sum of amount to be met */
  double sum_total;
  /*! \brief statistics used in the sketch */
  double rmin, wmin;
  /*! \brief last seen feature value */
  bst_float last_fvalue;
  /*! \brief current rank goal of the sketch */
  double next_goal;
  // pointer to the sketch to put things in
  common::WXQuantileSketch<bst_float, bst_float> *sketch;
  // initialize the space
  inline void Init(unsigned max_size) {
    next_goal = -1.0f;
    rmin = wmin = 0.0f;
    sketch->temp.Reserve(max_size + 1);
    sketch->temp.size = 0;
  }
  /*!
   * \brief push a new element to the sketch
   * \param fvalue feature value, comes in sorted ascending order
   * \param w weight
   * \param max_size maximum number of summary entries
   */
  inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
    if (next_goal == -1.0f) {
      next_goal = 0.0f;
      last_fvalue = fvalue;
      wmin = w;
      return;
    }
    if (last_fvalue != fvalue) {
      double rmax = rmin + wmin;
      if (rmax >= next_goal && sketch->temp.size != max_size) {
        if (sketch->temp.size == 0 ||
            last_fvalue > sketch->temp.data[sketch->temp.size - 1].value) {
          // push to sketch
          sketch->temp.data[sketch->temp.size] =
              common::WXQuantileSketch<bst_float, bst_float>::Entry(
                  static_cast<bst_float>(rmin),
                  static_cast<bst_float>(rmax),
                  static_cast<bst_float>(wmin), last_fvalue);
          CHECK_LT(sketch->temp.size, max_size)
              << "invalid maximum size max_size=" << max_size
              << ", stemp.size=" << sketch->temp.size;
          ++sketch->temp.size;
        }
        if (sketch->temp.size == max_size) {
          next_goal = sum_total * 2.0f + 1e-5f;
        } else {
          next_goal = static_cast<bst_float>(
              sketch->temp.size * sum_total / max_size);
        }
      } else {
        if (rmax >= next_goal) {
          LOG(TRACKER) << "INFO: rmax=" << rmax
                       << ", sum_total=" << sum_total
                       << ", next_goal=" << next_goal
                       << ", size=" << sketch->temp.size;
        }
      }
      rmin = rmax;
      wmin = w;
      last_fvalue = fvalue;
    } else {
      wmin += w;
    }
  }
  /*! \brief push the final unfinished value to the sketch */
  inline void Finalize(unsigned max_size) {
    double rmax = rmin + wmin;
    if (sketch->temp.size == 0 ||
        last_fvalue > sketch->temp.data[sketch->temp.size - 1].value) {
      CHECK_LE(sketch->temp.size, max_size)
          << "Finalize: invalid maximum size, max_size=" << max_size
          << ", stemp.size=" << sketch->temp.size;
      // push to sketch
      sketch->temp.data[sketch->temp.size] =
          common::WXQuantileSketch<bst_float, bst_float>::Entry(
              static_cast<bst_float>(rmin),
              static_cast<bst_float>(rmax),
              static_cast<bst_float>(wmin), last_fvalue);
      ++sketch->temp.size;
    }
    sketch->PushTemp();
  }
};

/*! \brief training parameter of tree grower */
TrainParam param_;
/*! \brief queue of nodes to be expanded */
std::vector<int> qexpand_;
/*!
 * \brief map an active node to its working index offset in qexpand;
 *  can be -1, which means the node is not actively expanding
 */
std::vector<int> node2workindex_;
/*!
 * \brief position of each instance in the tree; can be negative, which
 *  means this position is no longer expanding, see also
 *  Decode/EncodePosition
 */
std::vector<int> position_;

private:
inline void UpdateNode2WorkIndex(const RegTree &tree) {
  // update the node2workindex
  std::fill(node2workindex_.begin(), node2workindex_.end(), -1);
  node2workindex_.resize(tree.param.num_nodes);
  for (size_t i = 0; i < qexpand_.size(); ++i) {
    node2workindex_[qexpand_[i]] = static_cast<int>(i);
  }
}
};
}  // namespace tree
}  // namespace xgboost
#endif  // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
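The Push()/Finalize() pair above implements the entry-selection rule of a weighted quantile sketch: it scans (value, weight) pairs in ascending value order, tracks cumulative rank bounds rmin and rmax = rmin + wmin for the current run of equal values, and emits a summary entry whenever rmax crosses the moving rank target next_goal. The standalone C sketch below mirrors that rule on plain arrays; Entry and summarize are illustrative stand-ins, not part of xgboost's API, and n >= 1 is assumed.

#include <stdio.h>

/* One summary entry: rank bounds [rmin, rmax], run weight w, and value. */
typedef struct { double rmin, rmax, w; float value; } Entry;

/* Scan (v[i], w[i]) pairs sorted ascending by value and keep at most
   max_size + 1 entries whose upper rank bound crosses the moving rank
   goal; a simplified mirror of SketchEntry::Push()/Finalize() above. */
static int summarize(const float *v, const float *w, int n,
                     double sum_total, int max_size, Entry *out) {
  int size = 0;
  double rmin = 0.0, wmin = w[0], next_goal = 0.0;
  float last = v[0];
  for (int i = 1; i < n; ++i) {
    if (v[i] == last) { wmin += w[i]; continue; }   /* extend the run */
    double rmax = rmin + wmin;
    if (rmax >= next_goal && size != max_size) {
      out[size++] = (Entry){ rmin, rmax, wmin, last };
      next_goal = (size == max_size) ? sum_total * 2.0 + 1e-5
                                     : size * sum_total / max_size;
    }
    rmin = rmax; wmin = w[i]; last = v[i];
  }
  /* Finalize: flush the trailing run; like the real sketch, which
     reserves max_size + 1 slots, out must hold one extra entry */
  out[size++] = (Entry){ rmin, rmin + wmin, wmin, last };
  return size;
}

int main(void) {
  float v[] = {1, 1, 2, 3, 3, 5, 8, 8, 9};
  float w[] = {1, 1, 1, 1, 1, 1, 1, 1, 1};
  Entry out[5];
  int m = summarize(v, w, 9, 9.0, 4, out);
  for (int i = 0; i < m; ++i)
    printf("value=%g rmin=%g rmax=%g w=%g\n",
           out[i].value, out[i].rmin, out[i].rmax, out[i].w);
  return 0;
}

With max_size much smaller than the number of distinct values, the retained entries approximate the weighted quantiles of the stream, which is what the tree builder needs for split finding.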
bug_proxy_task_dep_waiting.c
// RUN: %libomp-compile-and-run // The runtime currently does not get dependency information from GCC. // UNSUPPORTED: gcc, icc-16 // REQUIRES: !abt // Very flaky on openmp-clang-x86_64-linux-debian. // https://bugs.llvm.org/show_bug.cgi?id=45397 // UNSUPPORTED: linux #include <stdio.h> #include <omp.h> #include <pthread.h> #include "omp_my_sleep.h" /* An explicit task can have a dependency on a target task. If it is not directly satisfied, the runtime should not wait but resume execution. */ // Compiler-generated code (emulation) typedef long kmp_intptr_t; typedef int kmp_int32; typedef char bool; typedef struct ident { kmp_int32 reserved_1; /**< might be used in Fortran; see above */ kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags; KMP_IDENT_KMPC identifies this union member */ kmp_int32 reserved_2; /**< not really used in Fortran any more; see above */ #if USE_ITT_BUILD /* but currently used for storing region-specific ITT */ /* contextual information. */ #endif /* USE_ITT_BUILD */ kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for C++ */ char const *psource; /**< String describing the source location. The string is composed of semi-colon separated fields which describe the source file, the function and a pair of line numbers that delimit the construct. */ } ident_t; typedef struct kmp_depend_info { kmp_intptr_t base_addr; size_t len; struct { bool in:1; bool out:1; } flags; } kmp_depend_info_t; struct kmp_task; typedef kmp_int32 (* kmp_routine_entry_t)( kmp_int32, struct kmp_task * ); typedef struct kmp_task { /* GEH: Shouldn't this be aligned somehow? */ void * shareds; /**< pointer to block of pointers to shared vars */ kmp_routine_entry_t routine; /**< pointer to routine to call for executing task */ kmp_int32 part_id; /**< part id for the task */ } kmp_task_t; #ifdef __cplusplus extern "C" { #endif kmp_int32 __kmpc_global_thread_num ( ident_t * ); kmp_task_t* __kmpc_omp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t task_entry ); void __kmpc_proxy_task_completed_ooo ( kmp_task_t *ptask ); kmp_int32 __kmpc_omp_task_with_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list ); kmp_int32 __kmpc_omp_task( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task ); #ifdef __cplusplus } #endif void *target(void *task) { my_sleep( 0.1 ); __kmpc_proxy_task_completed_ooo((kmp_task_t*) task); return NULL; } pthread_t target_thread; // User's code int task_entry(kmp_int32 gtid, kmp_task_t *task) { pthread_create(&target_thread, NULL, &target, task); return 0; } int main() { int dep; /* * Corresponds to: #pragma omp target nowait depend(out: dep) { my_sleep( 0.1 ); } */ kmp_depend_info_t dep_info; dep_info.base_addr = (long) &dep; dep_info.len = sizeof(int); // out = inout per spec and runtime expects this dep_info.flags.in = 1; dep_info.flags.out = 1; kmp_int32 gtid = __kmpc_global_thread_num(NULL); kmp_task_t *proxy_task = __kmpc_omp_task_alloc(NULL,gtid,17,sizeof(kmp_task_t),0,&task_entry); __kmpc_omp_task_with_deps(NULL,gtid,proxy_task,1,&dep_info,0,NULL); int first_task_finished = 0; #pragma omp task shared(first_task_finished) depend(inout: dep) { first_task_finished = 1; } int second_task_finished = 0; #pragma omp task shared(second_task_finished) depend(in: dep) { second_task_finished = 1; } // check that execution has been resumed and the runtime has not waited // 
for the dependencies to be satisfied. int error = (first_task_finished == 1); error += (second_task_finished == 1); #pragma omp taskwait // by now all tasks should have finished error += (first_task_finished != 1); error += (second_task_finished != 1); return error; }
// RUN: %libomp-compile-and-run // The runtime currently does not get dependency information from GCC. // UNSUPPORTED: gcc, icc-16 // REQUIRES: !abt // Very flaky on openmp-clang-x86_64-linux-debian. // https://bugs.llvm.org/show_bug.cgi?id=45397 // UNSUPPORTED: linux #include <stdio.h> #include <omp.h> #include <pthread.h> #include "omp_my_sleep.h" /* An explicit task can have a dependency on a target task. If it is not directly satisfied, the runtime should not wait but resume execution. */ // Compiler-generated code (emulation) typedef long kmp_intptr_t; typedef int kmp_int32; typedef char bool; typedef struct ident { kmp_int32 reserved_1; /**< might be used in Fortran; see above */ kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags; KMP_IDENT_KMPC identifies this union member */ kmp_int32 reserved_2; /**< not really used in Fortran any more; see above */ #if USE_ITT_BUILD /* but currently used for storing region-specific ITT */ /* contextual information. */ #endif /* USE_ITT_BUILD */ kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for C++ */ char const *psource; /**< String describing the source location. The string is composed of semi-colon separated fields which describe the source file, the function and a pair of line numbers that delimit the construct. */ } ident_t; typedef struct kmp_depend_info { kmp_intptr_t base_addr; size_t len; struct { bool in:1; bool out:1; } flags; } kmp_depend_info_t; struct kmp_task; typedef kmp_int32 (* kmp_routine_entry_t)( kmp_int32, struct kmp_task * ); typedef struct kmp_task { /* GEH: Shouldn't this be aligned somehow? */ void * shareds; /**< pointer to block of pointers to shared vars */ kmp_routine_entry_t routine; /**< pointer to routine to call for executing task */ kmp_int32 part_id; /**< part id for the task */ } kmp_task_t; #ifdef __cplusplus extern "C" { #endif kmp_int32 __kmpc_global_thread_num ( ident_t * ); kmp_task_t* __kmpc_omp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t task_entry ); void __kmpc_proxy_task_completed_ooo ( kmp_task_t *ptask ); kmp_int32 __kmpc_omp_task_with_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list ); kmp_int32 __kmpc_omp_task( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task ); #ifdef __cplusplus } #endif void *target(void *task) { my_sleep( 0.1 ); __kmpc_proxy_task_completed_ooo((kmp_task_t*) task); return NULL; } pthread_t target_thread; // User's code int task_entry(kmp_int32 gtid, kmp_task_t *task) { pthread_create(&target_thread, NULL, &target, task); return 0; } int main() { int dep; /* * Corresponds to: my_sleep( 0.1 ); */ kmp_depend_info_t dep_info; dep_info.base_addr = (long) &dep; dep_info.len = sizeof(int); // out = inout per spec and runtime expects this dep_info.flags.in = 1; dep_info.flags.out = 1; kmp_int32 gtid = __kmpc_global_thread_num(NULL); kmp_task_t *proxy_task = __kmpc_omp_task_alloc(NULL,gtid,17,sizeof(kmp_task_t),0,&task_entry); __kmpc_omp_task_with_deps(NULL,gtid,proxy_task,1,&dep_info,0,NULL); int first_task_finished = 0; first_task_finished = 1; int second_task_finished = 0; second_task_finished = 1; // check that execution has been resumed and the runtime has not waited // for the dependencies to be satisfied. 
int error = (first_task_finished == 1); error += (second_task_finished == 1); // by now all tasks should have finished error += (first_task_finished != 1); error += (second_task_finished != 1); return error; }
// RUN: %libomp-compile-and-run // The runtime currently does not get dependency information from GCC. // UNSUPPORTED: gcc, icc-16 // REQUIRES: !abt // Very flaky on openmp-clang-x86_64-linux-debian. // https://bugs.llvm.org/show_bug.cgi?id=45397 // UNSUPPORTED: linux #include <stdio.h> #include <omp.h> #include <pthread.h> #include "omp_my_sleep.h" /* An explicit task can have a dependency on a target task. If it is not directly satisfied, the runtime should not wait but resume execution. */ // Compiler-generated code (emulation) typedef long kmp_intptr_t; typedef int kmp_int32; typedef char bool; typedef struct ident { kmp_int32 reserved_1; /**< might be used in Fortran; see above */ kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags; KMP_IDENT_KMPC identifies this union member */ kmp_int32 reserved_2; /**< not really used in Fortran any more; see above */ #if USE_ITT_BUILD /* but currently used for storing region-specific ITT */ /* contextual information. */ #endif /* USE_ITT_BUILD */ kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for C++ */ char const *psource; /**< String describing the source location. The string is composed of semi-colon separated fields which describe the source file, the function and a pair of line numbers that delimit the construct. */ } ident_t; typedef struct kmp_depend_info { kmp_intptr_t base_addr; size_t len; struct { bool in:1; bool out:1; } flags; } kmp_depend_info_t; struct kmp_task; typedef kmp_int32 (* kmp_routine_entry_t)( kmp_int32, struct kmp_task * ); typedef struct kmp_task { /* GEH: Shouldn't this be aligned somehow? */ void * shareds; /**< pointer to block of pointers to shared vars */ kmp_routine_entry_t routine; /**< pointer to routine to call for executing task */ kmp_int32 part_id; /**< part id for the task */ } kmp_task_t; #ifdef __cplusplus extern "C" { #endif kmp_int32 __kmpc_global_thread_num ( ident_t * ); kmp_task_t* __kmpc_omp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t task_entry ); void __kmpc_proxy_task_completed_ooo ( kmp_task_t *ptask ); kmp_int32 __kmpc_omp_task_with_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list ); kmp_int32 __kmpc_omp_task( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task ); #ifdef __cplusplus } #endif void *target(void *task) { my_sleep( 0.1 ); __kmpc_proxy_task_completed_ooo((kmp_task_t*) task); return NULL; } pthread_t target_thread; // User's code int task_entry(kmp_int32 gtid, kmp_task_t *task) { pthread_create(&target_thread, NULL, &target, task); return 0; } int main() { int dep; /* * Corresponds to: #pragma omp target nowait depend(out: dep) { my_sleep( 0.1 ); } */ kmp_depend_info_t dep_info; dep_info.base_addr = (long) &dep; dep_info.len = sizeof(int); // out = inout per spec and runtime expects this dep_info.flags.in = 1; dep_info.flags.out = 1; kmp_int32 gtid = __kmpc_global_thread_num(NULL); kmp_task_t *proxy_task = __kmpc_omp_task_alloc(NULL,gtid,17,sizeof(kmp_task_t),0,&task_entry); __kmpc_omp_task_with_deps(NULL,gtid,proxy_task,1,&dep_info,0,NULL); int first_task_finished = 0; #pragma omp task shared(first_task_finished) depend(inout: dep) { first_task_finished = 1; } int second_task_finished = 0; #pragma omp task shared(second_task_finished) depend(in: dep) { second_task_finished = 1; } // check that execution has been resumed and the runtime has not waited // 
for the dependencies to be satisfied. int error = (first_task_finished == 1); error += (second_task_finished == 1); #pragma omp taskwait // by now all tasks should have finished error += (first_task_finished != 1); error += (second_task_finished != 1); return error; }
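The __kmpc_* calls in the rows above emulate the code a compiler would emit for a proxy (target) task so the test can run without offloading support. For reference, roughly the same dependency chain expressed in plain OpenMP tasks, without the proxy-task machinery, is sketched below; usleep() stands in for the test's my_sleep() helper, and the exact deferral behaviour depends on the runtime.

#include <omp.h>
#include <stdio.h>
#include <unistd.h>   /* usleep() stands in for omp_my_sleep.h */

int main(void) {
  int dep = 0, first = 0, second = 0;
#pragma omp parallel
#pragma omp single
  {
#pragma omp task depend(out: dep)     /* plays the target task's role */
    { usleep(100000); dep = 1; }
#pragma omp task depend(inout: dep)
    { first = 1; }
#pragma omp task depend(in: dep)
    { second = 1; }
    /* task creation returns immediately; the encountering thread is
       free to continue here while the dependence chain runs */
#pragma omp taskwait
  }
  printf("first=%d second=%d\n", first, second);  /* 1 1 after taskwait */
  return 0;
}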
critical.c
/* critical Directive Example */
#include <omp.h>

int main(int argc, char *argv[]) {
  int x;
  x = 0;
#pragma omp parallel shared(x)
  {
#pragma omp critical
    x = x + 1;
  } /* end of parallel region */
  return 0;
}
/* critical Directive Example */
#include <omp.h>

int main(int argc, char *argv[]) {
  int x;
  x = 0;
  x = x + 1; /* end of parallel region */
  return 0;
}
/* critical Directive Example */
#include <omp.h>

int main(int argc, char *argv[]) {
  int x;
  x = 0;
#pragma omp parallel shared(x)
  {
#pragma omp critical
    x = x + 1;
  } /* end of parallel region */
  return 0;
}
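critical serializes the whole enclosed block under one (here unnamed) lock. For a single scalar update such as x = x + 1, an atomic update is the lighter-weight idiom; a minimal variant of the same example, assuming the same one-increment-per-thread intent:

#include <stdio.h>
#include <omp.h>

int main(void) {
  int x = 0;
#pragma omp parallel
  {
#pragma omp atomic
    x = x + 1;   /* one increment per thread, no lock region needed */
  }
  printf("x = %d (equals the number of threads)\n", x);
  return 0;
}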
vect-simd-clone-2.c
/* { dg-require-effective-target vect_simd_clones } */ /* { dg-additional-options "-fopenmp-simd" } */ /* { dg-additional-options "-mavx" { target avx_runtime } } */ #include "tree-vect.h" #ifndef N #define N 1024 #endif int array[N] __attribute__((aligned (32))); #pragma omp declare simd simdlen(4) notinbranch aligned(a:16) uniform(a) linear(b) #pragma omp declare simd simdlen(4) notinbranch aligned(a:32) uniform(a) linear(b) #pragma omp declare simd simdlen(8) notinbranch aligned(a:16) uniform(a) linear(b) #pragma omp declare simd simdlen(8) notinbranch aligned(a:32) uniform(a) linear(b) __attribute__((noinline)) void foo (int *a, int b, int c) { a[b] = c; } __attribute__((noinline, noclone)) void bar () { int i; #pragma omp simd for (i = 0; i < N; ++i) foo (array, i, i * array[i]); } __attribute__((noinline, noclone)) void baz () { int i; for (i = 0; i < N; i++) array[i] = 5 * (i & 7); } int main () { int i; check_vect (); baz (); bar (); for (i = 0; i < N; i++) if (array[i] != 5 * (i & 7) * i) abort (); return 0; }
/* { dg-require-effective-target vect_simd_clones } */ /* { dg-additional-options "-fopenmp-simd" } */ /* { dg-additional-options "-mavx" { target avx_runtime } } */ #include "tree-vect.h" #ifndef N #define N 1024 #endif int array[N] __attribute__((aligned(32))); __attribute__((noinline)) void foo(int *a, int b, int c) { a[b] = c; } __attribute__((noinline, noclone)) void bar() { int i; for (i = 0; i < N; ++i) foo(array, i, i * array[i]); } __attribute__((noinline, noclone)) void baz() { int i; for (i = 0; i < N; i++) array[i] = 5 * (i & 7); } int main() { int i; check_vect(); baz(); bar(); for (i = 0; i < N; i++) if (array[i] != 5 * (i & 7) * i) abort(); return 0; }
/* { dg-require-effective-target vect_simd_clones } */ /* { dg-additional-options "-fopenmp-simd" } */ /* { dg-additional-options "-mavx" { target avx_runtime } } */ #include "tree-vect.h" #ifndef N #define N 1024 #endif int array[N] __attribute__((aligned(32))); #pragma omp declare simd simdlen(4) notinbranch aligned(a:16) uniform(a) linear(b) #pragma omp declare simd simdlen(4) notinbranch aligned(a:32) uniform(a) linear(b) #pragma omp declare simd simdlen(8) notinbranch aligned(a:16) uniform(a) linear(b) #pragma omp declare simd simdlen(8) notinbranch aligned(a:32) uniform(a) linear(b) __attribute__((noinline)) void foo(int *a, int b, int c) { a[b] = c; } __attribute__((noinline, noclone)) void bar() { int i; #pragma omp simd for (i = 0; i < N; ++i) foo(array, i, i * array[i]); } __attribute__((noinline, noclone)) void baz() { int i; for (i = 0; i < N; i++) array[i] = 5 * (i & 7); } int main() { int i; check_vect(); baz(); bar(); for (i = 0; i < N; i++) if (array[i] != 5 * (i & 7) * i) abort(); return 0; }
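The dg- directives and tree-vect.h above belong to the GCC testsuite harness. Below is a self-contained sketch of the same declare simd pattern, runnable with a plain `gcc -fopenmp-simd -O2` and no harness; the simdlen/aligned clone variants of the original test are dropped for brevity.

#include <stdio.h>

#pragma omp declare simd notinbranch uniform(a) linear(b)
void store(int *a, int b, int c) { a[b] = c; }

int main(void) {
  int array[64];
#pragma omp simd
  for (int i = 0; i < 64; ++i)
    store(array, i, 3 * i);   /* call can be vectorized via the simd clone */
  printf("%d %d\n", array[1], array[63]);   /* prints: 3 189 */
  return 0;
}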
kpoint.c
/* Copyright (C) 2008 Atsushi Togo */ /* All rights reserved. */ /* This file is part of spglib. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> #include "mathfunc.h" #include "kpoint.h" #include "kgrid.h" #ifdef KPTWARNING #include <stdio.h> #define warning_print(...) fprintf(stderr,__VA_ARGS__) #else #define warning_print(...) 
#endif const int kpt_bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = { { 0, 0, 0}, { 0, 0, 1}, { 0, 0, 2}, { 0, 0, -2}, { 0, 0, -1}, { 0, 1, 0}, { 0, 1, 1}, { 0, 1, 2}, { 0, 1, -2}, { 0, 1, -1}, { 0, 2, 0}, { 0, 2, 1}, { 0, 2, 2}, { 0, 2, -2}, { 0, 2, -1}, { 0, -2, 0}, { 0, -2, 1}, { 0, -2, 2}, { 0, -2, -2}, { 0, -2, -1}, { 0, -1, 0}, { 0, -1, 1}, { 0, -1, 2}, { 0, -1, -2}, { 0, -1, -1}, { 1, 0, 0}, { 1, 0, 1}, { 1, 0, 2}, { 1, 0, -2}, { 1, 0, -1}, { 1, 1, 0}, { 1, 1, 1}, { 1, 1, 2}, { 1, 1, -2}, { 1, 1, -1}, { 1, 2, 0}, { 1, 2, 1}, { 1, 2, 2}, { 1, 2, -2}, { 1, 2, -1}, { 1, -2, 0}, { 1, -2, 1}, { 1, -2, 2}, { 1, -2, -2}, { 1, -2, -1}, { 1, -1, 0}, { 1, -1, 1}, { 1, -1, 2}, { 1, -1, -2}, { 1, -1, -1}, { 2, 0, 0}, { 2, 0, 1}, { 2, 0, 2}, { 2, 0, -2}, { 2, 0, -1}, { 2, 1, 0}, { 2, 1, 1}, { 2, 1, 2}, { 2, 1, -2}, { 2, 1, -1}, { 2, 2, 0}, { 2, 2, 1}, { 2, 2, 2}, { 2, 2, -2}, { 2, 2, -1}, { 2, -2, 0}, { 2, -2, 1}, { 2, -2, 2}, { 2, -2, -2}, { 2, -2, -1}, { 2, -1, 0}, { 2, -1, 1}, { 2, -1, 2}, { 2, -1, -2}, { 2, -1, -1}, {-2, 0, 0}, {-2, 0, 1}, {-2, 0, 2}, {-2, 0, -2}, {-2, 0, -1}, {-2, 1, 0}, {-2, 1, 1}, {-2, 1, 2}, {-2, 1, -2}, {-2, 1, -1}, {-2, 2, 0}, {-2, 2, 1}, {-2, 2, 2}, {-2, 2, -2}, {-2, 2, -1}, {-2, -2, 0}, {-2, -2, 1}, {-2, -2, 2}, {-2, -2, -2}, {-2, -2, -1}, {-2, -1, 0}, {-2, -1, 1}, {-2, -1, 2}, {-2, -1, -2}, {-2, -1, -1}, {-1, 0, 0}, {-1, 0, 1}, {-1, 0, 2}, {-1, 0, -2}, {-1, 0, -1}, {-1, 1, 0}, {-1, 1, 1}, {-1, 1, 2}, {-1, 1, -2}, {-1, 1, -1}, {-1, 2, 0}, {-1, 2, 1}, {-1, 2, 2}, {-1, 2, -2}, {-1, 2, -1}, {-1, -2, 0}, {-1, -2, 1}, {-1, -2, 2}, {-1, -2, -2}, {-1, -2, -1}, {-1, -1, 0}, {-1, -1, 1}, {-1, -1, 2}, {-1, -1, -2}, {-1, -1, -1} }; static MatINT *get_point_group_reciprocal(const MatINT * rotations, const int is_time_reversal); static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal, const double symprec, const int num_q, SPGCONST double qpoints[][3]); static int get_ir_reciprocal_mesh(int grid_address[][3], int map[], const int mesh[3], const int is_shift[3], const MatINT * rot_reciprocal); static int relocate_BZ_grid_address(int bz_grid_address[][3], int bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]); static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3], const int mesh[3]); /* grid_address (e.g. 4x4x4 mesh, unless GRID_ORDER_XYZ is defined) */ /* [[ 0 0 0] */ /* [ 1 0 0] */ /* [ 2 0 0] */ /* [-1 0 0] */ /* [ 0 1 0] */ /* [ 1 1 0] */ /* [ 2 1 0] */ /* [-1 1 0] */ /* .... ] */ /* */ /* Each value of 'map' correspnds to the index of grid_point. 
*/ int kpt_get_irreducible_reciprocal_mesh(int grid_address[][3], int map[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { int num_ir; num_ir = get_ir_reciprocal_mesh(grid_address, map, mesh, is_shift, rot_reciprocal); return num_ir; } int kpt_get_stabilized_reciprocal_mesh(int grid_address[][3], int map[], const int mesh[3], const int is_shift[3], const int is_time_reversal, const MatINT * rotations, const int num_q, SPGCONST double qpoints[][3]) { int num_ir; MatINT *rot_reciprocal, *rot_reciprocal_q; double tolerance; rot_reciprocal = NULL; rot_reciprocal_q = NULL; rot_reciprocal = get_point_group_reciprocal(rotations, is_time_reversal); tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]); rot_reciprocal_q = get_point_group_reciprocal_with_q(rot_reciprocal, tolerance, num_q, qpoints); num_ir = get_ir_reciprocal_mesh(grid_address, map, mesh, is_shift, rot_reciprocal_q); mat_free_MatINT(rot_reciprocal_q); rot_reciprocal_q = NULL; mat_free_MatINT(rot_reciprocal); rot_reciprocal = NULL; return num_ir; } void kpt_get_grid_points_by_rotations(int rot_grid_points[], const int address_orig[3], const MatINT * rot_reciprocal, const int mesh[3], const int is_shift[3]) { int i; int address_double_orig[3], address_double[3]; for (i = 0; i < 3; i++) { address_double_orig[i] = address_orig[i] * 2 + is_shift[i]; } for (i = 0; i < rot_reciprocal->size; i++) { mat_multiply_matrix_vector_i3(address_double, rot_reciprocal->mat[i], address_double_orig); rot_grid_points[i] = kgd_get_grid_point_double_mesh(address_double, mesh); } } void kpt_get_BZ_grid_points_by_rotations(int rot_grid_points[], const int address_orig[3], const MatINT * rot_reciprocal, const int mesh[3], const int is_shift[3], const int bz_map[]) { int i; int address_double_orig[3], address_double[3], bzmesh[3]; for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; address_double_orig[i] = address_orig[i] * 2 + is_shift[i]; } for (i = 0; i < rot_reciprocal->size; i++) { mat_multiply_matrix_vector_i3(address_double, rot_reciprocal->mat[i], address_double_orig); rot_grid_points[i] = bz_map[kgd_get_grid_point_double_mesh(address_double, bzmesh)]; } } int kpt_relocate_BZ_grid_address(int bz_grid_address[][3], int bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]) { return relocate_BZ_grid_address(bz_grid_address, bz_map, grid_address, mesh, rec_lattice, is_shift); } MatINT *kpt_get_point_group_reciprocal(const MatINT * rotations, const int is_time_reversal) { return get_point_group_reciprocal(rotations, is_time_reversal); } MatINT *kpt_get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal, const double symprec, const int num_q, SPGCONST double qpoints[][3]) { return get_point_group_reciprocal_with_q(rot_reciprocal, symprec, num_q, qpoints); } /* Return NULL if failed */ static MatINT *get_point_group_reciprocal(const MatINT * rotations, const int is_time_reversal) { int i, j, num_rot; MatINT *rot_reciprocal, *rot_return; int *unique_rot; SPGCONST int inversion[3][3] = { {-1, 0, 0 }, { 0,-1, 0 }, { 0, 0,-1 } }; rot_reciprocal = NULL; rot_return = NULL; unique_rot = NULL; if (is_time_reversal) { if ((rot_reciprocal = mat_alloc_MatINT(rotations->size * 2)) == NULL) { return NULL; } } else { if ((rot_reciprocal = mat_alloc_MatINT(rotations->size)) == NULL) { return NULL; } } if ((unique_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) { warning_print("spglib: Memory of unique_rot could not be allocated."); 
mat_free_MatINT(rot_reciprocal); rot_reciprocal = NULL; return NULL; } for (i = 0; i < rot_reciprocal->size; i++) { unique_rot[i] = -1; } for (i = 0; i < rotations->size; i++) { mat_transpose_matrix_i3(rot_reciprocal->mat[i], rotations->mat[i]); if (is_time_reversal) { mat_multiply_matrix_i3(rot_reciprocal->mat[rotations->size+i], inversion, rot_reciprocal->mat[i]); } } num_rot = 0; for (i = 0; i < rot_reciprocal->size; i++) { for (j = 0; j < num_rot; j++) { if (mat_check_identity_matrix_i3(rot_reciprocal->mat[unique_rot[j]], rot_reciprocal->mat[i])) { goto escape; } } unique_rot[num_rot] = i; num_rot++; escape: ; } if ((rot_return = mat_alloc_MatINT(num_rot)) != NULL) { for (i = 0; i < num_rot; i++) { mat_copy_matrix_i3(rot_return->mat[i], rot_reciprocal->mat[unique_rot[i]]); } } free(unique_rot); unique_rot = NULL; mat_free_MatINT(rot_reciprocal); rot_reciprocal = NULL; return rot_return; } /* Return NULL if failed */ static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal, const double symprec, const int num_q, SPGCONST double qpoints[][3]) { int i, j, k, l, is_all_ok, num_rot; int *ir_rot; double q_rot[3], diff[3]; MatINT * rot_reciprocal_q; ir_rot = NULL; rot_reciprocal_q = NULL; is_all_ok = 0; num_rot = 0; if ((ir_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) { warning_print("spglib: Memory of ir_rot could not be allocated."); return NULL; } for (i = 0; i < rot_reciprocal->size; i++) { ir_rot[i] = -1; } for (i = 0; i < rot_reciprocal->size; i++) { for (j = 0; j < num_q; j++) { is_all_ok = 0; mat_multiply_matrix_vector_id3(q_rot, rot_reciprocal->mat[i], qpoints[j]); for (k = 0; k < num_q; k++) { for (l = 0; l < 3; l++) { diff[l] = q_rot[l] - qpoints[k][l]; diff[l] -= mat_Nint(diff[l]); } if (mat_Dabs(diff[0]) < symprec && mat_Dabs(diff[1]) < symprec && mat_Dabs(diff[2]) < symprec) { is_all_ok = 1; break; } } if (! is_all_ok) { break; } } if (is_all_ok) { ir_rot[num_rot] = i; num_rot++; } } if ((rot_reciprocal_q = mat_alloc_MatINT(num_rot)) != NULL) { for (i = 0; i < num_rot; i++) { mat_copy_matrix_i3(rot_reciprocal_q->mat[i], rot_reciprocal->mat[ir_rot[i]]); } } free(ir_rot); ir_rot = NULL; return rot_reciprocal_q; } static int get_ir_reciprocal_mesh(int grid_address[][3], int ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT *rot_reciprocal) { /* In the following loop, mesh is doubled. */ /* Even and odd mesh numbers correspond to */ /* is_shift[i] are 0 or 1, respectively. */ /* is_shift = [0,0,0] gives Gamma center mesh. */ /* grid: reducible grid points */ /* ir_mapping_table: the mapping from each point to ir-point. 
*/ int i, j, grid_point_rot, num_ir; int address_double[3], address_double_rot[3]; kgd_get_all_grid_addresses(grid_address, mesh); #pragma omp parallel for private(j, grid_point_rot, address_double, address_double_rot) for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) { kgd_get_grid_address_double_mesh(address_double, grid_address[i], mesh, is_shift); ir_mapping_table[i] = i; for (j = 0; j < rot_reciprocal->size; j++) { mat_multiply_matrix_vector_i3(address_double_rot, rot_reciprocal->mat[j], address_double); grid_point_rot = kgd_get_grid_point_double_mesh(address_double_rot, mesh); if (grid_point_rot < ir_mapping_table[i]) { #ifdef _OPENMP ir_mapping_table[i] = grid_point_rot; #else ir_mapping_table[i] = ir_mapping_table[grid_point_rot]; break; #endif } } } num_ir = 0; #pragma omp parallel for reduction(+:num_ir) for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) { if (ir_mapping_table[i] == i) { num_ir++; } } return num_ir; } /* Relocate grid addresses to first Brillouin zone */ /* bz_grid_address[prod(mesh + 1)][3] */ /* bz_map[prod(mesh * 2)] */ static int relocate_BZ_grid_address(int bz_grid_address[][3], int bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]) { double tolerance, min_distance; double q_vector[3], distance[KPT_NUM_BZ_SEARCH_SPACE]; int bzmesh[3], bz_address_double[3]; int i, j, k, min_index, boundary_num_gp, total_num_gp, bzgp, gp; tolerance = get_tolerance_for_BZ_reduction(rec_lattice, mesh); for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; } for (i = 0; i < bzmesh[0] * bzmesh[1] * bzmesh[2]; i++) { bz_map[i] = -1; } boundary_num_gp = 0; total_num_gp = mesh[0] * mesh[1] * mesh[2]; /* Multithreading doesn't work for this loop since gp calculated */ /* with boundary_num_gp is unstable to store bz_grid_address. */ for (i = 0; i < total_num_gp; i++) { for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { for (k = 0; k < 3; k++) { q_vector[k] = ((grid_address[i][k] + kpt_bz_search_space[j][k] * mesh[k]) * 2 + is_shift[k]) / ((double)mesh[k]) / 2; } mat_multiply_matrix_vector_d3(q_vector, rec_lattice, q_vector); distance[j] = mat_norm_squared_d3(q_vector); } min_distance = distance[0]; min_index = 0; for (j = 1; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { if (distance[j] < min_distance) { min_distance = distance[j]; min_index = j; } } for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { if (distance[j] < min_distance + tolerance) { if (j == min_index) { gp = i; } else { gp = boundary_num_gp + total_num_gp; } for (k = 0; k < 3; k++) { bz_grid_address[gp][k] = grid_address[i][k] + kpt_bz_search_space[j][k] * mesh[k]; bz_address_double[k] = bz_grid_address[gp][k] * 2 + is_shift[k]; } bzgp = kgd_get_grid_point_double_mesh(bz_address_double, bzmesh); bz_map[bzgp] = gp; if (j != min_index) { boundary_num_gp++; } } } } return boundary_num_gp + total_num_gp; } static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3], const int mesh[3]) { int i, j; double tolerance; double length[3]; for (i = 0; i < 3; i++) { length[i] = 0; for (j = 0; j < 3; j++) { length[i] += rec_lattice[j][i] * rec_lattice[j][i]; } length[i] /= mesh[i] * mesh[i]; } tolerance = length[0]; for (i = 1; i < 3; i++) { if (tolerance < length[i]) { tolerance = length[i]; } } tolerance *= 0.01; return tolerance; }
/* All rights reserved. */ /* This file is part of spglib. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> #include "mathfunc.h" #include "kpoint.h" #include "kgrid.h" #ifdef KPTWARNING #include <stdio.h> #define warning_print(...) fprintf(stderr,__VA_ARGS__) #else #define warning_print(...) 
#endif const int kpt_bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = { {0, 0, 0}, {0, 0, 1}, {0, 0, 2}, {0, 0, -2}, {0, 0, -1}, {0, 1, 0}, {0, 1, 1}, {0, 1, 2}, {0, 1, -2}, {0, 1, -1}, {0, 2, 0}, {0, 2, 1}, {0, 2, 2}, {0, 2, -2}, {0, 2, -1}, {0, -2, 0}, {0, -2, 1}, {0, -2, 2}, {0, -2, -2}, {0, -2, -1}, {0, -1, 0}, {0, -1, 1}, {0, -1, 2}, {0, -1, -2}, {0, -1, -1}, {1, 0, 0}, {1, 0, 1}, {1, 0, 2}, {1, 0, -2}, {1, 0, -1}, {1, 1, 0}, {1, 1, 1}, {1, 1, 2}, {1, 1, -2}, {1, 1, -1}, {1, 2, 0}, {1, 2, 1}, {1, 2, 2}, {1, 2, -2}, {1, 2, -1}, {1, -2, 0}, {1, -2, 1}, {1, -2, 2}, {1, -2, -2}, {1, -2, -1}, {1, -1, 0}, {1, -1, 1}, {1, -1, 2}, {1, -1, -2}, {1, -1, -1}, {2, 0, 0}, {2, 0, 1}, {2, 0, 2}, {2, 0, -2}, {2, 0, -1}, {2, 1, 0}, {2, 1, 1}, {2, 1, 2}, {2, 1, -2}, {2, 1, -1}, {2, 2, 0}, {2, 2, 1}, {2, 2, 2}, {2, 2, -2}, {2, 2, -1}, {2, -2, 0}, {2, -2, 1}, {2, -2, 2}, {2, -2, -2}, {2, -2, -1}, {2, -1, 0}, {2, -1, 1}, {2, -1, 2}, {2, -1, -2}, {2, -1, -1}, {-2, 0, 0}, {-2, 0, 1}, {-2, 0, 2}, {-2, 0, -2}, {-2, 0, -1}, {-2, 1, 0}, {-2, 1, 1}, {-2, 1, 2}, {-2, 1, -2}, {-2, 1, -1}, {-2, 2, 0}, {-2, 2, 1}, {-2, 2, 2}, {-2, 2, -2}, {-2, 2, -1}, {-2, -2, 0}, {-2, -2, 1}, {-2, -2, 2}, {-2, -2, -2}, {-2, -2, -1}, {-2, -1, 0}, {-2, -1, 1}, {-2, -1, 2}, {-2, -1, -2}, {-2, -1, -1}, {-1, 0, 0}, {-1, 0, 1}, {-1, 0, 2}, {-1, 0, -2}, {-1, 0, -1}, {-1, 1, 0}, {-1, 1, 1}, {-1, 1, 2}, {-1, 1, -2}, {-1, 1, -1}, {-1, 2, 0}, {-1, 2, 1}, {-1, 2, 2}, {-1, 2, -2}, {-1, 2, -1}, {-1, -2, 0}, {-1, -2, 1}, {-1, -2, 2}, {-1, -2, -2}, {-1, -2, -1}, {-1, -1, 0}, {-1, -1, 1}, {-1, -1, 2}, {-1, -1, -2}, {-1, -1, -1} }; static MatINT * get_point_group_reciprocal(const MatINT * rotations, const int is_time_reversal); static MatINT * get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal, const double symprec, const int num_q, SPGCONST double qpoints[][3]); static int get_ir_reciprocal_mesh(int grid_address[][3], int map[], const int mesh[3], const int is_shift[3], const MatINT * rot_reciprocal); static int relocate_BZ_grid_address(int bz_grid_address[][3], int bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]); static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3], const int mesh[3]); /* grid_address (e.g. 4x4x4 mesh, unless GRID_ORDER_XYZ is defined) */ /* [[ 0 0 0] */ /* [ 1 0 0] */ /* [ 2 0 0] */ /* [-1 0 0] */ /* [ 0 1 0] */ /* [ 1 1 0] */ /* [ 2 1 0] */ /* [-1 1 0] */ /* .... ] */ /* */ /* Each value of 'map' correspnds to the index of grid_point. 
*/ int kpt_get_irreducible_reciprocal_mesh(int grid_address[][3], int map[], const int mesh[3], const int is_shift[3], const MatINT * rot_reciprocal) { int num_ir; num_ir = get_ir_reciprocal_mesh(grid_address, map, mesh, is_shift, rot_reciprocal); return num_ir; } int kpt_get_stabilized_reciprocal_mesh(int grid_address[][3], int map[], const int mesh[3], const int is_shift[3], const int is_time_reversal, const MatINT * rotations, const int num_q, SPGCONST double qpoints[][3]) { int num_ir; MatINT *rot_reciprocal, *rot_reciprocal_q; double tolerance; rot_reciprocal = NULL; rot_reciprocal_q = NULL; rot_reciprocal = get_point_group_reciprocal(rotations, is_time_reversal); tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]); rot_reciprocal_q = get_point_group_reciprocal_with_q(rot_reciprocal, tolerance, num_q, qpoints); num_ir = get_ir_reciprocal_mesh(grid_address, map, mesh, is_shift, rot_reciprocal_q); mat_free_MatINT(rot_reciprocal_q); rot_reciprocal_q = NULL; mat_free_MatINT(rot_reciprocal); rot_reciprocal = NULL; return num_ir; } void kpt_get_grid_points_by_rotations(int rot_grid_points[], const int address_orig[3], const MatINT * rot_reciprocal, const int mesh[3], const int is_shift[3]) { int i; int address_double_orig[3], address_double[3]; for (i = 0; i < 3; i++) { address_double_orig[i] = address_orig[i] * 2 + is_shift[i]; } for (i = 0; i < rot_reciprocal->size; i++) { mat_multiply_matrix_vector_i3(address_double, rot_reciprocal->mat[i], address_double_orig); rot_grid_points[i] = kgd_get_grid_point_double_mesh(address_double, mesh); } } void kpt_get_BZ_grid_points_by_rotations(int rot_grid_points[], const int address_orig[3], const MatINT * rot_reciprocal, const int mesh[3], const int is_shift[3], const int bz_map[]) { int i; int address_double_orig[3], address_double[3], bzmesh[3]; for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; address_double_orig[i] = address_orig[i] * 2 + is_shift[i]; } for (i = 0; i < rot_reciprocal->size; i++) { mat_multiply_matrix_vector_i3(address_double, rot_reciprocal->mat[i], address_double_orig); rot_grid_points[i] = bz_map[kgd_get_grid_point_double_mesh(address_double, bzmesh)]; } } int kpt_relocate_BZ_grid_address(int bz_grid_address[][3], int bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]) { return relocate_BZ_grid_address(bz_grid_address, bz_map, grid_address, mesh, rec_lattice, is_shift); } MatINT * kpt_get_point_group_reciprocal(const MatINT * rotations, const int is_time_reversal) { return get_point_group_reciprocal(rotations, is_time_reversal); } MatINT * kpt_get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal, const double symprec, const int num_q, SPGCONST double qpoints[][3]) { return get_point_group_reciprocal_with_q(rot_reciprocal, symprec, num_q, qpoints); } /* Return NULL if failed */ static MatINT * get_point_group_reciprocal(const MatINT * rotations, const int is_time_reversal) { int i, j, num_rot; MatINT *rot_reciprocal, *rot_return; int *unique_rot; SPGCONST int inversion[3][3] = { {-1, 0, 0}, {0, -1, 0}, {0, 0, -1} }; rot_reciprocal = NULL; rot_return = NULL; unique_rot = NULL; if (is_time_reversal) { if ((rot_reciprocal = mat_alloc_MatINT(rotations->size * 2)) == NULL) { return NULL; } } else { if ((rot_reciprocal = mat_alloc_MatINT(rotations->size)) == NULL) { return NULL; } } if ((unique_rot = (int *)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) { warning_print("spglib: Memory of unique_rot could not be allocated."); 
mat_free_MatINT(rot_reciprocal); rot_reciprocal = NULL; return NULL; } for (i = 0; i < rot_reciprocal->size; i++) { unique_rot[i] = -1; } for (i = 0; i < rotations->size; i++) { mat_transpose_matrix_i3(rot_reciprocal->mat[i], rotations->mat[i]); if (is_time_reversal) { mat_multiply_matrix_i3(rot_reciprocal->mat[rotations->size + i], inversion, rot_reciprocal->mat[i]); } } num_rot = 0; for (i = 0; i < rot_reciprocal->size; i++) { for (j = 0; j < num_rot; j++) { if (mat_check_identity_matrix_i3(rot_reciprocal->mat[unique_rot[j]], rot_reciprocal->mat[i])) { goto escape; } } unique_rot[num_rot] = i; num_rot++; escape: ; } if ((rot_return = mat_alloc_MatINT(num_rot)) != NULL) { for (i = 0; i < num_rot; i++) { mat_copy_matrix_i3(rot_return->mat[i], rot_reciprocal->mat[unique_rot[i]]); } } free(unique_rot); unique_rot = NULL; mat_free_MatINT(rot_reciprocal); rot_reciprocal = NULL; return rot_return; } /* Return NULL if failed */ static MatINT * get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal, const double symprec, const int num_q, SPGCONST double qpoints[][3]) { int i, j, k, l, is_all_ok, num_rot; int *ir_rot; double q_rot[3], diff[3]; MatINT *rot_reciprocal_q; ir_rot = NULL; rot_reciprocal_q = NULL; is_all_ok = 0; num_rot = 0; if ((ir_rot = (int *)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) { warning_print("spglib: Memory of ir_rot could not be allocated."); return NULL; } for (i = 0; i < rot_reciprocal->size; i++) { ir_rot[i] = -1; } for (i = 0; i < rot_reciprocal->size; i++) { for (j = 0; j < num_q; j++) { is_all_ok = 0; mat_multiply_matrix_vector_id3(q_rot, rot_reciprocal->mat[i], qpoints[j]); for (k = 0; k < num_q; k++) { for (l = 0; l < 3; l++) { diff[l] = q_rot[l] - qpoints[k][l]; diff[l] -= mat_Nint(diff[l]); } if (mat_Dabs(diff[0]) < symprec && mat_Dabs(diff[1]) < symprec && mat_Dabs(diff[2]) < symprec) { is_all_ok = 1; break; } } if (!is_all_ok) { break; } } if (is_all_ok) { ir_rot[num_rot] = i; num_rot++; } } if ((rot_reciprocal_q = mat_alloc_MatINT(num_rot)) != NULL) { for (i = 0; i < num_rot; i++) { mat_copy_matrix_i3(rot_reciprocal_q->mat[i], rot_reciprocal->mat[ir_rot[i]]); } } free(ir_rot); ir_rot = NULL; return rot_reciprocal_q; } static int get_ir_reciprocal_mesh(int grid_address[][3], int ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT * rot_reciprocal) { /* In the following loop, mesh is doubled. */ /* Even and odd mesh numbers correspond to */ /* is_shift[i] are 0 or 1, respectively. */ /* is_shift = [0,0,0] gives Gamma center mesh. */ /* grid: reducible grid points */ /* ir_mapping_table: the mapping from each point to ir-point. 
*/ int i, j, grid_point_rot, num_ir; int address_double[3], address_double_rot[3]; kgd_get_all_grid_addresses(grid_address, mesh); for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) { kgd_get_grid_address_double_mesh(address_double, grid_address[i], mesh, is_shift); ir_mapping_table[i] = i; for (j = 0; j < rot_reciprocal->size; j++) { mat_multiply_matrix_vector_i3(address_double_rot, rot_reciprocal->mat[j], address_double); grid_point_rot = kgd_get_grid_point_double_mesh(address_double_rot, mesh); if (grid_point_rot < ir_mapping_table[i]) { } } } num_ir = 0; for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) { if (ir_mapping_table[i] == i) { num_ir++; } } return num_ir; } /* Relocate grid addresses to first Brillouin zone */ /* bz_grid_address[prod(mesh + 1)][3] */ /* bz_map[prod(mesh * 2)] */ static int relocate_BZ_grid_address(int bz_grid_address[][3], int bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]) { double tolerance, min_distance; double q_vector[3], distance[KPT_NUM_BZ_SEARCH_SPACE]; int bzmesh[3], bz_address_double[3]; int i, j, k, min_index, boundary_num_gp, total_num_gp, bzgp, gp; tolerance = get_tolerance_for_BZ_reduction(rec_lattice, mesh); for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; } for (i = 0; i < bzmesh[0] * bzmesh[1] * bzmesh[2]; i++) { bz_map[i] = -1; } boundary_num_gp = 0; total_num_gp = mesh[0] * mesh[1] * mesh[2]; /* Multithreading doesn't work for this loop since gp calculated */ /* with boundary_num_gp is unstable to store bz_grid_address. */ for (i = 0; i < total_num_gp; i++) { for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { for (k = 0; k < 3; k++) { q_vector[k] = ((grid_address[i][k] + kpt_bz_search_space[j][k] * mesh[k]) * 2 + is_shift[k]) / ((double)mesh[k]) / 2; } mat_multiply_matrix_vector_d3(q_vector, rec_lattice, q_vector); distance[j] = mat_norm_squared_d3(q_vector); } min_distance = distance[0]; min_index = 0; for (j = 1; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { if (distance[j] < min_distance) { min_distance = distance[j]; min_index = j; } } for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { if (distance[j] < min_distance + tolerance) { if (j == min_index) { gp = i; } else { gp = boundary_num_gp + total_num_gp; } for (k = 0; k < 3; k++) { bz_grid_address[gp][k] = grid_address[i][k] + kpt_bz_search_space[j][k] * mesh[k]; bz_address_double[k] = bz_grid_address[gp][k] * 2 + is_shift[k]; } bzgp = kgd_get_grid_point_double_mesh(bz_address_double, bzmesh); bz_map[bzgp] = gp; if (j != min_index) { boundary_num_gp++; } } } } return boundary_num_gp + total_num_gp; } static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3], const int mesh[3]) { int i, j; double tolerance; double length[3]; for (i = 0; i < 3; i++) { length[i] = 0; for (j = 0; j < 3; j++) { length[i] += rec_lattice[j][i] * rec_lattice[j][i]; } length[i] /= mesh[i] * mesh[i]; } tolerance = length[0]; for (i = 1; i < 3; i++) { if (tolerance < length[i]) { tolerance = length[i]; } } tolerance *= 0.01; return tolerance; }
/* All rights reserved. */ /* This file is part of spglib. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> #include "mathfunc.h" #include "kpoint.h" #include "kgrid.h" #ifdef KPTWARNING #include <stdio.h> #define warning_print(...) fprintf(stderr,__VA_ARGS__) #else #define warning_print(...) 
#endif const int kpt_bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = { {0, 0, 0}, {0, 0, 1}, {0, 0, 2}, {0, 0, -2}, {0, 0, -1}, {0, 1, 0}, {0, 1, 1}, {0, 1, 2}, {0, 1, -2}, {0, 1, -1}, {0, 2, 0}, {0, 2, 1}, {0, 2, 2}, {0, 2, -2}, {0, 2, -1}, {0, -2, 0}, {0, -2, 1}, {0, -2, 2}, {0, -2, -2}, {0, -2, -1}, {0, -1, 0}, {0, -1, 1}, {0, -1, 2}, {0, -1, -2}, {0, -1, -1}, {1, 0, 0}, {1, 0, 1}, {1, 0, 2}, {1, 0, -2}, {1, 0, -1}, {1, 1, 0}, {1, 1, 1}, {1, 1, 2}, {1, 1, -2}, {1, 1, -1}, {1, 2, 0}, {1, 2, 1}, {1, 2, 2}, {1, 2, -2}, {1, 2, -1}, {1, -2, 0}, {1, -2, 1}, {1, -2, 2}, {1, -2, -2}, {1, -2, -1}, {1, -1, 0}, {1, -1, 1}, {1, -1, 2}, {1, -1, -2}, {1, -1, -1}, {2, 0, 0}, {2, 0, 1}, {2, 0, 2}, {2, 0, -2}, {2, 0, -1}, {2, 1, 0}, {2, 1, 1}, {2, 1, 2}, {2, 1, -2}, {2, 1, -1}, {2, 2, 0}, {2, 2, 1}, {2, 2, 2}, {2, 2, -2}, {2, 2, -1}, {2, -2, 0}, {2, -2, 1}, {2, -2, 2}, {2, -2, -2}, {2, -2, -1}, {2, -1, 0}, {2, -1, 1}, {2, -1, 2}, {2, -1, -2}, {2, -1, -1}, {-2, 0, 0}, {-2, 0, 1}, {-2, 0, 2}, {-2, 0, -2}, {-2, 0, -1}, {-2, 1, 0}, {-2, 1, 1}, {-2, 1, 2}, {-2, 1, -2}, {-2, 1, -1}, {-2, 2, 0}, {-2, 2, 1}, {-2, 2, 2}, {-2, 2, -2}, {-2, 2, -1}, {-2, -2, 0}, {-2, -2, 1}, {-2, -2, 2}, {-2, -2, -2}, {-2, -2, -1}, {-2, -1, 0}, {-2, -1, 1}, {-2, -1, 2}, {-2, -1, -2}, {-2, -1, -1}, {-1, 0, 0}, {-1, 0, 1}, {-1, 0, 2}, {-1, 0, -2}, {-1, 0, -1}, {-1, 1, 0}, {-1, 1, 1}, {-1, 1, 2}, {-1, 1, -2}, {-1, 1, -1}, {-1, 2, 0}, {-1, 2, 1}, {-1, 2, 2}, {-1, 2, -2}, {-1, 2, -1}, {-1, -2, 0}, {-1, -2, 1}, {-1, -2, 2}, {-1, -2, -2}, {-1, -2, -1}, {-1, -1, 0}, {-1, -1, 1}, {-1, -1, 2}, {-1, -1, -2}, {-1, -1, -1} }; static MatINT * get_point_group_reciprocal(const MatINT * rotations, const int is_time_reversal); static MatINT * get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal, const double symprec, const int num_q, SPGCONST double qpoints[][3]); static int get_ir_reciprocal_mesh(int grid_address[][3], int map[], const int mesh[3], const int is_shift[3], const MatINT * rot_reciprocal); static int relocate_BZ_grid_address(int bz_grid_address[][3], int bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]); static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3], const int mesh[3]); /* grid_address (e.g. 4x4x4 mesh, unless GRID_ORDER_XYZ is defined) */ /* [[ 0 0 0] */ /* [ 1 0 0] */ /* [ 2 0 0] */ /* [-1 0 0] */ /* [ 0 1 0] */ /* [ 1 1 0] */ /* [ 2 1 0] */ /* [-1 1 0] */ /* .... ] */ /* */ /* Each value of 'map' correspnds to the index of grid_point. 
*/ int kpt_get_irreducible_reciprocal_mesh(int grid_address[][3], int map[], const int mesh[3], const int is_shift[3], const MatINT * rot_reciprocal) { int num_ir; num_ir = get_ir_reciprocal_mesh(grid_address, map, mesh, is_shift, rot_reciprocal); return num_ir; } int kpt_get_stabilized_reciprocal_mesh(int grid_address[][3], int map[], const int mesh[3], const int is_shift[3], const int is_time_reversal, const MatINT * rotations, const int num_q, SPGCONST double qpoints[][3]) { int num_ir; MatINT *rot_reciprocal, *rot_reciprocal_q; double tolerance; rot_reciprocal = NULL; rot_reciprocal_q = NULL; rot_reciprocal = get_point_group_reciprocal(rotations, is_time_reversal); tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]); rot_reciprocal_q = get_point_group_reciprocal_with_q(rot_reciprocal, tolerance, num_q, qpoints); num_ir = get_ir_reciprocal_mesh(grid_address, map, mesh, is_shift, rot_reciprocal_q); mat_free_MatINT(rot_reciprocal_q); rot_reciprocal_q = NULL; mat_free_MatINT(rot_reciprocal); rot_reciprocal = NULL; return num_ir; } void kpt_get_grid_points_by_rotations(int rot_grid_points[], const int address_orig[3], const MatINT * rot_reciprocal, const int mesh[3], const int is_shift[3]) { int i; int address_double_orig[3], address_double[3]; for (i = 0; i < 3; i++) { address_double_orig[i] = address_orig[i] * 2 + is_shift[i]; } for (i = 0; i < rot_reciprocal->size; i++) { mat_multiply_matrix_vector_i3(address_double, rot_reciprocal->mat[i], address_double_orig); rot_grid_points[i] = kgd_get_grid_point_double_mesh(address_double, mesh); } } void kpt_get_BZ_grid_points_by_rotations(int rot_grid_points[], const int address_orig[3], const MatINT * rot_reciprocal, const int mesh[3], const int is_shift[3], const int bz_map[]) { int i; int address_double_orig[3], address_double[3], bzmesh[3]; for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; address_double_orig[i] = address_orig[i] * 2 + is_shift[i]; } for (i = 0; i < rot_reciprocal->size; i++) { mat_multiply_matrix_vector_i3(address_double, rot_reciprocal->mat[i], address_double_orig); rot_grid_points[i] = bz_map[kgd_get_grid_point_double_mesh(address_double, bzmesh)]; } } int kpt_relocate_BZ_grid_address(int bz_grid_address[][3], int bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]) { return relocate_BZ_grid_address(bz_grid_address, bz_map, grid_address, mesh, rec_lattice, is_shift); } MatINT * kpt_get_point_group_reciprocal(const MatINT * rotations, const int is_time_reversal) { return get_point_group_reciprocal(rotations, is_time_reversal); } MatINT * kpt_get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal, const double symprec, const int num_q, SPGCONST double qpoints[][3]) { return get_point_group_reciprocal_with_q(rot_reciprocal, symprec, num_q, qpoints); } /* Return NULL if failed */ static MatINT * get_point_group_reciprocal(const MatINT * rotations, const int is_time_reversal) { int i, j, num_rot; MatINT *rot_reciprocal, *rot_return; int *unique_rot; SPGCONST int inversion[3][3] = { {-1, 0, 0}, {0, -1, 0}, {0, 0, -1} }; rot_reciprocal = NULL; rot_return = NULL; unique_rot = NULL; if (is_time_reversal) { if ((rot_reciprocal = mat_alloc_MatINT(rotations->size * 2)) == NULL) { return NULL; } } else { if ((rot_reciprocal = mat_alloc_MatINT(rotations->size)) == NULL) { return NULL; } } if ((unique_rot = (int *)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) { warning_print("spglib: Memory of unique_rot could not be allocated."); 
mat_free_MatINT(rot_reciprocal); rot_reciprocal = NULL; return NULL; } for (i = 0; i < rot_reciprocal->size; i++) { unique_rot[i] = -1; } for (i = 0; i < rotations->size; i++) { mat_transpose_matrix_i3(rot_reciprocal->mat[i], rotations->mat[i]); if (is_time_reversal) { mat_multiply_matrix_i3(rot_reciprocal->mat[rotations->size + i], inversion, rot_reciprocal->mat[i]); } } num_rot = 0; for (i = 0; i < rot_reciprocal->size; i++) { for (j = 0; j < num_rot; j++) { if (mat_check_identity_matrix_i3(rot_reciprocal->mat[unique_rot[j]], rot_reciprocal->mat[i])) { goto escape; } } unique_rot[num_rot] = i; num_rot++; escape: ; } if ((rot_return = mat_alloc_MatINT(num_rot)) != NULL) { for (i = 0; i < num_rot; i++) { mat_copy_matrix_i3(rot_return->mat[i], rot_reciprocal->mat[unique_rot[i]]); } } free(unique_rot); unique_rot = NULL; mat_free_MatINT(rot_reciprocal); rot_reciprocal = NULL; return rot_return; } /* Return NULL if failed */ static MatINT * get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal, const double symprec, const int num_q, SPGCONST double qpoints[][3]) { int i, j, k, l, is_all_ok, num_rot; int *ir_rot; double q_rot[3], diff[3]; MatINT *rot_reciprocal_q; ir_rot = NULL; rot_reciprocal_q = NULL; is_all_ok = 0; num_rot = 0; if ((ir_rot = (int *)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) { warning_print("spglib: Memory of ir_rot could not be allocated."); return NULL; } for (i = 0; i < rot_reciprocal->size; i++) { ir_rot[i] = -1; } for (i = 0; i < rot_reciprocal->size; i++) { for (j = 0; j < num_q; j++) { is_all_ok = 0; mat_multiply_matrix_vector_id3(q_rot, rot_reciprocal->mat[i], qpoints[j]); for (k = 0; k < num_q; k++) { for (l = 0; l < 3; l++) { diff[l] = q_rot[l] - qpoints[k][l]; diff[l] -= mat_Nint(diff[l]); } if (mat_Dabs(diff[0]) < symprec && mat_Dabs(diff[1]) < symprec && mat_Dabs(diff[2]) < symprec) { is_all_ok = 1; break; } } if (!is_all_ok) { break; } } if (is_all_ok) { ir_rot[num_rot] = i; num_rot++; } } if ((rot_reciprocal_q = mat_alloc_MatINT(num_rot)) != NULL) { for (i = 0; i < num_rot; i++) { mat_copy_matrix_i3(rot_reciprocal_q->mat[i], rot_reciprocal->mat[ir_rot[i]]); } } free(ir_rot); ir_rot = NULL; return rot_reciprocal_q; } static int get_ir_reciprocal_mesh(int grid_address[][3], int ir_mapping_table[], const int mesh[3], const int is_shift[3], const MatINT * rot_reciprocal) { /* In the following loop, mesh is doubled. */ /* Even and odd mesh numbers correspond to */ /* is_shift[i] are 0 or 1, respectively. */ /* is_shift = [0,0,0] gives Gamma center mesh. */ /* grid: reducible grid points */ /* ir_mapping_table: the mapping from each point to ir-point. 
*/ int i, j, grid_point_rot, num_ir; int address_double[3], address_double_rot[3]; kgd_get_all_grid_addresses(grid_address, mesh); #pragma omp parallel for private(j, grid_point_rot, address_double, address_double_rot) for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) { kgd_get_grid_address_double_mesh(address_double, grid_address[i], mesh, is_shift); ir_mapping_table[i] = i; for (j = 0; j < rot_reciprocal->size; j++) { mat_multiply_matrix_vector_i3(address_double_rot, rot_reciprocal->mat[j], address_double); grid_point_rot = kgd_get_grid_point_double_mesh(address_double_rot, mesh); if (grid_point_rot < ir_mapping_table[i]) { #ifdef _OPENMP ir_mapping_table[i] = grid_point_rot; #else ir_mapping_table[i] = ir_mapping_table[grid_point_rot]; break; #endif } } } num_ir = 0; #pragma omp parallel for reduction(+:num_ir) for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) { if (ir_mapping_table[i] == i) { num_ir++; } } return num_ir; } /* Relocate grid addresses to first Brillouin zone */ /* bz_grid_address[prod(mesh + 1)][3] */ /* bz_map[prod(mesh * 2)] */ static int relocate_BZ_grid_address(int bz_grid_address[][3], int bz_map[], SPGCONST int grid_address[][3], const int mesh[3], SPGCONST double rec_lattice[3][3], const int is_shift[3]) { double tolerance, min_distance; double q_vector[3], distance[KPT_NUM_BZ_SEARCH_SPACE]; int bzmesh[3], bz_address_double[3]; int i, j, k, min_index, boundary_num_gp, total_num_gp, bzgp, gp; tolerance = get_tolerance_for_BZ_reduction(rec_lattice, mesh); for (i = 0; i < 3; i++) { bzmesh[i] = mesh[i] * 2; } for (i = 0; i < bzmesh[0] * bzmesh[1] * bzmesh[2]; i++) { bz_map[i] = -1; } boundary_num_gp = 0; total_num_gp = mesh[0] * mesh[1] * mesh[2]; /* Multithreading doesn't work for this loop since gp calculated */ /* with boundary_num_gp is unstable to store bz_grid_address. */ for (i = 0; i < total_num_gp; i++) { for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { for (k = 0; k < 3; k++) { q_vector[k] = ((grid_address[i][k] + kpt_bz_search_space[j][k] * mesh[k]) * 2 + is_shift[k]) / ((double)mesh[k]) / 2; } mat_multiply_matrix_vector_d3(q_vector, rec_lattice, q_vector); distance[j] = mat_norm_squared_d3(q_vector); } min_distance = distance[0]; min_index = 0; for (j = 1; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { if (distance[j] < min_distance) { min_distance = distance[j]; min_index = j; } } for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) { if (distance[j] < min_distance + tolerance) { if (j == min_index) { gp = i; } else { gp = boundary_num_gp + total_num_gp; } for (k = 0; k < 3; k++) { bz_grid_address[gp][k] = grid_address[i][k] + kpt_bz_search_space[j][k] * mesh[k]; bz_address_double[k] = bz_grid_address[gp][k] * 2 + is_shift[k]; } bzgp = kgd_get_grid_point_double_mesh(bz_address_double, bzmesh); bz_map[bzgp] = gp; if (j != min_index) { boundary_num_gp++; } } } } return boundary_num_gp + total_num_gp; } static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3], const int mesh[3]) { int i, j; double tolerance; double length[3]; for (i = 0; i < 3; i++) { length[i] = 0; for (j = 0; j < 3; j++) { length[i] += rec_lattice[j][i] * rec_lattice[j][i]; } length[i] /= mesh[i] * mesh[i]; } tolerance = length[0]; for (i = 1; i < 3; i++) { if (tolerance < length[i]) { tolerance = length[i]; } } tolerance *= 0.01; return tolerance; }
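The kpt_ routines above address mesh points on a doubled grid: a point at integer address a with optional half-grid shift s/2 is stored as 2*a + s, so shifted and unshifted meshes share the same integer arithmetic. The small C sketch below illustrates that convention; double_to_index only mirrors what kgd_get_grid_point_double_mesh does conceptually (the real folding and grid order live in kgrid.c and depend on GRID_ORDER_XYZ), and it handles the unshifted (even) case only.

#include <stdio.h>

/* Fold each doubled coordinate back onto [0, mesh) and linearize in the
   x-fastest order shown in the grid_address comment above. */
static int double_to_index(const int d[3], const int mesh[3]) {
  int idx[3];
  for (int i = 0; i < 3; i++) {
    int a = d[i] / 2;   /* back to grid units; exact for even d[i] */
    idx[i] = ((a % mesh[i]) + mesh[i]) % mesh[i];
  }
  return idx[0] + mesh[0] * (idx[1] + mesh[1] * idx[2]);
}

int main(void) {
  int mesh[3] = {4, 4, 4}, is_shift[3] = {0, 0, 0};
  int addr[3] = {-1, 1, 0};   /* 8th entry of the 4x4x4 grid_address list */
  int d[3];
  for (int i = 0; i < 3; i++) d[i] = addr[i] * 2 + is_shift[i];
  printf("grid point index: %d\n", double_to_index(d, mesh));  /* 7 */
  return 0;
}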
sieve.c
/* * Sequential time * *real 0m4.047s *user 0m3.960s *sys 0m0.080s * *real 0m4.053s *user 0m3.967s *sys 0m0.080s * *real 0m4.042s *user 0m3.962s *sys 0m0.072s * *real 0m4.044s *user 0m3.953s *sys 0m0.084s * *real 0m4.045s *user 0m3.967s *sys 0m0.072s * * Parallel time (Activity 03) * *real 0m3.700s *user 0m6.857s *sys 0m0.104s * *real 0m3.661s *user 0m6.813s *sys 0m0.072s * *real 0m3.797s *user 0m6.955s *sys 0m0.096s * *real 0m3.647s *user 0m6.817s *sys 0m0.076s * *real 0m3.890s *user 0m7.107s *sys 0m0.088s * * Parallel time (Activity 07) * *real 0m2.682s *user 0m10.331s *sys 0m0.080s * *real 0m2.684s *user 0m10.331s *sys 0m0.080s * *real 0m2.689s *user 0m10.309s *sys 0m0.083s * *real 0m2.659s *user 0m9.956s *sys 0m0.103s * *real 0m2.698s *user 0m10.333s *sys 0m0.084s */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <string.h> #include <math.h> int sieveOfEratosthenes(int n) { // Create a boolean array "prime[0..n]" and initialize // all entries in it as true. A value in prime[i] will // finally be false if i is not a prime, else true. int primes = 0; bool *prime = (bool*) malloc((n+1)*sizeof(bool)); int sqrt_n = sqrt(n); memset(prime, true, (n+1)*sizeof(bool)); int i, p; #pragma omp parallel for schedule(dynamic,100) for (p=2; p <= sqrt_n; p++) { // If prime[p] is unchanged, then it is a prime if (prime[p] == true) { // Mark all multiples of p as composite #pragma omp parallel for for(i=p*2; i<=n; i += p) prime[i] = false; } } // count prime numbers #pragma omp parallel for reduction(+:primes) for (int p=2; p<=n; p++) if (prime[p]) primes++; free(prime); return(primes); } int main() { int n = 100000000; printf("%d\n",sieveOfEratosthenes(n)); return 0; }
/* * Sequential time * *real 0m4.047s *user 0m3.960s *sys 0m0.080s * *real 0m4.053s *user 0m3.967s *sys 0m0.080s * *real 0m4.042s *user 0m3.962s *sys 0m0.072s * *real 0m4.044s *user 0m3.953s *sys 0m0.084s * *real 0m4.045s *user 0m3.967s *sys 0m0.072s * * Parallel time (Activity 03) * *real 0m3.700s *user 0m6.857s *sys 0m0.104s * *real 0m3.661s *user 0m6.813s *sys 0m0.072s * *real 0m3.797s *user 0m6.955s *sys 0m0.096s * *real 0m3.647s *user 0m6.817s *sys 0m0.076s * *real 0m3.890s *user 0m7.107s *sys 0m0.088s * * Parallel time (Activity 07) * *real 0m2.682s *user 0m10.331s *sys 0m0.080s * *real 0m2.684s *user 0m10.331s *sys 0m0.080s * *real 0m2.689s *user 0m10.309s *sys 0m0.083s * *real 0m2.659s *user 0m9.956s *sys 0m0.103s * *real 0m2.698s *user 0m10.333s *sys 0m0.084s */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <string.h> #include <math.h> int sieveOfEratosthenes(int n) { // Create a boolean array "prime[0..n]" and initialize // all entries in it as true. A value in prime[i] will // finally be false if i is not a prime, else true. int primes = 0; bool *prime = (bool*) malloc((n+1)*sizeof(bool)); int sqrt_n = sqrt(n); memset(prime, true, (n+1)*sizeof(bool)); int i, p; for (p=2; p <= sqrt_n; p++) { // If prime[p] is unchanged, then it is a prime if (prime[p] == true) { // Mark all multiples of p as composite for(i=p*2; i<=n; i += p) prime[i] = false; } } // count prime numbers for (int p=2; p<=n; p++) if (prime[p]) primes++; free(prime); return(primes); } int main() { int n = 100000000; printf("%d\n",sieveOfEratosthenes(n)); return 0; }
/* * Sequential time * *real 0m4.047s *user 0m3.960s *sys 0m0.080s * *real 0m4.053s *user 0m3.967s *sys 0m0.080s * *real 0m4.042s *user 0m3.962s *sys 0m0.072s * *real 0m4.044s *user 0m3.953s *sys 0m0.084s * *real 0m4.045s *user 0m3.967s *sys 0m0.072s * * Parallel time (Activity 03) * *real 0m3.700s *user 0m6.857s *sys 0m0.104s * *real 0m3.661s *user 0m6.813s *sys 0m0.072s * *real 0m3.797s *user 0m6.955s *sys 0m0.096s * *real 0m3.647s *user 0m6.817s *sys 0m0.076s * *real 0m3.890s *user 0m7.107s *sys 0m0.088s * * Parallel time (Activity 07) * *real 0m2.682s *user 0m10.331s *sys 0m0.080s * *real 0m2.684s *user 0m10.331s *sys 0m0.080s * *real 0m2.689s *user 0m10.309s *sys 0m0.083s * *real 0m2.659s *user 0m9.956s *sys 0m0.103s * *real 0m2.698s *user 0m10.333s *sys 0m0.084s */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <string.h> #include <math.h> int sieveOfEratosthenes(int n) { // Create a boolean array "prime[0..n]" and initialize // all entries in it as true. A value in prime[i] will // finally be false if i is not a prime, else true. int primes = 0; bool *prime = (bool*) malloc((n+1)*sizeof(bool)); int sqrt_n = sqrt(n); memset(prime, true, (n+1)*sizeof(bool)); int i, p; #pragma omp parallel for schedule(dynamic,100) for (p=2; p <= sqrt_n; p++) { // If prime[p] is unchanged, then it is a prime if (prime[p] == true) { // Mark all multiples of p as composite #pragma omp parallel for for(i=p*2; i<=n; i += p) prime[i] = false; } } // count prime numbers #pragma omp parallel for reduction(+:primes) for (int p=2; p<=n; p++) if (prime[p]) primes++; free(prime); return(primes); } int main() { int n = 100000000; printf("%d\n",sieveOfEratosthenes(n)); return 0; }
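The parallel sieve variants above nest a second #pragma omp parallel for inside an already-parallel outer loop; unless nested parallelism is explicitly enabled, the inner region runs single-threaded within each outer thread. A hedged alternative sketch follows, parallelizing only the inner marking loop and starting it at p*p (an optimization not present in the row above, since smaller multiples are already marked). Concurrent stores of the same value false are the usual justification in parallel sieves, though formally this is still a race under the C memory model.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

int count_primes(int n) {
    bool *prime = malloc((n + 1) * sizeof(bool));
    int sqrt_n = (int)sqrt((double)n);
    int primes = 0, i, p;
    memset(prime, true, (n + 1) * sizeof(bool));
    for (p = 2; p <= sqrt_n; p++) {          /* outer loop stays serial */
        if (prime[p]) {
#pragma omp parallel for
            for (i = p * p; i <= n; i += p)  /* p*p: smaller multiples already marked */
                prime[i] = false;            /* racing threads all store false */
        }
    }
#pragma omp parallel for reduction(+:primes)
    for (p = 2; p <= n; p++)
        if (prime[p]) primes++;
    free(prime);
    return primes;
}

Built with -fopenmp -lm and called with n = 100000000 as in the row's main(), this should print the same count as the original (5761455, the number of primes below 10^8).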
rng.c
#include <stdio.h> #include <omp.h> int main() { int chaos = 10000; int bound = 10; int i; // RNG will purposefully cause a concurrency hazard // by incrementing this variable without synchronization long hazard = 0; #pragma omp parallel for schedule(guided) for (i = 0; i < chaos; i++) hazard++; printf("%ld\n", hazard % bound); return 0; }
#include <stdio.h> #include <omp.h> int main() { int chaos = 10000; int bound = 10; int i; // RNG will purposefully cause a concurrency hazard // by incrementing this variable without synchronization long hazard = 0; for (i = 0; i < chaos; i++) hazard++; printf("%ld\n", hazard % bound); return 0; }
#include <stdio.h> #include <omp.h> int main() { int chaos = 10000; int bound = 10; int i; // RNG will purposefully cause a concurrency hazard // by incrementing this variable without synchronization long hazard = 0; #pragma omp parallel for schedule(guided) for (i = 0; i < chaos; i++) hazard++; printf("%ld\n", hazard % bound); return 0; }
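In the rng.c row the unsynchronized hazard++ is the point: increments lost to the data race make the printed digit vary from run to run. For contrast, a sketch of the same loop with the race removed via a reduction, which makes the result deterministic (10000 % 10 == 0) and so defeats the "RNG".

#include <stdio.h>

int main(void) {
    int chaos = 10000, bound = 10, i;
    long hazard = 0;
#pragma omp parallel for reduction(+:hazard)
    for (i = 0; i < chaos; i++)
        hazard++;                         /* per-thread copies, summed at the end */
    printf("%ld\n", hazard % bound);      /* always 0: no updates are lost */
    return 0;
}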
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4)); ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(1,ceild(16*t2-Nz+9,4)),2*t1+1),4*t1-4*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(8*t1+Ny+7,4)),floord(16*t2+Ny+3,4)),floord(16*t1-16*t2+Nz+Ny+5,4));t3++) { for (t4=max(max(max(0,ceild(t1-127,128)),ceild(16*t2-Nz-1011,1024)),ceild(4*t3-Ny-1011,1024));t4<=min(min(min(min(floord(4*Nt+Nx-9,1024),floord(8*t1+Nx+7,1024)),floord(16*t2+Nx+3,1024)),floord(4*t3+Nx-9,1024)),floord(16*t1-16*t2+Nz+Nx+5,1024));t4++) { for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),t3-1),256*t4+254);t5++) { for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(1024*t4,4*t5+4); ubv=min(1024*t4+1023,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + 
(coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library.
* * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= floord(Nt - 1, 2); t1++) { lbp = max(ceild(t1, 2), ceild(4 * t1 - Nt + 2, 4)); ubp = min(floord(4 * Nt + Nz - 9, 16), floord(8 * t1 + Nz + 2, 16)); for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(max(max(1, ceild(16 * t2 - Nz + 9, 4)), 2 * t1 + 1), 4 * t1 - 4 * t2 + 2); t3 <= min(min(min(floord(4 * Nt + Ny - 9, 4), floord(8 * t1 + Ny + 7, 4)), floord(16 * t2 + Ny + 3, 4)), floord(16 * t1 - 16 * t2 + Nz + Ny + 5, 4)); t3++) { for (t4 = max(max(max(0, ceild(t1 - 127, 128)), ceild(16 * t2 - Nz - 1011, 1024)), ceild(4 * t3 - Ny - 1011, 1024)); t4 <= min(min(min(min(floord(4 * Nt + Nx - 9, 1024), floord(8 * t1 + Nx + 7, 1024)), floord(16 * t2 + Nx + 3, 1024)), floord(4 * t3 + Nx - 9, 1024)), floord(16 * t1 - 16 * t2 + Nz + Nx + 5, 1024)); t4++) { for (t5 = max(max(max(max(max(0, ceild(16 * t2 - Nz + 5, 4)), ceild(4 * t3 - Ny + 5, 4)), ceild(1024 * t4 - Nx + 5, 4)), 2 * t1), 4 * t1 - 4 * t2 + 1); t5 <= min(min(min(min(min(floord(16 * t1 - 16 * t2 + Nz + 10, 4), Nt - 1), 2 * t1 + 3), 4 * t2 + 2), t3 - 1), 256 * t4 + 254); t5++) { for (t6 = max(max(16 * t2, 4 * t5 + 4), -16 * t1 + 16 * t2 + 8 * t5 - 15); t6 <= min(min(16 * t2 + 15, -16 * t1 + 16 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = 4 * t3; t7 <= min(4 * t3 + 3, 4 * t5 + Ny - 5); t7++) { lbv = max(1024 * t4, 4 * t5 + 4); ubv = min(1024 * t4 + 1023, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((((((((((((coef[0][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (coef[1][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 
% 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]))) + (coef[3][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 1] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef[4][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[5][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]))) + (coef[6][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef[7][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[8][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]))) + (coef[9][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef[10][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[11][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]))) + (coef[12][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library.
* * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= floord(Nt - 1, 2); t1++) { lbp = max(ceild(t1, 2), ceild(4 * t1 - Nt + 2, 4)); ubp = min(floord(4 * Nt + Nz - 9, 16), floord(8 * t1 + Nz + 2, 16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(max(max(1, ceild(16 * t2 - Nz + 9, 4)), 2 * t1 + 1), 4 * t1 - 4 * t2 + 2); t3 <= min(min(min(floord(4 * Nt + Ny - 9, 4), floord(8 * t1 + Ny + 7, 4)), floord(16 * t2 + Ny + 3, 4)), floord(16 * t1 - 16 * t2 + Nz + Ny + 5, 4)); t3++) { for (t4 = max(max(max(0, ceild(t1 - 127, 128)), ceild(16 * t2 - Nz - 1011, 1024)), ceild(4 * t3 - Ny - 1011, 1024)); t4 <= min(min(min(min(floord(4 * Nt + Nx - 9, 1024), floord(8 * t1 + Nx + 7, 1024)), floord(16 * t2 + Nx + 3, 1024)), floord(4 * t3 + Nx - 9, 1024)), floord(16 * t1 - 16 * t2 + Nz + Nx + 5, 1024)); t4++) { for (t5 = max(max(max(max(max(0, ceild(16 * t2 - Nz + 5, 4)), ceild(4 * t3 - Ny + 5, 4)), ceild(1024 * t4 - Nx + 5, 4)), 2 * t1), 4 * t1 - 4 * t2 + 1); t5 <= min(min(min(min(min(floord(16 * t1 - 16 * t2 + Nz + 10, 4), Nt - 1), 2 * t1 + 3), 4 * t2 + 2), t3 - 1), 256 * t4 + 254); t5++) { for (t6 = max(max(16 * t2, 4 * t5 + 4), -16 * t1 + 16 * t2 + 8 * t5 - 15); t6 <= min(min(16 * t2 + 15, -16 * t1 + 16 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = 4 * t3; t7 <= min(4 * t3 + 3, 4 * t5 + Ny - 5); t7++) { lbv = max(1024 * t4, 4 * t5 + 4); ubv = min(1024 * t4 + 1023, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((((((((((((coef[0][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (coef[1][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 
2][(-4 * t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]))) + (coef[3][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 1] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef[4][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[5][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]))) + (coef[6][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef[7][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[8][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]))) + (coef[9][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef[10][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[11][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]))) + (coef[12][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
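A minimal, self-contained sketch of the timing harness the stencil rows use: gettimeofday() around the region, the difference converted to seconds, and the minimum kept across repeated trials. Here the timeval subtraction is inlined in double arithmetic instead of calling the row's timeval_subtract (which handles the microsecond borrow for integer results), and the busy loop is just a stand-in for the stencil sweep.

#include <stdio.h>
#include <sys/time.h>

#define TRIALS 5

int main(void) {
    struct timeval start, end;
    double tdiff, min_tdiff = 1.e100;
    volatile double sink = 0.0;    /* volatile so the work loop is not optimized away */
    int t;
    for (t = 0; t < TRIALS; t++) {
        gettimeofday(&start, 0);
        for (long k = 0; k < 10000000L; k++)   /* stand-in for the stencil sweep */
            sink += 1e-9;
        gettimeofday(&end, 0);
        /* seconds + microseconds, done in double so a negative usec delta is fine */
        tdiff = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0e-6;
        if (tdiff < min_tdiff) min_tdiff = tdiff;
        printf("TEST# %d time: %f\n", t, tdiff);
    }
    printf("min: %f\n", min_tdiff);    /* minimum over trials, as in min_tdiff above */
    return 0;
}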
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class 
FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. 
llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of expected type during expression parsing. The type is tied to /// a particular token; all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows /// us to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this function, as it stores a /// function_ref; clients should make sure all calls to get() with the same /// location happen while function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; /// Source of additional semantic information. ExternalSemaSource *ExternalSource; /// Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here.
if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method() {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list of all the extended vector types. This /// allows us to associate a raw vector type with one of the ext_vector type /// names. This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of the translation unit. /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as matching or mismatching the new-expression /// used in the initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations for which we have /// emitted a list of pure virtual functions.
Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. 
sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. 
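
// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, simplified names; not Clang's code): the
// save/restore pattern that ContextRAII above uses. The constructor records
// the current context and installs a new one; pop() (or the destructor)
// restores the original exactly once, so an early pop() is safe.
#include <cassert>
#include <iostream>
#include <string>

struct SemaLike { std::string CurContext = "TU"; };

class ContextGuard {
  SemaLike &S;
  std::string Saved;
  bool Active = true;

public:
  ContextGuard(SemaLike &S, std::string NewCtx) : S(S), Saved(S.CurContext) {
    S.CurContext = std::move(NewCtx);
  }

  void pop() {              // idempotent: safe to call before destruction
    if (!Active)
      return;
    S.CurContext = Saved;
    Active = false;
  }

  ~ContextGuard() { pop(); }
};

int main() {
  SemaLike S;
  {
    ContextGuard G(S, "class Foo");
    assert(S.CurContext == "class Foo");
  }                                    // destructor restores the saved context
  std::cout << S.CurContext << "\n";   // prints "TU"
}
// (end of sketch) -----------------------------------------------------------
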
Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. 
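
// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, hypothetical names; a pattern inferred
// from the lazily-populated caches above such as StdNamespace and
// NSNumberDecl): look a well-known declaration up once, then reuse the
// cached pointer on every later query.
#include <iostream>
#include <string>

struct Decl { std::string Name; };

static Decl *expensiveLookup(const std::string &Name) {
  std::cout << "looking up " << Name << "\n"; // runs only on the first call
  static Decl D{"std"};
  return Name == "std" ? &D : nullptr;
}

static Decl *getStdNamespace() {
  static Decl *Cache = expensiveLookup("std"); // initialized exactly once
  return Cache;
}

int main() {
  getStdNamespace();
  getStdNamespace(); // no second "looking up" line is printed
}
// (end of sketch) -----------------------------------------------------------
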
This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. std::unique_ptr<MangleNumberingContext> MangleNumbering; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. 
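
// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, simplified; not Clang's code): a stack of
// expression evaluation contexts like the one described above. Entering an
// unevaluated operand (e.g. the operand of sizeof) pushes a record, and
// queries always consult the innermost record.
#include <cassert>
#include <vector>

enum class EvalContext { Unevaluated, ConstantEvaluated, PotentiallyEvaluated };

class EvalContextStack {
  std::vector<EvalContext> Stack{EvalContext::PotentiallyEvaluated};

public:
  void push(EvalContext C) { Stack.push_back(C); }
  void pop() { Stack.pop_back(); }
  bool isUnevaluated() const {
    return Stack.back() == EvalContext::Unevaluated;
  }
};

int main() {
  EvalContextStack S;
  assert(!S.isUnevaluated());
  S.push(EvalContext::Unevaluated); // e.g. entering a sizeof operand
  assert(S.isUnevaluated());        // no code generated, no odr-use marking
  S.pop();
  assert(!S.isUnevaluated());
}
// (end of sketch) -----------------------------------------------------------
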
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
        ExprContext(ExprContext) {}

  /// Retrieve the mangling numbering context, used to consistently
  /// number constructs like lambdas for mangling.
  MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }

  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};

/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *
getCurrentMangleNumberContext(const DeclContext *DC,
                              Decl *&ManglingContextDecl,
                              bool SkpNoODRChk = false,
                              bool *Forced = nullptr);

/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  enum Kind { NoMemberOrDeleted, Ambiguous, Success };

private:
  llvm::PointerIntPair<CXXMethodDecl *, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};

class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode,
                                         public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID) {}
};

/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl *, llvm::APInt> FlagBitsCache;

/// The kind of translation unit we are processing.
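
// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, simplified; not llvm::PointerIntPair
// itself): the trick SpecialMemberOverloadResult relies on. Because the
// pointee is aligned, the low bits of the pointer are known to be zero and
// can carry a small enum alongside the pointer in one word.
#include <cassert>
#include <cstdint>

struct alignas(4) Method { int Id; };

class MethodAndKind {
  std::uintptr_t Bits = 0; // pointer with a 2-bit kind folded into low bits

public:
  void set(Method *M, unsigned Kind) {
    auto P = reinterpret_cast<std::uintptr_t>(M);
    assert((P & 3u) == 0 && Kind < 4 && "low bits must be free");
    Bits = P | Kind;
  }
  Method *getPointer() const {
    return reinterpret_cast<Method *>(Bits & ~std::uintptr_t(3));
  }
  unsigned getKind() const { return unsigned(Bits & 3u); }
};

int main() {
  Method M{42};
  MethodAndKind PK;
  PK.set(&M, 2); // e.g. a kind value meaning "Success"
  assert(PK.getPointer() == &M && PK.getKind() == 2);
}
// (end of sketch) -----------------------------------------------------------
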
/// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. 
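
// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, heavily simplified; not Clang's
// GlobalMethodPool types): a method pool keyed by selector, as used when
// typechecking messages sent to "id". Each selector maps to every known
// signature so conflicting overloads can be detected.
#include <iostream>
#include <map>
#include <string>
#include <vector>

using Signature = std::string; // stand-in for a full method signature

int main() {
  std::map<std::string, std::vector<Signature>> MethodPool;
  MethodPool["count"].push_back("NSUInteger count()");
  MethodPool["count"].push_back("int count()"); // a second, conflicting one

  const auto &Candidates = MethodPool["count"];
  if (Candidates.size() > 1)
    std::cout << "selector 'count' has " << Candidates.size()
              << " signatures; a message to id has ambiguous type\n";
}
// (end of sketch) -----------------------------------------------------------
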
/// We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);

/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);

/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
  FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
  ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

private:
  Sema &S;
  FPOptions OldFPFeaturesState;
};

void addImplicitTypedef(StringRef Name, QualType T);

bool WarnedStackExhausted = false;

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();

/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource *getExternalSource() const { return ExternalSource; }

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
void addExternalSource(ExternalSemaSource *E);

void PrintStats() const;

/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);

/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
                                 llvm::function_ref<void()> Fn);

/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
  // in that case anyway.
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;

  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive())
      return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template <typename T>
  friend const SemaDiagnosticBuilder &
  operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }
};

/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(DB, *this, DiagID);
}

/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD);

/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// Get a string to suggest for zero-initialization of a type.
std::string getFixItZeroInitializerForType(QualType T,
                                           SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;

void emitAndClearUnusedLocalTypedefWarnings();

enum TUFragmentKind {
  /// The global module fragment, between 'module;' and a module-declaration.
  Global,
  /// A normal translation unit fragment. For a non-module unit, this is the
  /// entire translation unit. Otherwise, it runs from the module-declaration
  /// to the private-module-fragment (if any) or the end of the TU (if not).
  Normal,
  /// The private module fragment, between 'module :private;' and the end of
  /// the translation unit.
  Private
};

void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

void CheckDelegatingCtorCycles();

Scope *getScopeForContext(DeclContext *Ctx);

void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();

/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing.
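
// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, simplified; not SemaDiagnosticBuilder
// itself): the emit-on-destruction streaming pattern it embodies. Arguments
// accumulate via operator<<, and the finished message is emitted exactly
// once when the temporary dies at the end of the full expression. Assumes
// C++17 guaranteed copy elision for the by-value return.
#include <iostream>
#include <sstream>
#include <string>

class DiagBuilder {
  std::ostringstream Text;

public:
  explicit DiagBuilder(std::string Prefix) { Text << Prefix << ": "; }

  template <typename T> DiagBuilder &operator<<(const T &V) {
    Text << V;
    return *this;
  }

  ~DiagBuilder() { std::cout << Text.str() << "\n"; } // single emission point
};

static DiagBuilder diag(std::string Prefix) {
  return DiagBuilder(std::move(Prefix)); // constructed in place (C++17)
}

int main() {
  // The temporary lives to the end of the full expression, so every streamed
  // piece lands in one diagnostic before the destructor prints it.
  diag("error") << "unknown type name '" << "foo" << "'";
}
// (end of sketch) -----------------------------------------------------------
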
Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. 
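
// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, hypothetical names; not Clang's code):
// the std::unique_ptr-with-custom-deleter shape of PoppedFunctionScopePtr
// above. Instead of plain delete, the deleter can run the owner's
// bookkeeping first (here it just logs before freeing).
#include <iostream>
#include <memory>

struct FunctionScope { const char *Name; };

struct PoppedScopeDeleter {
  void operator()(FunctionScope *S) const {
    std::cout << "recycling scope " << S->Name << "\n"; // owner's bookkeeping
    delete S;
  }
};

using PoppedScopePtr = std::unique_ptr<FunctionScope, PoppedScopeDeleter>;

static PoppedScopePtr popScope() {
  return PoppedScopePtr(new FunctionScope{"main"});
}

int main() {
  PoppedScopePtr P = popScope(); // caller may inspect the scope briefly
}                                // deleter runs when P goes out of scope
// (end of sketch) -----------------------------------------------------------
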
/// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
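
// ---------------------------------------------------------------------------
// Illustrative sketch (standalone toy, hypothetical rules and names; not
// BuildFunctionType itself): the validate-then-construct contract the doc
// comment above describes. Invalid inputs yield a "null" result rather than
// a partially-formed type.
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct FnType {
  std::string Ret;
  std::vector<std::string> Params;
};

static std::optional<FnType>
buildFunctionType(std::string Ret, std::vector<std::string> Params) {
  if (Ret == "int[4]")   // e.g. functions may not return array types
    return std::nullopt;
  for (const auto &P : Params)
    if (P == "void" && Params.size() > 1)
      return std::nullopt; // 'void' must be the only parameter
  return FnType{std::move(Ret), std::move(Params)};
}

int main() {
  auto OK = buildFunctionType("int", {"char", "double"});
  auto Bad = buildFunctionType("int[4]", {});
  std::cout << (OK ? "built\n" : "error\n") << (Bad ? "built\n" : "error\n");
}
// (end of sketch) -----------------------------------------------------------
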
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) 
{ assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) 
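
// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, simplified; not BoundTypeDiagnoser
// itself): the tuple + std::index_sequence expansion it uses to replay
// captured arguments into a diagnostic stream in their original order. A
// C++17 fold expression replaces the original's bool-array trick.
#include <iostream>
#include <tuple>
#include <utility>

template <typename... Ts> class BoundArgs {
  std::tuple<Ts...> Args;

  template <std::size_t... Is>
  void emit(std::ostream &OS, std::index_sequence<Is...>) const {
    // Stream each tuple element left to right.
    ((OS << std::get<Is>(Args) << ' '), ...);
  }

public:
  explicit BoundArgs(Ts... As) : Args(std::move(As)...) {}
  void emit(std::ostream &OS) const {
    emit(OS, std::index_sequence_for<Ts...>());
  }
};

int main() {
  BoundArgs<const char *, int> D("expected", 2);
  D.emit(std::cout); // prints: expected 2
  std::cout << "\n";
}
// (end of sketch) -----------------------------------------------------------
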
bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate, NC_UndeclaredTemplate, }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case 
NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. 
  Diagnose,
  /// Identify whether this function satisfies the formal rules for constexpr
  /// functions in the current language mode (with no extensions).
  CheckValid
};

bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
                                      CheckConstexprKind Kind);

void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(
    CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl *> &OverloadedMethods);
void NoteHiddenVirtualMethods(
    CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl *> &OverloadedMethods);

// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
                              LookupResult &Previous,
                              bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
                                    QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
                                                 bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc,
                                        QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                            SourceLocation NameLoc, IdentifierInfo *Name,
                            QualType T, TypeSourceInfo *TSInfo,
                            StorageClass SC);
void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
                               Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
                                       SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                             SourceLocation EqualLoc);

// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
  // Function parameter.
  NTCUC_FunctionParam,
  // Function return.
  NTCUC_FunctionReturn,
  // Default-initialized object.
  NTCUC_DefaultInitializedObject,
  // Variable with automatic storage duration.
  NTCUC_AutoVar,
  // Initializer expression that might copy from another object.
  NTCUC_CopyInit,
  // Assignment.
  NTCUC_Assignment,
  // Compound literal.
  NTCUC_CompoundLiteral,
  // Block capture.
  NTCUC_BlockCapture,
  // lvalue-to-rvalue conversion of volatile type.
  NTCUC_LValueToRValueVolatile,
};

/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);

// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
  NTCUK_Init = 0x1,
  NTCUK_Destruct = 0x2,
  NTCUK_Copy = 0x4,
};

/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
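
// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, simplified; not Clang's checker): the
// two-mode checking pattern behind CheckConstexprKind above. The same walk
// either emits diagnostics (Diagnose) or silently reports validity
// (CheckValid), so callers can probe without producing noise.
#include <iostream>

enum class CheckKind { Diagnose, CheckValid };

static bool checkBody(bool HasStaticLocal, CheckKind Kind) {
  if (HasStaticLocal) {
    if (Kind == CheckKind::Diagnose)
      std::cout << "error: static variable not permitted here\n";
    return false; // invalid in both modes; only Diagnose speaks
  }
  return true;
}

int main() {
  bool Valid = checkBody(true, CheckKind::CheckValid); // quiet probe
  if (!Valid)
    checkBody(true, CheckKind::Diagnose); // now explain why
}
// (end of sketch) -----------------------------------------------------------
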
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. 
void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
                                            QualType ReturnTy, NamedDecl *D);

void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc,
                            SourceLocation RParenLoc);

/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                            SourceLocation SemiLoc);

enum class ModuleDeclKind {
  Interface,      ///< 'export module X;'
  Implementation, ///< 'module X;'
};

/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                               SourceLocation ModuleLoc, ModuleDeclKind MDK,
                               ModuleIdPath Path, bool IsFirstDecl);

/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

/// The parser has processed a private-module-fragment declaration that
/// begins the definition of the private module fragment of the current
/// module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                              SourceLocation PrivateLoc);

/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, Module *M,
                             ModuleIdPath Path = {});

/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                Module *Mod);

/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument,
  ExplicitSpecialization,
  PartialSpecialization
};

/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
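
// ---------------------------------------------------------------------------
// Illustrative sketch (standalone, simplified; not Clang's diagnostic
// engine): how enumerator values line up with %select alternatives in a
// diagnostic format string, as the comment on MissingImportKind notes. The
// enum's integer value picks the Nth '|'-separated alternative.
#include <iostream>
#include <sstream>
#include <string>

enum class MissingImport { Declaration, Definition, DefaultArgument };

static std::string selectAlt(const std::string &Alts, unsigned Index) {
  std::stringstream SS(Alts);
  std::string Alt;
  for (unsigned I = 0; std::getline(SS, Alt, '|'); ++I)
    if (I == Index)
      return Alt;
  return "<bad select index>";
}

int main() {
  auto Kind = MissingImport::Definition;
  std::cout << "missing "
            << selectAlt("declaration|definition|default argument",
                         unsigned(Kind))
            << " of 'X'; did you forget to import a module?\n";
}
// (end of sketch) -----------------------------------------------------------
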
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                Module *Mod);

/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument,
  ExplicitSpecialization,
  PartialSpecialization
};

/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                           MissingImportKind MIK, bool Recover);

Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
                           SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
                            SourceLocation RBraceLoc);

/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
                                          NamedDecl *Spec);

/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
  return getPrintingPolicy(Context, PP);
}

/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                        const Preprocessor &PP);

/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);

Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                 RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                 MultiTemplateParamsArg TemplateParams,
                                 bool IsExplicitInstantiation,
                                 RecordDecl *&AnonRecord);

Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS,
                                  RecordDecl *Record,
                                  const PrintingPolicy &Policy);

Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                     RecordDecl *Record);

/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
  NTK_NonStruct,
  NTK_NonClass,
  NTK_NonUnion,
  NTK_NonEnum,
  NTK_Typedef,
  NTK_TypeAlias,
  NTK_Template,
  NTK_TypeAliasTemplate,
  NTK_TemplateTemplateArgument,
};

/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);

bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag,
                                  bool isDefinition, SourceLocation NewTagLoc,
                                  const IdentifierInfo *Name);

enum TagUseKind {
  TUK_Reference,   // Reference to a tag: 'struct foo *X;'
  TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
  TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
  TUK_Friend       // Friend declaration: 'friend struct foo;'
};

Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
               SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
               SourceLocation NameLoc, const ParsedAttributesView &Attr,
               AccessSpecifier AS, SourceLocation ModulePrivateLoc,
               MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
               bool &IsDependent, SourceLocation ScopedEnumKWLoc,
               bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
               bool IsTypeSpecifier, bool IsTemplateParamOrArg,
               SkipBodyInfo *SkipBody = nullptr);

Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                              unsigned TagSpec, SourceLocation TagLoc,
                              CXXScopeSpec &SS, IdentifierInfo *Name,
                              SourceLocation NameLoc,
                              const ParsedAttributesView &Attr,
                              MultiTemplateParamsArg TempParamLists);

TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                             const CXXScopeSpec &SS, IdentifierInfo *Name,
                             SourceLocation TagLoc, SourceLocation NameLoc);

void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
               IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 Declarator &D, Expr *BitfieldWidth);

FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                       Declarator &D, Expr *BitfieldWidth,
                       InClassInitStyle InitStyle, AccessSpecifier AS);

MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                 SourceLocation DeclStart, Declarator &D,
                                 Expr *BitfieldWidth,
                                 InClassInitStyle InitStyle,
                                 AccessSpecifier AS,
                                 const ParsedAttr &MSPropertyAttr);

FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
                          TypeSourceInfo *TInfo, RecordDecl *Record,
                          SourceLocation Loc, bool Mutable,
                          Expr *BitfieldWidth, InClassInitStyle InitStyle,
                          SourceLocation TSSL, AccessSpecifier AS,
                          NamedDecl *PrevDecl, Declarator *D = nullptr);

bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);

enum TrivialABIHandling {
  /// The triviality of a method unaffected by "trivial_abi".
  TAH_IgnoreTrivialABI,
  /// The triviality of a method affected by "trivial_abi".
  TAH_ConsiderTrivialABI
};

bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                            TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
                            bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);

void ActOnLastBitfield(SourceLocation DeclStart,
                       SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D,
                Expr *BitfieldWidth, tok::ObjCKeywordKind visibility);

// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
                 ArrayRef<Decl *> Fields, SourceLocation LBrac,
                 SourceLocation RBrac, const ParsedAttributesView &AttrList);

/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, we actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
                              SkipBodyInfo &SkipBody);

typedef void *SkippedDefinitionContext;

/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                     SourceLocation FinalLoc,
                                     bool IsFinalSpelledSealed,
                                     SourceLocation LBraceLoc);

/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                              SourceRange BraceRange);

void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

void ActOnObjCContainerFinishDefinition();

/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);

/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                    EnumConstantDecl *LastEnumConst,
                                    SourceLocation IdLoc, IdentifierInfo *Id,
                                    Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                            QualType EnumUnderlyingTy, bool IsFixed,
                            const EnumDecl *Prev);

/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                    SourceLocation IILoc);

Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                        SourceLocation IdLoc, IdentifierInfo *Id,
                        const ParsedAttributesView &Attrs,
                        SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
                   Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
                   const ParsedAttributesView &Attr);

DeclContext *getContainingDC(DeclContext *DC);

/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();

/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);

/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();

DeclContext *getFunctionLevelDeclContext();

/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();

/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();

/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();

/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
///        enclosing namespace set of the context, rather than contained
///        directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                   bool AllowInlineNamespace = false);

/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// Don't merge availability attributes at all.
  AMK_None,
  /// Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,
  /// Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,
  /// Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
};

/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
  /// The availability attribute was specified explicitly next to the
  /// declaration.
  AP_Explicit = 0,
  /// The availability attribute was applied using '#pragma clang attribute'.
  AP_PragmaClangAttribute = 1,
  /// The availability attribute for a specific platform was inferred from
  /// an availability attribute for another platform.
  AP_InferredFromOtherPlatform = 2
};

/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
                      IdentifierInfo *Platform, bool Implicit,
                      VersionTuple Introduced, VersionTuple Deprecated,
                      VersionTuple Obsoleted, bool IsUnavailable,
                      StringRef Message, bool IsStrict, StringRef Replacement,
                      AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
                        TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
                                    VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
                        StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase,
                       MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
                            IdentifierInfo *Format, int FormatIdx,
                            int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
                              StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
                              StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
                                        const AttributeCommonInfo &CI,
                                        const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
                                    const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
                                  const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
                                        const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
                                              const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);

void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                         AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
                          LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                       bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                  Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);

// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
  AA_Assigning,
  AA_Passing,
  AA_Returning,
  AA_Converting,
  AA_Initializing,
  AA_Sending,
  AA_Casting,
  AA_Passing_CFAudited
};
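/// Editor's note: a minimal sketch of the source-level availability
/// attribute whose pieces feed the Introduced/Deprecated/Obsoleted/Message
/// parameters of mergeAvailabilityAttr above (the function and version
/// numbers shown are hypothetical):
/// \code
///   void f() __attribute__((availability(macos, introduced=10.4,
///                                        deprecated=10.6, obsoleted=10.7,
///                                        message="use g() instead")));
/// \endcode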
/// C++ Overloading.
enum OverloadKind {
  /// This is a legitimate overload: the existing declarations are
  /// functions or function templates with different signatures.
  Ovl_Overload,
  /// This is not an overload because the signature exactly matches
  /// an existing declaration.
  Ovl_Match,
  /// This is not an overload because the lookup results contain a
  /// non-function.
  Ovl_NonFunction
};

OverloadKind CheckOverload(Scope *S, FunctionDecl *New,
                           const LookupResult &OldDecls, NamedDecl *&OldDecl,
                           bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
                bool ConsiderCudaAttrs = true);

ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
                      bool SuppressUserConversions, bool AllowExplicit,
                      bool InOverloadResolution, bool CStyle,
                      bool AllowObjCWritebackConversion);

bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                         bool InOverloadResolution, QualType& ConvertedType,
                         bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
                             QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                               QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                              QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                const FunctionProtoType *NewType,
                                unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
                                QualType FromType, QualType ToType);

void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind,
                            CXXCastPath& BasePath, bool IgnoreBaseAccess,
                            bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType,
                               QualType ToType, bool InOverloadResolution,
                               QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
                                  CastKind &Kind, CXXCastPath &BasePath,
                                  bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
                               bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
                          QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);

ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                           const VarDecl *NRVOCandidate,
                                           QualType ResultType, Expr *Value,
                                           bool AllowNRVO = true);

bool CanPerformAggregateInitializationForOverloadResolution(
    const InitializedEntity &Entity, InitListExpr *From);

bool CanPerformCopyInitialization(const InitializedEntity &Entity,
                                  ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
                                     SourceLocation EqualLoc,
                                     ExprResult Init,
                                     bool TopLevelOfInitList = false,
                                     bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
                                               NestedNameSpecifier *Qualifier,
                                               NamedDecl *FoundDecl,
                                               CXXMethodDecl *Method);

/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);

ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);

/// Contexts in which a converted constant expression is required.
enum CCEKind {
  CCEK_CaseValue,   ///< Expression in a case label.
  CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
  CCEK_TemplateArg, ///< Value of a non-type template parameter.
  CCEK_NewExpr,     ///< Constant expression in a noptr-new-declarator.
  CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
  CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            APValue &Value, CCEKind CCE);

/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
  bool Suppress;
  bool SuppressConversion;

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                                QualType T) = 0;

  /// Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  /// Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder
  diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder
  noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};

class ICEConvertDiagnoser : public ContextualImplicitConverter {
  bool AllowScopedEnumerations;

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress,
                      bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                        QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
                                               QualType T) = 0;
};

/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
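/// Editor's note: a minimal sketch of how a caller might drive
/// PerformContextualImplicitConversion through an ICEConvertDiagnoser
/// subclass; the diagnostic ID is hypothetical and the remaining pure
/// virtual overrides are elided:
/// \code
///   struct SizeDiagnoser final : Sema::ICEConvertDiagnoser {
///     SizeDiagnoser()
///         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
///                               /*Suppress=*/false,
///                               /*SuppressConversion=*/false) {}
///     Sema::SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
///                                                QualType T) override {
///       return S.Diag(Loc, diag::err_expected_integral) << T; // hypothetical ID
///     }
///     // ...the other diagnose*/note* overrides follow the same pattern.
///   } Diagnoser;
///   ExprResult R = PerformContextualImplicitConversion(Loc, E, Diagnoser);
/// \endcode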
enum ObjCSubscriptKind {
  OS_Array,
  OS_Dictionary,
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;

using ADLCallKind = CallExpr::ADLCallKind;

void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false,
                          bool AllowExplicit = true,
                          bool AllowExplicitConversion = false,
                          ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
                          ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
                           ArrayRef<Expr *> Args,
                           OverloadCandidateSet &CandidateSet,
                           TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                           bool SuppressUserConversions = false,
                           bool PartialOverloading = false,
                           bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet& CandidateSet,
                        bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
                        CXXRecordDecl *ActingContext, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet& CandidateSet,
                        bool SuppressUserConversions = false,
                        bool PartialOverloading = false,
                        ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                DeclAccessPair FoundDecl,
                                CXXRecordDecl *ActingContext,
                                TemplateArgumentListInfo *ExplicitTemplateArgs,
                                QualType ObjectType,
                                Expr::Classification ObjectClassification,
                                ArrayRef<Expr *> Args,
                                OverloadCandidateSet& CandidateSet,
                                bool SuppressUserConversions = false,
                                bool PartialOverloading = false);
void AddTemplateOverloadCandidate(
    FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
    TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
    OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
    bool PartialOverloading = false, bool AllowExplicit = true,
    ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
                                  ArrayRef<QualType> ParamTypes,
                                  ArrayRef<Expr *> Args,
                                  OverloadCandidateSet &CandidateSet,
                                  ConversionSequenceList &Conversions,
                                  bool SuppressUserConversions,
                                  CXXRecordDecl *ActingContext = nullptr,
                                  QualType ObjectType = QualType(),
                                  Expr::Classification ObjectClassification = {});
void AddConversionCandidate(
    CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
    CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
    OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
    bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
    FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
    CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
    OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
    bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
                           DeclAccessPair FoundDecl,
                           CXXRecordDecl *ActingContext,
                           const FunctionProtoType *Proto, Expr *Object,
                           ArrayRef<Expr *> Args,
                           OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
                                 SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                 OverloadCandidateSet& CandidateSet,
                                 SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
                         OverloadCandidateSet& CandidateSet,
                         bool IsAssignmentOperator = false,
                         unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
                                  SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                  OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
                                          SourceLocation Loc,
                                          ArrayRef<Expr *> Args,
                                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                                          OverloadCandidateSet& CandidateSet,
                                          bool PartialOverloading = false);

// Emit as a 'note' the specific overload candidate.
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
                           QualType DestType = QualType(),
                           bool TakingAddress = false);

// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
                               bool TakingAddress = false);

/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
                            bool MissingImplicitThis = false);

/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);

/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
                                         const Expr *ThisArg,
                                         ArrayRef<const Expr *> Args,
                                         SourceLocation Loc);

/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every
/// use of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
                                           SourceLocation Loc);
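/// Editor's note: a minimal sketch of the source-level form the diagnose_if
/// checks above operate on; the argument-dependent variant fires once per
/// call site (the function shown is hypothetical):
/// \code
///   int safe_div(int a, int b)
///       __attribute__((diagnose_if(b == 0, "division by zero", "error")));
/// \endcode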
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
                                       bool Complain = false,
                                       SourceLocation Loc = SourceLocation());

// [PossiblyAFunctionType]  -->  [Return]
// NonFunctionType          -->  NonFunctionType
// R (A)                    -->  R(A)
// R (*)(A)                 -->  R (A)
// R (&)(A)                 -->  R (A)
// R (S::*)(A)              -->  R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType,
                                   bool Complain, DeclAccessPair &Found,
                                   bool *pHadMultipleCandidates = nullptr);

FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
                                            DeclAccessPair &FoundResult);

bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
    ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);

FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                            bool Complain = false,
                                            DeclAccessPair *Found = nullptr);

bool ResolveAndFixSingleFunctionTemplateSpecialization(
    ExprResult &SrcExpr, bool DoFunctionPointerConverion = false,
    bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(),
    QualType DestTypeForComplaining = QualType(),
    unsigned DiagIDForComplaining = 0);

Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl,
                                     FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
                                          DeclAccessPair FoundDecl,
                                          FunctionDecl *Fn);

void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                 ArrayRef<Expr *> Args,
                                 OverloadCandidateSet &CandidateSet,
                                 bool PartialOverloading = false);

// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
  FRS_Success,
  FRS_NoViableFunction,
  FRS_DiagnosticIssued
};

ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
                                         SourceLocation RangeLoc,
                                         const DeclarationNameInfo &NameInfo,
                                         LookupResult &MemberLookup,
                                         OverloadCandidateSet *CandidateSet,
                                         Expr *Range, ExprResult *CallExpr);

ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                   UnresolvedLookupExpr *ULE,
                                   SourceLocation LParenLoc,
                                   MultiExprArg Args,
                                   SourceLocation RParenLoc,
                                   Expr *ExecConfig,
                                   bool AllowTypoCorrection = true,
                                   bool CalleesAddressIsTaken = false);

bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                            MultiExprArg Args, SourceLocation RParenLoc,
                            OverloadCandidateSet *CandidateSet,
                            ExprResult *Result);

ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
                                   UnaryOperatorKind Opc,
                                   const UnresolvedSetImpl &Fns,
                                   Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                 const UnresolvedSetImpl &Fns,
                                 Expr *LHS, Expr *RHS,
                                 bool RequiresADL = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                              SourceLocation RLoc,
                                              Expr *Base, Expr *Idx);

ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Args,
                                     SourceLocation RParenLoc);
ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object,
                                        SourceLocation LParenLoc,
                                        MultiExprArg Args,
                                        SourceLocation RParenLoc);

ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
                                    SourceLocation OpLoc,
                                    bool *NoArrowOperatorFound = nullptr);

/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
                         CallExpr *CE, FunctionDecl *FD);

/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                              bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);

/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{

/// Describes the kind of name lookup to perform.
enum LookupNameKind {
  /// Ordinary name lookup, which finds ordinary names (functions,
  /// variables, typedefs, etc.) in C and most kinds of names
  /// (functions, variables, members, types, etc.) in C++.
  LookupOrdinaryName = 0,
  /// Tag name lookup, which finds the names of enums, classes,
  /// structs, and unions.
  LookupTagName,
  /// Label name lookup.
  LookupLabel,
  /// Member name lookup, which finds the names of
  /// class/struct/union members.
  LookupMemberName,
  /// Look up of an operator name (e.g., operator+) for use with
  /// operator overloading. This lookup is similar to ordinary name
  /// lookup, but will ignore any declarations that are class members.
  LookupOperatorName,
  /// Look up of a name that precedes the '::' scope resolution
  /// operator in C++. This lookup completely ignores operator, object,
  /// function, and enumerator names (C++ [basic.lookup.qual]p1).
  LookupNestedNameSpecifierName,
  /// Look up a namespace name within a C++ using directive or
  /// namespace alias definition, ignoring non-namespace names (C++
  /// [basic.lookup.udir]p1).
  LookupNamespaceName,
  /// Look up all declarations in a scope with the given name,
  /// including resolved using declarations. This is appropriate
  /// for checking redeclarations for a using declaration.
  LookupUsingDeclName,
  /// Look up an ordinary name that is going to be redeclared as a
  /// name with linkage. This lookup ignores any declarations that
  /// are outside of the current scope unless they have linkage. See
  /// C99 6.2.2p4-5 and C++ [basic.link]p6.
  LookupRedeclarationWithLinkage,
  /// Look up a friend of a local class. This lookup does not look
  /// outside the innermost non-class scope. See C++11 [class.friend]p11.
  LookupLocalFriendName,
  /// Look up the name of an Objective-C protocol.
  LookupObjCProtocolName,
  /// Look up implicit 'self' parameter of an objective-c method.
  LookupObjCImplicitSelfParam,
  /// Look up the name of an OpenMP user-defined reduction operation.
  LookupOMPReductionName,
  /// Look up the name of an OpenMP user-defined mapper.
  LookupOMPMapperName,
  /// Look up any declaration with any name.
  LookupAnyName
};

/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
  /// The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,
  /// The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists and is visible.
  ForVisibleRedeclaration,
  /// The lookup results will be used for redeclaration of a name
  /// with external linkage; non-visible lookup results with external linkage
  /// may also be found.
  ForExternalRedeclaration
};

RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  if (cast<Decl>(CurContext)
          ->getOwningModuleForLinkage(/*IgnoreLinkage*/ true))
    return ForVisibleRedeclaration;
  return ForExternalRedeclaration;
}

/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
  /// The lookup resulted in an error.
  LOLR_Error,
  /// The lookup found no match but no diagnostic was issued.
  LOLR_ErrorNoDiagnostic,
  /// The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplate
};

SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                CXXSpecialMember SM,
                                                bool ConstArg,
                                                bool VolatileArg,
                                                bool RValueThis,
                                                bool ConstThis,
                                                bool VolatileThis);

typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
    TypoRecoveryCallback;

private:
bool CppLookupName(LookupResult &R, Scope *S);

struct TypoExprState {
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  TypoDiagnosticGenerator DiagHandler;
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  TypoExprState(TypoExprState &&other) noexcept;
  TypoExprState &operator=(TypoExprState &&other) noexcept;
};

/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                            TypoDiagnosticGenerator TDG,
                            TypoRecoveryCallback TRC);

// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind, Scope *S,
                           CXXScopeSpec *SS,
                           CorrectionCandidateCallback &CCC,
                           DeclContext *MemberContext, bool EnteringContext,
                           const ObjCObjectPointerType *OPT,
                           bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);

/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                            SourceLocation Loc, LookupNameKind NameKind,
                            RedeclarationKind Redecl = NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
                bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                      bool AllowBuiltinCreation = false,
                      bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                 RedeclarationKind Redecl = NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                  QualType T1, QualType T2,
                                  UnresolvedSetImpl &Functions);

LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                               SourceLocation GnuLabelLoc = SourceLocation());

DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                             unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                       bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                            unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                      bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
                      bool AllowRaw, bool AllowTemplate,
                      bool AllowStringTemplate, bool DiagnoseMissing);
bool isKnownName(StringRef name);

void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                             ArrayRef<Expr *> Args, ADLResult &Functions);

void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                        VisibleDeclConsumer &Consumer,
                        bool IncludeGlobalScope = true,
                        bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                        VisibleDeclConsumer &Consumer,
                        bool IncludeGlobalScope = true,
                        bool IncludeDependentBases = false,
                        bool LoadExternal = true);
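/// Editor's note: a minimal sketch of unqualified lookup through the entry
/// points above; II is an assumed IdentifierInfo* for the name being
/// resolved, and SemaRef/CurScope are the caller's Sema and Scope:
/// \code
///   LookupResult R(SemaRef, DeclarationName(II), Loc,
///                  Sema::LookupOrdinaryName);
///   if (SemaRef.LookupName(R, CurScope) && R.isSingleResult())
///     NamedDecl *ND = R.getFoundDecl();
/// \endcode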
enum CorrectTypoKind {
  CTK_NonError,     // CorrectTypo used in a non error recovery situation.
  CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};

TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind, Scope *S,
                           CXXScopeSpec *SS,
                           CorrectionCandidateCallback &CCC,
                           CorrectTypoKind Mode,
                           DeclContext *MemberContext = nullptr,
                           bool EnteringContext = false,
                           const ObjCObjectPointerType *OPT = nullptr,
                           bool RecordFailure = true);

TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             CorrectionCandidateCallback &CCC,
                             TypoDiagnosticGenerator TDG,
                             TypoRecoveryCallback TRC, CorrectTypoKind Mode,
                             DeclContext *MemberContext = nullptr,
                             bool EnteringContext = false,
                             const ObjCObjectPointerType *OPT = nullptr);

/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is
///        its initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
///        it is an acceptable/usable result from a single combination of typo
///        corrections. As long as the filter returns ExprError, different
///        combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
                          llvm::function_ref<ExprResult(Expr *)> Filter =
                              [](Expr *E) -> ExprResult { return E; });

ExprResult
CorrectDelayedTyposInExpr(Expr *E,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}

ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
                          llvm::function_ref<ExprResult(Expr *)> Filter =
                              [](Expr *E) -> ExprResult { return E; }) {
  return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}

ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
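/// Editor's note: a minimal sketch of the Filter parameter in action; the
/// predicate shown (rejecting rebuilds that are still type-dependent) is
/// illustrative only:
/// \code
///   ExprResult Res = SemaRef.CorrectDelayedTyposInExpr(
///       E, /*InitDecl=*/nullptr, [](Expr *Rebuilt) -> ExprResult {
///         return Rebuilt->isTypeDependent() ? ExprError()
///                                           : ExprResult(Rebuilt);
///       });
/// \endcode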
void diagnoseTypo(const TypoCorrection &Correction,
                  const PartialDiagnostic &TypoDiag,
                  bool ErrorRecovery = true);

void diagnoseTypo(const TypoCorrection &Correction,
                  const PartialDiagnostic &TypoDiag,
                  const PartialDiagnostic &PrevNote,
                  bool ErrorRecovery = true);

void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);

void FindAssociatedClassesAndNamespaces(
    SourceLocation InstantiationLoc, ArrayRef<Expr *> Args,
    AssociatedNamespaceSet &AssociatedNamespaces,
    AssociatedClassSet &AssociatedClasses);

void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                          bool ConsiderLinkage, bool AllowInlineNamespace);

bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);

void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}

ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
                                        SourceLocation IdLoc,
                                        bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S,
                               bool ForRedeclaration, SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
                                    Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);

// More parsing and symbol table subroutines.

void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
                                 const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D,
                              const ParsedAttributesView &AL,
                              bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                    const ParsedAttributesView &AttrList);

void checkUnusedDeclAttributes(Declarator &D);

/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);

bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
                          const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
                                    StringRef &Str,
                                    SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
    CXXRecordDecl *RD, SourceRange Range, bool BestCase,
    MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);

/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
                            SourceLocation Loc);

// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);

/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;

/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
                                 const ParsedAttributesView &Attrs,
                                 SourceRange Range);

void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                 ObjCMethodDecl *MethodDecl,
                                 bool IsProtocolMethodDecl);

void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                      ObjCMethodDecl *Overridden,
                                      bool IsProtocolMethodDecl);

/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
                           ObjCMethodDecl *MethodDecl,
                           bool IsProtocolMethodDecl);

typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                              ObjCIvarDecl **Fields, unsigned nIvars,
                              SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is the main routine to warn if any
/// method remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                               ObjCContainerDecl* IDecl,
                               bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                     ObjCContainerDecl *CDecl,
                                     bool SynthesizeProperties);

/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                 ObjCInterfaceDecl *IDecl,
                                 SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                    ObjCMethodDecl *Method,
                                    ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                         const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise,
/// returns NULL. It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                             const ObjCPropertyDecl *&PDecl) const;

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                                                 SourceLocation AtLoc,
                                                 SourceLocation LParenLoc,
                                                 FieldDeclarator &FD,
                                                 Selector GetterSel,
                                                 SourceLocation GetterNameLoc,
                                                 Selector SetterSel,
                                                 SourceLocation SetterNameLoc,
                                                 const bool isReadWrite,
                                                 unsigned &Attributes,
                                                 const unsigned AttributesAsWritten,
                                                 QualType T,
                                                 TypeSourceInfo *TSI,
                                                 tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                     ObjCContainerDecl *CDecl,
                                     SourceLocation AtLoc,
                                     SourceLocation LParenLoc,
                                     FieldDeclarator &FD,
                                     Selector GetterSel,
                                     SourceLocation GetterNameLoc,
                                     Selector SetterSel,
                                     SourceLocation SetterNameLoc,
                                     const bool isReadWrite,
                                     const unsigned Attributes,
                                     const unsigned AttributesAsWritten,
                                     QualType T,
                                     TypeSourceInfo *TSI,
                                     tok::ObjCKeywordKind MethodImplKind,
                                     DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                     ObjCInterfaceDecl* IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(
    const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};

/// MatchTwoMethodDeclarations - Checks if two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                const ObjCMethodDecl *PrevMethod,
                                MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                const SelectorSet &ClsMap,
                                SelectorSet &InsMapSeen,
                                SelectorSet &ClsMapSeen,
                                ObjCImplDecl* IMPDecl,
                                ObjCContainerDecl* IDecl,
                                bool &IncompleteImpl, bool ImmediateClass,
                                bool WarnCategoryMethodImpl = false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// a category match those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass,
                                         bool instance);

public:
/// - Returns instance or factory methods in the global method pool for
/// the given selector. It checks the desired kind first; if none is found
/// and parameter checkTheOther is set, it then checks the other kind. If no
/// such method or only one method is found, the function returns false;
/// otherwise, it returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel,
                                        SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                        bool InstanceFirst,
                                        bool CheckTheOther,
                                        const ObjCObjectType *TypeBound = nullptr);

bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                    SourceRange R, bool receiverIdOrClass,
                                    SmallVectorImpl<ObjCMethodDecl*>& Methods);

void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                        Selector Sel, SourceRange R,
                                        bool receiverIdOrClass);

private:
/// - Returns a selector which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                 bool IsInstance,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/ true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/ false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/ true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/ false);
}

const ObjCMethodDecl *
SelectorsForTypoCorrection(Selector Sel, QualType ObjectType = QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.

public:
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() { return E; }

  Expr *get() const { return E; }

  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};
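/// Editor's note: a minimal sketch of CompoundScopeRAII in use; the
/// surrounding parser logic is elided and illustrative only:
/// \code
///   {
///     Sema::CompoundScopeRAII BodyScope(SemaRef); // ActOnStartOfCompoundStmt
///     // ...parse the statements of the compound body here...
///   } // destructor runs ActOnFinishOfCompoundStmt
/// \endcode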
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, 
Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
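  /// For example (illustrative; the exact diagnostic group is not spelled out
  /// in this header):
  /// \code
  ///   void *p = 0; // in C++11 this may warn, suggesting 'nullptr' instead
  /// \endcode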
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
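  // For example (illustrative):
  //   const int n = 5;
  //   int a[n];          // not an odr-use: n is usable in constant expressions
  //                      // and lvalue-to-rvalue conversion applies immediately
  //   const int *p = &n; // odr-use: n's definition is required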
  void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
  void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                              bool MightBeOdrUse = true);
  void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
  void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
  void MarkMemberReferenced(MemberExpr *E);
  void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
  void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
                                         unsigned CapturingScopeIndex);

  ExprResult CheckLValueToRValueConversionOperand(Expr *E);
  void CleanupVarDeclMarking();

  enum TryCaptureKind {
    TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
  };

  /// Try to capture the given variable.
  ///
  /// \param Var The variable to capture.
  ///
  /// \param Loc The location at which the capture occurs.
  ///
  /// \param Kind The kind of capture, which may be implicit (for either a
  /// block or a lambda), or explicit by-value or by-reference (for a lambda).
  ///
  /// \param EllipsisLoc The location of the ellipsis, if one is provided in
  /// an explicit lambda capture.
  ///
  /// \param BuildAndDiagnose Whether we are actually supposed to add the
  /// captures or diagnose errors. If false, this routine merely checks whether
  /// the capture can occur without performing the capture itself or
  /// complaining if the variable cannot be captured.
  ///
  /// \param CaptureType Will be set to the type of the field used to capture
  /// this variable in the innermost block or lambda. Only valid when the
  /// variable can be captured.
  ///
  /// \param DeclRefType Will be set to the type of a reference to the capture
  /// from within the current scope. Only valid when the variable can be
  /// captured.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
  /// This is useful when enclosing lambdas must speculatively capture
  /// variables that may or may not be used in certain specializations of
  /// a nested generic lambda.
  ///
  /// \returns true if an error occurred (i.e., the variable cannot be
  /// captured) and false if the capture succeeded.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
                          SourceLocation EllipsisLoc, bool BuildAndDiagnose,
                          QualType &CaptureType,
                          QualType &DeclRefType,
                          const unsigned *const FunctionScopeIndexToStopAt);

  /// Try to capture the given variable.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                          TryCaptureKind Kind = TryCapture_Implicit,
                          SourceLocation EllipsisLoc = SourceLocation());

  /// Checks if the variable must be captured.
  bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

  /// Given a variable, determine the type that a reference to that
  /// variable will have in the given scope.
  QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

  /// Mark all of the declarations referenced within a particular AST node as
  /// referenced. Used when template instantiation instantiates a non-dependent
  /// type -- entities referenced by the type are now referenced.
  void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
  void MarkDeclarationsReferencedInExpr(Expr *E,
                                        bool SkipLocalVariables = false);

  /// Try to recover by turning the given expression into a
  /// call. Returns true if recovery was attempted or an error was
  /// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
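  /// For example (illustrative), the three adjacent literals below are
  /// concatenated into a single wide string literal:
  /// \code
  ///   const wchar_t *s = "foo" "bar" L"baz"; // yields L"foobarbaz"
  /// \endcode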
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
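  // For example (illustrative):
  //   struct Widget { int field; };
  //   struct Ptr { Widget *operator->(); };
  //   Ptr p;
  //   p.field; // no member 'field' in Ptr; Sema can retry this as p->field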
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
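  /// A rough parser-side sketch (illustrative; Actions, Fn, Args and the
  /// source locations are assumed names, not part of this header):
  /// \code
  ///   ExprResult Call = Actions.ActOnCallExpr(getCurScope(), Fn.get(),
  ///                                           LParenLoc, Args, RParenLoc);
  /// \endcode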
  ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr);
  ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr,
                           bool IsExecConfig = false);

  enum class AtomicArgumentOrder { API, AST };
  ExprResult
  BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
                  SourceLocation RParenLoc, MultiExprArg Args,
                  AtomicExpr::AtomicOp Op,
                  AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);

  ExprResult
  BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
                        ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
                        Expr *Config = nullptr, bool IsExecConfig = false,
                        ADLCallKind UsesADL = ADLCallKind::NotADL);

  ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                     MultiExprArg ExecConfig,
                                     SourceLocation GGGLoc);

  ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
                           Declarator &D, ParsedType &Ty,
                           SourceLocation RParenLoc, Expr *CastExpr);
  ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
                                 TypeSourceInfo *Ty,
                                 SourceLocation RParenLoc,
                                 Expr *Op);
  CastKind PrepareScalarCast(ExprResult &src, QualType destType);

  /// Build an altivec or OpenCL literal.
  ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                                SourceLocation RParenLoc, Expr *E,
                                TypeSourceInfo *TInfo);

  ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

  ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                  ParsedType Ty,
                                  SourceLocation RParenLoc,
                                  Expr *InitExpr);

  ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                      TypeSourceInfo *TInfo,
                                      SourceLocation RParenLoc,
                                      Expr *LiteralExpr);

  ExprResult ActOnInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult BuildInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                        SourceLocation EqualOrColonLoc,
                                        bool GNUSyntax,
                                        ExprResult Init);

private:
  static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
  ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                        tok::TokenKind Kind,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                        BinaryOperatorKind Opc,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                Expr *LHSExpr, Expr *RHSExpr);

  void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

  /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
  /// in the case of the GNU conditional expr extension.
  ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                                SourceLocation ColonLoc,
                                Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

  /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
  ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                            LabelDecl *TheDecl);

  void ActOnStartStmtExpr();
  ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc); // "({..})"
  // Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
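  /// For example (illustrative):
  /// \code
  ///   typedef float float4 __attribute__((ext_vector_type(4)));
  ///   typedef int   int4   __attribute__((ext_vector_type(4)));
  ///   int4 truncate(float4 v) { return __builtin_convertvector(v, int4); }
  /// \endcode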
  ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                    SourceLocation BuiltinLoc,
                                    SourceLocation RParenLoc);

  //===---------------------------- OpenCL Features -----------------------===//

  /// __builtin_astype(...)
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                               SourceLocation NamespaceLoc,
                               SourceLocation IdentLoc, IdentifierInfo *Ident,
                               SourceLocation LBrace,
                               const ParsedAttributesView &AttrList,
                               UsingDirectiveDecl *&UsingDecl);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();

  NamespaceDecl *lookupStdExperimentalNamespace();

  CXXRecordDecl *getStdBadAlloc() const;
  EnumDecl *getStdAlignValT() const;

private:
  // A cache representing whether we've fully checked the various comparison
  // category types stored in ASTContext. The bit-index corresponds to the
  // integer value of a ComparisonCategoryType enumerator.
  llvm::SmallBitVector FullyCheckedComparisonCategories;

  ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                         CXXScopeSpec &SS,
                                         ParsedType TemplateTypeTy,
                                         IdentifierInfo *MemberOrBase);

public:
  /// Lookup the specified comparison category types in the standard
  /// library, and check the VarDecls possibly returned by the operator<=>
  /// builtins for that type.
  ///
  /// \return The type of the comparison category type corresponding to the
  /// specified Kind, or a null type if an error occurs.
  QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                       SourceLocation Loc);

  /// Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);

  /// Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
  QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

  /// Determine whether Ctor is an initializer-list constructor, as
  /// defined in [dcl.init.list]p2.
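  /// For example (illustrative):
  /// \code
  ///   struct V {
  ///     V(std::initializer_list<int>);           // initializer-list ctor
  ///     V(std::initializer_list<int>, int = 0);  // also one: the remaining
  ///                                              // parameter has a default
  ///     V(int, int);                             // not one
  ///   };
  /// \endcode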
  bool isInitListConstructor(const FunctionDecl *Ctor);

  Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                            SourceLocation NamespcLoc, CXXScopeSpec &SS,
                            SourceLocation IdentLoc,
                            IdentifierInfo *NamespcName,
                            const ParsedAttributesView &AttrList);
  void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

  Decl *ActOnNamespaceAliasDef(Scope *CurScope,
                               SourceLocation NamespaceLoc,
                               SourceLocation AliasLoc,
                               IdentifierInfo *Alias,
                               CXXScopeSpec &SS,
                               SourceLocation IdentLoc,
                               IdentifierInfo *Ident);

  void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
  bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                            const LookupResult &PreviousDecls,
                            UsingShadowDecl *&PrevShadow);
  UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                        NamedDecl *Target,
                                        UsingShadowDecl *PrevDecl);

  bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                   bool HasTypenameKeyword,
                                   const CXXScopeSpec &SS,
                                   SourceLocation NameLoc,
                                   const LookupResult &Previous);
  bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
                               bool HasTypename,
                               const CXXScopeSpec &SS,
                               const DeclarationNameInfo &NameInfo,
                               SourceLocation NameLoc);

  NamedDecl *BuildUsingDeclaration(
      Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
      bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
      DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
      const ParsedAttributesView &AttrList, bool IsInstantiation);
  NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                                ArrayRef<NamedDecl *> Expansions);

  bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

  /// Given a derived-class using shadow declaration for a constructor and the
  /// corresponding base class constructor, find or create the implicit
  /// synthesized derived class constructor to use for this initialization.
  CXXConstructorDecl *
  findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                            ConstructorUsingShadowDecl *DerivedShadow);

  Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                              SourceLocation UsingLoc,
                              SourceLocation TypenameLoc,
                              CXXScopeSpec &SS,
                              UnqualifiedId &Name,
                              SourceLocation EllipsisLoc,
                              const ParsedAttributesView &AttrList);
  Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                              MultiTemplateParamsArg TemplateParams,
                              SourceLocation UsingLoc,
                              UnqualifiedId &Name,
                              const ParsedAttributesView &AttrList,
                              TypeResult Type,
                              Decl *DeclFromDeclSpec);

  /// BuildCXXConstructExpr - Creates a complete call to a constructor,
  /// including handling of its default argument expressions.
  ///
  /// \param ConstructKind - a CXXConstructExpr::ConstructionKind
  ExprResult
  BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
                        NamedDecl *FoundDecl,
                        CXXConstructorDecl *Constructor, MultiExprArg Exprs,
                        bool HadMultipleCandidates, bool IsListInitialization,
                        bool IsStdInitListInitialization,
                        bool RequiresZeroInit, unsigned ConstructKind,
                        SourceRange ParenRange);

  /// Build a CXXConstructExpr whose constructor has already been resolved if
  /// it denotes an inherited constructor.
  ExprResult
  BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
                        CXXConstructorDecl *Constructor, bool Elidable,
                        MultiExprArg Exprs,
                        bool HadMultipleCandidates, bool IsListInitialization,
                        bool IsStdInitListInitialization,
                        bool RequiresZeroInit, unsigned ConstructKind,
                        SourceRange ParenRange);

  // FIXME: Can we remove this and have the above BuildCXXConstructExpr check
  // if the constructor can be elidable?
  ExprResult
  BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
                        NamedDecl *FoundDecl,
                        CXXConstructorDecl *Constructor, bool Elidable,
                        MultiExprArg Exprs,
                        bool HadMultipleCandidates, bool IsListInitialization,
                        bool IsStdInitListInitialization,
                        bool RequiresZeroInit, unsigned ConstructKind,
                        SourceRange ParenRange);

  ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

  /// Instantiate or parse a C++ default argument expression as necessary.
  /// Return true on error.
  bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                              ParmVarDecl *Param);

  /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
  /// the default expr if needed.
  ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
                                    FunctionDecl *FD,
                                    ParmVarDecl *Param);

  /// FinalizeVarWithDestructor - Prepare for calling destructor on the
  /// constructed variable.
  void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

  /// Helper class that collects exception specifications for
  /// implicitly-declared special member functions.
  class ImplicitExceptionSpecification {
    // Pointer to allow copying
    Sema *Self;
    // We order exception specifications thus:
    // noexcept is the most restrictive, but is only used in C++11.
    // throw() comes next.
    // Then a throw(collected exceptions)
    // Finally no specification, which is expressed as noexcept(false).
    // throw(...) is used instead if any called function uses it.
    ExceptionSpecificationType ComputedEST;
    llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
    SmallVector<QualType, 4> Exceptions;

    void ClearExceptions() {
      ExceptionsSeen.clear();
      Exceptions.clear();
    }

  public:
    explicit ImplicitExceptionSpecification(Sema &Self)
        : Self(&Self), ComputedEST(EST_BasicNoexcept) {
      if (!Self.getLangOpts().CPlusPlus11)
        ComputedEST = EST_DynamicNone;
    }

    /// Get the computed exception specification type.
    ExceptionSpecificationType getExceptionSpecType() const {
      assert(!isComputedNoexcept(ComputedEST) &&
             "noexcept(expr) should not be a possible result");
      return ComputedEST;
    }

    /// The number of exceptions in the exception specification.
    unsigned size() const { return Exceptions.size(); }

    /// The set of exceptions in the exception specification.
    const QualType *data() const { return Exceptions.data(); }

    /// Integrate another called method into the collected data.
    void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

    /// Integrate an invoked expression into the collected data.
    void CalledExpr(Expr *E);

    /// Overwrite an EPI's exception specification with this
    /// computed exception specification.
    FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
      FunctionProtoType::ExceptionSpecInfo ESI;
      ESI.Type = getExceptionSpecType();
      if (ESI.Type == EST_Dynamic) {
        ESI.Exceptions = Exceptions;
      } else if (ESI.Type == EST_None) {
        /// C++11 [except.spec]p14:
        ///   The exception-specification is noexcept(false) if the set of
        ///   potential exceptions of the special member function contains
        ///   "any"
        ESI.Type = EST_NoexceptFalse;
        ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                     tok::kw_false).get();
      }
      return ESI;
    }
  };

  /// Determine what sort of exception specification a defaulted
  /// default constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                           CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy constructor of a class will have, and whether the parameter
  /// will be const.
ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. 
/// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. 
  bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

  /// Whether 'this' shows up in the exception specification of a static
  /// member function.
  bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

  /// Check whether 'this' shows up in the attributes of the given
  /// static member function.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

  /// MaybeBindToTemporary - If the passed in expression has a record type with
  /// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
  /// Otherwise it simply returns the passed-in expression.
  ExprResult MaybeBindToTemporary(Expr *E);

  bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                               MultiExprArg ArgsPtr,
                               SourceLocation Loc,
                               SmallVectorImpl<Expr*> &ConvertedArgs,
                               bool AllowExplicit = false,
                               bool IsListInitialization = false);

  ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                          SourceLocation NameLoc,
                                          IdentifierInfo &Name);

  ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
                                Scope *S, CXXScopeSpec &SS,
                                bool EnteringContext);
  ParsedType getDestructorName(SourceLocation TildeLoc,
                               IdentifierInfo &II, SourceLocation NameLoc,
                               Scope *S, CXXScopeSpec &SS,
                               ParsedType ObjectType,
                               bool EnteringContext);

  ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
                                          ParsedType ObjectType);

  // Checks that reinterpret casts don't have undefined behavior.
  void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                      bool IsDereference, SourceRange Range);

  /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
  ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
                               tok::TokenKind Kind,
                               SourceLocation LAngleBracketLoc,
                               Declarator &D,
                               SourceLocation RAngleBracketLoc,
                               SourceLocation LParenLoc,
                               Expr *E,
                               SourceLocation RParenLoc);

  ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
                               TypeSourceInfo *Ty, Expr *E,
                               SourceRange AngleBrackets,
                               SourceRange Parens);

  ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
                                     ExprResult Operand,
                                     SourceLocation RParenLoc);
  ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
                                     Expr *Operand, SourceLocation RParenLoc);

  ExprResult BuildCXXTypeId(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
                            SourceLocation RParenLoc);
  ExprResult BuildCXXTypeId(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            Expr *Operand,
                            SourceLocation RParenLoc);

  /// ActOnCXXTypeid - Parse typeid( something ).
  ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                            bool isType, void *TyOrExpr,
                            SourceLocation RParenLoc);

  ExprResult BuildCXXUuidof(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
                            SourceLocation RParenLoc);
  ExprResult BuildCXXUuidof(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            Expr *Operand,
                            SourceLocation RParenLoc);

  /// ActOnCXXUuidof - Parse __uuidof( something ).
  ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
                            bool isType, void *TyOrExpr,
                            SourceLocation RParenLoc);

  /// Handle a C++1z fold-expression: ( expr op ... op expr ).
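  /// For example (illustrative):
  /// \code
  ///   template <typename... Ts>
  ///   auto sum(Ts... ts) { return (ts + ...); } // unary right fold
  /// \endcode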
  ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                              tok::TokenKind Operator,
                              SourceLocation EllipsisLoc, Expr *RHS,
                              SourceLocation RParenLoc);
  ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                              BinaryOperatorKind Operator,
                              SourceLocation EllipsisLoc, Expr *RHS,
                              SourceLocation RParenLoc,
                              Optional<unsigned> NumExpansions);
  ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                   BinaryOperatorKind Operator);

  //// ActOnCXXThis - Parse 'this' pointer.
  ExprResult ActOnCXXThis(SourceLocation loc);

  /// Build a CXXThisExpr and mark it referenced in the current context.
  Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
  void MarkThisReferenced(CXXThisExpr *This);

  /// Try to retrieve the type of the 'this' pointer.
  ///
  /// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
  QualType getCurrentThisType();

  /// When non-NULL, the C++ 'this' expression is allowed despite the
  /// current context not being a non-static member function. In such cases,
  /// this provides the type used for 'this'.
  QualType CXXThisTypeOverride;

  /// RAII object used to temporarily allow the C++ 'this' expression
  /// to be used, with the given qualifiers on the current class type.
  class CXXThisScopeRAII {
    Sema &S;
    QualType OldCXXThisTypeOverride;
    bool Enabled;

  public:
    /// Introduce a new scope where 'this' may be allowed (when enabled),
    /// using the given declaration (which is either a class template or a
    /// class) along with the qualifiers placed on '*this'.
    CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                     bool Enabled = true);

    ~CXXThisScopeRAII();
  };

  /// Make sure the value of 'this' is actually available in the current
  /// context, if it is a potentially evaluated context.
  ///
  /// \param Loc The location at which the capture of 'this' occurs.
  ///
  /// \param Explicit Whether 'this' is explicitly captured in a lambda
  /// capture list.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
  /// This is useful when enclosing lambdas must speculatively capture
  /// 'this' that may or may not be used in certain specializations of
  /// a nested generic lambda (depending on whether the name resolves to
  /// a non-static member function or a static function).
  ///
  /// \return 'true' if the capture failed, 'false' if it succeeded.
  bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
                           bool BuildAndDiagnose = true,
                           const unsigned *const FunctionScopeIndexToStopAt =
                               nullptr,
                           bool ByCopy = false);

  /// Determine whether the given type is the type of *this that is used
  /// outside of the body of a member function for a type that is currently
  /// being defined.
  bool isThisOutsideMemberFunctionBody(QualType BaseType);

  /// ActOnCXXBoolLiteral - Parse {true,false} literals.
  ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

  /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
  ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

  ExprResult
  ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
                                 SourceLocation AtLoc, SourceLocation RParen);

  /// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
  ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);

  //// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
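  /// For example (illustrative), with AFS_Class a member allocation function
  /// hides the global one:
  /// \code
  ///   struct N { static void *operator new(std::size_t); };
  ///   N *n = new N; // lookup finds N::operator new, not ::operator new
  /// \endcode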
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression. ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with an ExprWithCleanups node. /// Otherwise, just returns the passed-in expression.
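/// For example (illustrative only; 'makeName' is a placeholder), a
/// full-expression that materializes a temporary with a non-trivial
/// destructor, such as
/// \code
///   std::string Name = makeName() + "!";
/// \endcode
/// needs its temporaries destroyed at the end of the full-expression, so
/// the initializer is wrapped in an ExprWithCleanups node.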
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case, do not emit an error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed to by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer.
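/// A minimal save/restore round trip might look like this (a sketch of
/// assumed parser-side usage; 'Actions' and 'AnnRange' are placeholder
/// names, not part of this interface):
/// \code
///   void *Ann = Actions.SaveNestedNameSpecifierAnnotation(SS);
///   // ... later, when the annotation token is consumed again ...
///   Actions.RestoreNestedNameSpecifierAnnotation(Ann, AnnRange, SS);
/// \endcode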
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl * startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Optional<std::pair<unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. 
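/// The source form in question (for illustration; 'computeValue' is a
/// placeholder) is a C++14 init-capture:
/// \code
///   auto L = [V = computeValue()] { return V; };
/// \endcode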
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
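/// The conversion being modeled looks roughly like this in source
/// (illustrative only, Objective-C++ with blocks enabled):
/// \code
///   void (^Block)(void) = []{ /* ... */ };  // lambda converted to a block
/// \endcode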
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void 
ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. 
The caller must /// ensure that referenceDLLExportedClassMethods is called at some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator. /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed
a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
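/// For example (illustrative):
/// \code
///   struct B { virtual void f() final; };
///   struct D : B { void f() override; };  // ill-formed: B::f is final
/// \endcode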
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
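/// For example (illustrative; 'Wrap' is a placeholder), in
/// \code
///   template<typename T> struct Wrap { Wrap(T); };
///   Wrap(int) -> Wrap<int>;   // 'Wrap' names a deduction guide here
/// \endcode
/// the declarator-id 'Wrap' can only be the name of a deduction guide.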
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, ConceptDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, 
unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations.
/// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); // Concepts Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. 
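/// A typical collect-then-diagnose sequence over these overloads, sketched
/// against the declarations above ('SemaRef', 'T', and 'Loc' are
/// placeholder names):
/// \code
///   SmallVector<UnexpandedParameterPack, 2> Unexpanded;
///   SemaRef.collectUnexpandedParameterPacks(T, Unexpanded);
///   if (!Unexpanded.empty())
///     SemaRef.DiagnoseUnexpandedParameterPacks(Loc, UPPC_DeclarationType,
///                                              Unexpanded);
/// \endcode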
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. 
/// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. 
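  ///
  /// As an illustrative example (not part of the original header), the call
  /// below deduces conflicting values for 'T', which corresponds to
  /// TDK_Inconsistent:
  /// \code
  ///   template<typename T> void f(T, T);
  ///   void g() { f(1, 2.5); } // T deduced as both 'int' and 'double'
  /// \endcode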
  enum TemplateDeductionResult {
    /// Template argument deduction was successful.
    TDK_Success = 0,
    /// The declaration was invalid; do nothing.
    TDK_Invalid,
    /// Template argument deduction exceeded the maximum template
    /// instantiation depth (which has already been diagnosed).
    TDK_InstantiationDepth,
    /// Template argument deduction did not deduce a value
    /// for every template parameter.
    TDK_Incomplete,
    /// Template argument deduction did not deduce a value for every
    /// expansion of an expanded template parameter pack.
    TDK_IncompletePack,
    /// Template argument deduction produced inconsistent
    /// deduced values for the given template parameter.
    TDK_Inconsistent,
    /// Template argument deduction failed due to inconsistent
    /// cv-qualifiers on a template parameter type that would
    /// otherwise be deduced, e.g., we tried to deduce T in "const T"
    /// but were given a non-const "X".
    TDK_Underqualified,
    /// Substitution of the deduced template argument values
    /// resulted in an error.
    TDK_SubstitutionFailure,
    /// After substituting deduced template arguments, a dependent
    /// parameter type did not match the corresponding argument.
    TDK_DeducedMismatch,
    /// After substituting deduced template arguments, an element of
    /// a dependent parameter type did not match the corresponding element
    /// of the corresponding argument (when deducing from an initializer list).
    TDK_DeducedMismatchNested,
    /// A non-dependent component of the parameter did not match the
    /// corresponding component of the argument.
    TDK_NonDeducedMismatch,
    /// When performing template argument deduction for a function
    /// template, there were too many call arguments.
    TDK_TooManyArguments,
    /// When performing template argument deduction for a function
    /// template, there were too few call arguments.
    TDK_TooFewArguments,
    /// The explicitly-specified template arguments were not valid
    /// template arguments for the given template.
    TDK_InvalidExplicitArguments,
    /// Checking non-dependent argument conversions failed.
    TDK_NonDependentConversionFailure,
    /// Deduction failed; that's all we know.
    TDK_MiscellaneousDeductionFailure,
    /// CUDA Target attributes do not match.
    TDK_CUDATargetMismatch
  };

  TemplateDeductionResult
  DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult
  DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult SubstituteExplicitTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo &ExplicitTemplateArgs,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
      sema::TemplateDeductionInfo &Info);

  /// A function argument from which we performed template argument
  /// deduction for a call.
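  ///
  /// An illustrative sketch (types invented here, not from the original
  /// header): deducing for the call 'h(d)' in
  /// \code
  ///   template<typename T> void h(T *p);
  ///   struct Base {}; struct Derived : Base {};
  ///   void call(Derived *d) { h(d); }
  /// \endcode
  /// records the original parameter type 'T *' and argument type 'Derived *',
  /// so later checks can verify that the argument still converts to the
  /// substituted parameter type.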
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
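  ///
  /// As an illustrative example (not from the original header), class
  /// template argument deduction relies on these implicitly declared guides:
  /// \code
  ///   template<typename T> struct Wrapper { Wrapper(T); };
  ///   Wrapper w(42); // deduces Wrapper<int> via an implicit deduction guide
  /// \endcode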
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. 
    /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
    /// provides the template arguments as specified.
    DefaultFunctionArgumentInstantiation,

    /// We are substituting explicit template arguments provided for
    /// a function template. The entity is a FunctionTemplateDecl.
    ExplicitTemplateArgumentSubstitution,

    /// We are substituting template arguments determined as part of
    /// template argument deduction for either a class template
    /// partial specialization or a function template. The
    /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
    /// a TemplateDecl.
    DeducedTemplateArgumentSubstitution,

    /// We are substituting prior template arguments into a new
    /// template parameter. The template parameter itself is either a
    /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
    PriorTemplateArgumentSubstitution,

    /// We are checking the validity of a default template argument that
    /// has been used when naming a template-id.
    DefaultTemplateArgumentChecking,

    /// We are computing the exception specification for a defaulted special
    /// member function.
    ExceptionSpecEvaluation,

    /// We are instantiating the exception specification for a function
    /// template which was deferred until it was needed.
    ExceptionSpecInstantiation,

    /// We are declaring an implicit special member function.
    DeclaringSpecialMember,

    /// We are defining a synthesized function (such as a defaulted special
    /// member).
    DefiningSynthesizedFunction,

    /// Added for template instantiation observation.
    /// Memoization means we are _not_ instantiating a template because
    /// it is already instantiated (but we entered a context where we
    /// would have had to if it was not already instantiated).
    Memoization
  } Kind;

  /// Was the enclosing context a non-instantiation SFINAE context?
  bool SavedInNonInstantiationSFINAEContext;

  /// The point of instantiation or synthesis within the source code.
  SourceLocation PointOfInstantiation;

  /// The entity that is being synthesized.
  Decl *Entity;

  /// The template (or partial specialization) in which we are
  /// performing the instantiation, for substitutions of prior template
  /// arguments.
  NamedDecl *Template;

  /// The list of template arguments we are substituting, if they
  /// are not part of the entity.
  const TemplateArgument *TemplateArgs;

  // FIXME: Wrap this union around more members, or perhaps store the
  // kind-specific members in the RAII object owning the context.
  union {
    /// The number of template arguments in TemplateArgs.
    unsigned NumTemplateArgs;

    /// The special member being declared or defined.
    CXXSpecialMember SpecialMember;
  };

  ArrayRef<TemplateArgument> template_arguments() const {
    assert(Kind != DeclaringSpecialMember);
    return {TemplateArgs, NumTemplateArgs};
  }

  /// The template deduction info object associated with the
  /// substitution or checking of explicit or deduced template arguments.
  sema::TemplateDeductionInfo *DeductionInfo;

  /// The source range that covers the construct that caused
  /// the instantiation, e.g., the template-id that causes a class
  /// template instantiation.
  SourceRange InstantiationRange;

  CodeSynthesisContext()
      : Kind(TemplateInstantiation),
        SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
        Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
        DeductionInfo(nullptr) {}

  /// Determines whether this template is an actual instantiation
  /// that should be counted toward the maximum instantiation depth.
  bool isInstantiationRecord() const;
};

/// List of active code synthesis contexts.
  ///
  /// This vector is treated as a stack. As synthesis of one entity requires
  /// synthesis of another, additional contexts are pushed onto the stack.
  SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

  /// Specializations whose definitions are currently being instantiated.
  llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

  /// Non-dependent types used in templates that have already been instantiated
  /// by some template instantiation.
  llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

  /// Extra modules inspected when performing a lookup during a template
  /// instantiation. Computed lazily.
  SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

  /// Cache of additional modules that should be used for name lookup
  /// within the current template instantiation. Computed lazily; use
  /// getLookupModules() to get a complete set.
  llvm::DenseSet<Module*> LookupModulesCache;

  /// Get the set of additional modules that should be checked during
  /// name lookup. A module and its imports become visible when instantiating a
  /// template defined within it.
  llvm::DenseSet<Module*> &getLookupModules();

  /// Map from the most recent declaration of a namespace to the most
  /// recent visible declaration of that namespace.
  llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

  /// Whether we are in a SFINAE context that is not associated with
  /// template instantiation.
  ///
  /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
  /// of a template instantiation or template argument deduction.
  bool InNonInstantiationSFINAEContext;

  /// The number of \p CodeSynthesisContexts that are not template
  /// instantiations and, therefore, should not be counted as part of the
  /// instantiation depth.
  ///
  /// When the instantiation depth reaches the user-configurable limit
  /// \p LangOptions::InstantiationDepth we will abort instantiation.
  // FIXME: Should we have a similar limit for other forms of synthesis?
  unsigned NonInstantiationEntries;

  /// The depth of the context stack at the point when the most recent
  /// error or warning was produced.
  ///
  /// This value is used to suppress printing of redundant context stacks
  /// when there are multiple errors or warnings in the same instantiation.
  // FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
  unsigned LastEmittedCodeSynthesisContextDepth = 0;

  /// The template instantiation callbacks to trace or track
  /// instantiations (objects can be chained).
  ///
  /// These callbacks are used to print, trace or track template
  /// instantiations as they are being constructed.
  std::vector<std::unique_ptr<TemplateInstantiationCallback>>
      TemplateInstCallbacks;

  /// The current index into pack expansion arguments that will be
  /// used for substitution of parameter packs.
  ///
  /// The pack expansion index will be -1 to indicate that parameter packs
  /// should be instantiated as themselves. Otherwise, the index specifies
  /// which argument within the parameter pack will be used for substitution.
  int ArgumentPackSubstitutionIndex;

  /// RAII object used to change the argument pack substitution index
  /// within a \c Sema object.
  ///
  /// See \c ArgumentPackSubstitutionIndex for more information.
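  ///
  /// A minimal usage sketch (illustrative; the Sema reference 'S' and the
  /// pack element index 'I' are assumed to be in scope):
  /// \code
  ///   {
  ///     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
  ///     // ... substitute the expansion pattern for pack element I ...
  ///   } // the previous substitution index is restored here
  /// \endcode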
class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. 
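  ///
  /// As an illustrative example (not from the original header), a
  /// substitution failure in the return type below is not an error inside
  /// such a context; it merely removes the candidate from overload
  /// resolution:
  /// \code
  ///   template<typename T> auto f(T t) -> decltype(t.begin(), void()) {}
  ///   void f(...) {}
  ///   void g() { f(3); } // T=int fails to substitute; f(...) is chosen
  /// \endcode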
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
  /// It is not necessarily
  /// the point of instantiation (which will be either before or after the
  /// namespace-scope declaration that triggered this implicit instantiation);
  /// however, it is the location that diagnostics should generally refer to,
  /// because users will need to know what code triggered the instantiation.
  typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

  /// The queue of implicit template instantiations that are required
  /// but have not yet been performed.
  std::deque<PendingImplicitInstantiation> PendingInstantiations;

  /// Queue of implicit template instantiations that cannot be performed
  /// eagerly.
  SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

  class GlobalEagerInstantiationScope {
  public:
    GlobalEagerInstantiationScope(Sema &S, bool Enabled)
        : S(S), Enabled(Enabled) {
      if (!Enabled) return;

      SavedPendingInstantiations.swap(S.PendingInstantiations);
      SavedVTableUses.swap(S.VTableUses);
    }

    void perform() {
      if (Enabled) {
        S.DefineUsedVTables();
        S.PerformPendingInstantiations();
      }
    }

    ~GlobalEagerInstantiationScope() {
      if (!Enabled) return;

      // Restore the set of pending vtables.
      assert(S.VTableUses.empty() &&
             "VTableUses should be empty before it is discarded.");
      S.VTableUses.swap(SavedVTableUses);

      // Restore the set of pending implicit instantiations.
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    }

  private:
    Sema &S;
    SmallVector<VTableUse, 16> SavedVTableUses;
    std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
    bool Enabled;
  };

  /// The queue of implicit template instantiations that are required
  /// and must be performed within the current local scope.
  ///
  /// This queue is only used for member functions of local classes in
  /// templates, which must be instantiated in the same scope as their
  /// enclosing function, so that they can reference function-local
  /// types, static variables, enumerators, etc.
  std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

  class LocalEagerInstantiationScope {
  public:
    LocalEagerInstantiationScope(Sema &S) : S(S) {
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

    void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

    ~LocalEagerInstantiationScope() {
      assert(S.PendingLocalImplicitInstantiations.empty() &&
             "there shouldn't be any pending local implicit instantiations");
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

  private:
    Sema &S;
    std::deque<PendingImplicitInstantiation>
        SavedPendingLocalImplicitInstantiations;
  };

  /// A helper class for building up ExtParameterInfos.
  class ExtParameterInfoBuilder {
    SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
    bool HasInteresting = false;

  public:
    /// Set the ExtParameterInfo for the parameter at the given index.
    void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
      assert(Infos.size() <= index);
      Infos.resize(index);
      Infos.push_back(info);

      if (!HasInteresting)
        HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
    }

    /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
    /// ExtParameterInfo array we've built up.
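    ///
    /// A minimal usage sketch (illustrative; 'Info' is an assumed
    /// ExtParameterInfo value). Note that indices must be set in increasing
    /// order:
    /// \code
    ///   Sema::ExtParameterInfoBuilder Builder;
    ///   Builder.set(2, Info); // parameters 0 and 1 keep default infos
    ///   auto *Array = Builder.getPointerOrNull(/*numParams=*/4);
    ///   // Array is null when nothing "interesting" was recorded.
    /// \endcode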
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
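  ///
  /// As an illustrative example (not from the original header), substituting
  /// into the call arguments of
  /// \code
  ///   void f(int, double);
  ///   template<typename ...Ts> void g(Ts ...args) { f(args...); }
  ///   void call() { g(1, 2.5); } // 'f(args...)' becomes 'f(1, 2.5)'
  /// \endcode
  /// with 'args' bound to {1, 2.5} expands the pattern twice, so \p Outputs
  /// receives the expressions '1' and '2.5'.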
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl 
*Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
  enum ObjCContainerKind {
    OCK_None = -1,
    OCK_Interface = 0,
    OCK_Protocol,
    OCK_Category,
    OCK_ClassExtension,
    OCK_Implementation,
    OCK_CategoryImplementation
  };
  ObjCContainerKind getObjCContainerKind() const;

  DeclResult actOnObjCTypeParam(Scope *S,
                                ObjCTypeParamVariance variance,
                                SourceLocation varianceLoc,
                                unsigned index,
                                IdentifierInfo *paramName,
                                SourceLocation paramLoc,
                                SourceLocation colonLoc,
                                ParsedType typeBound);

  ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
                                            ArrayRef<Decl *> typeParams,
                                            SourceLocation rAngleLoc);
  void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);

  Decl *ActOnStartClassInterface(
      Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
      SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
      IdentifierInfo *SuperName, SourceLocation SuperLoc,
      ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
      Decl *const *ProtoRefs, unsigned NumProtoRefs,
      const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
      const ParsedAttributesView &AttrList);

  void ActOnSuperClassOfClassInterface(Scope *S,
                                       SourceLocation AtInterfaceLoc,
                                       ObjCInterfaceDecl *IDecl,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *SuperName,
                                       SourceLocation SuperLoc,
                                       ArrayRef<ParsedType> SuperTypeArgs,
                                       SourceRange SuperTypeArgsRange);

  void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                               SmallVectorImpl<SourceLocation> &ProtocolLocs,
                               IdentifierInfo *SuperName,
                               SourceLocation SuperLoc);

  Decl *ActOnCompatibilityAlias(
      SourceLocation AtCompatibilityAliasLoc,
      IdentifierInfo *AliasName, SourceLocation AliasLocation,
      IdentifierInfo *ClassName, SourceLocation ClassLocation);

  bool CheckForwardProtocolDeclarationForCircularDependency(
      IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc,
      const ObjCList<ObjCProtocolDecl> &PList);

  Decl *ActOnStartProtocolInterface(
      SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
      SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
      unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
      SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);

  Decl *ActOnStartCategoryInterface(
      SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
      SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
      IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
      Decl *const *ProtoRefs, unsigned NumProtoRefs,
      const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
      const ParsedAttributesView &AttrList);

  Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                      IdentifierInfo *ClassName,
                                      SourceLocation ClassLoc,
                                      IdentifierInfo *SuperClassname,
                                      SourceLocation SuperClassLoc,
                                      const ParsedAttributesView &AttrList);

  Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                         IdentifierInfo *ClassName,
                                         SourceLocation ClassLoc,
                                         IdentifierInfo *CatName,
                                         SourceLocation CatLoc,
                                         const ParsedAttributesView &AttrList);

  DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                               ArrayRef<Decl *> Decls);

  DeclGroupPtrTy ActOnForwardClassDeclaration(
      SourceLocation Loc, IdentifierInfo **IdentList,
      SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists,
      unsigned NumElts);

  DeclGroupPtrTy
  ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
                                  ArrayRef<IdentifierLocPair> IdentList,
                                  const ParsedAttributesView &attrList);

  void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                               ArrayRef<IdentifierLocPair> ProtocolId,
                               SmallVectorImpl<Decl *> &Protocols);

  void DiagnoseTypeArgsAndProtocols(IdentifierInfo
      *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId,
      SourceLocation TypeArgLoc, bool SelectProtocolFirst = false);

  /// Given a list of identifiers (and their locations), resolve the
  /// names to either Objective-C protocol qualifiers or type
  /// arguments, as appropriate.
  void actOnObjCTypeArgsOrProtocolQualifiers(
      Scope *S, ParsedType baseType, SourceLocation lAngleLoc,
      ArrayRef<IdentifierInfo *> identifiers,
      ArrayRef<SourceLocation> identifierLocs,
      SourceLocation rAngleLoc,
      SourceLocation &typeArgsLAngleLoc,
      SmallVectorImpl<ParsedType> &typeArgs,
      SourceLocation &typeArgsRAngleLoc,
      SourceLocation &protocolLAngleLoc,
      SmallVectorImpl<Decl *> &protocols,
      SourceLocation &protocolRAngleLoc,
      bool warnOnIncompleteProtocols);

  /// Build an Objective-C protocol-qualified 'id' type where no
  /// base type was specified.
  TypeResult actOnObjCProtocolQualifierType(
      SourceLocation lAngleLoc,
      ArrayRef<Decl *> protocols,
      ArrayRef<SourceLocation> protocolLocs,
      SourceLocation rAngleLoc);

  /// Build a specialized and/or protocol-qualified Objective-C type.
  TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
      Scope *S,
      SourceLocation Loc,
      ParsedType BaseType,
      SourceLocation TypeArgsLAngleLoc,
      ArrayRef<ParsedType> TypeArgs,
      SourceLocation TypeArgsRAngleLoc,
      SourceLocation ProtocolLAngleLoc,
      ArrayRef<Decl *> Protocols,
      ArrayRef<SourceLocation> ProtocolLocs,
      SourceLocation ProtocolRAngleLoc);

  /// Build an Objective-C type parameter type.
  QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                  SourceLocation ProtocolLAngleLoc,
                                  ArrayRef<ObjCProtocolDecl *> Protocols,
                                  ArrayRef<SourceLocation> ProtocolLocs,
                                  SourceLocation ProtocolRAngleLoc,
                                  bool FailOnError = false);

  /// Build an Objective-C object pointer type.
  QualType BuildObjCObjectType(QualType BaseType,
                               SourceLocation Loc,
                               SourceLocation TypeArgsLAngleLoc,
                               ArrayRef<TypeSourceInfo *> TypeArgs,
                               SourceLocation TypeArgsRAngleLoc,
                               SourceLocation ProtocolLAngleLoc,
                               ArrayRef<ObjCProtocolDecl *> Protocols,
                               ArrayRef<SourceLocation> ProtocolLocs,
                               SourceLocation ProtocolRAngleLoc,
                               bool FailOnError = false);

  /// Ensure attributes are consistent with type.
  /// \param [in, out] Attributes The attributes to check; they will
  /// be modified to be consistent with \p PropertyTy.
  void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
                                   SourceLocation Loc,
                                   unsigned &Attributes,
                                   bool propertyInPrimaryClass);

  /// Process the specified property declaration and create decls for the
  /// setters and getters as needed.
/// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . 
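  ///
  /// For example (illustrative):
  /// \code
  ///   #pragma GCC visibility push(hidden)
  ///   void internal_helper(); // receives hidden visibility
  ///   #pragma GCC visibility pop
  /// \endcode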
  void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                             SourceLocation PragmaLoc);

  NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                                 SourceLocation Loc);
  void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

  /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
  void ActOnPragmaWeakID(IdentifierInfo* WeakName,
                         SourceLocation PragmaLoc,
                         SourceLocation WeakNameLoc);

  /// ActOnPragmaRedefineExtname - Called on well formed
  /// \#pragma redefine_extname oldname newname.
  void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                  IdentifierInfo* AliasName,
                                  SourceLocation PragmaLoc,
                                  SourceLocation WeakNameLoc,
                                  SourceLocation AliasNameLoc);

  /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
  void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
                            IdentifierInfo* AliasName,
                            SourceLocation PragmaLoc,
                            SourceLocation WeakNameLoc,
                            SourceLocation AliasNameLoc);

  /// ActOnPragmaFPContract - Called on well formed
  /// \#pragma {STDC,OPENCL} FP_CONTRACT and
  /// \#pragma clang fp contract
  void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);

  /// ActOnPragmaFEnvAccess - Called on well formed
  /// \#pragma STDC FENV_ACCESS
  void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);

  /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
  /// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
  void AddAlignmentAttributesForRecord(RecordDecl *RD);

  /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
  void AddMsStructLayoutForRecord(RecordDecl *RD);

  /// FreePackedContext - Deallocate and null out PackContext.
  void FreePackedContext();

  /// PushNamespaceVisibilityAttr - Note that we've entered a
  /// namespace with a visibility attribute.
  void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                   SourceLocation Loc);

  /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
  /// add an appropriate visibility attribute.
  void AddPushedVisibilityAttribute(Decl *RD);

  /// PopPragmaVisibility - Pop the top element of the visibility stack; used
  /// for '\#pragma GCC visibility' and visibility attributes on namespaces.
  void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

  /// FreeVisContext - Deallocate and null out VisContext.
  void FreeVisContext();

  /// AddCFAuditedAttribute - Check whether we're currently within
  /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
  /// the appropriate attribute.
  void AddCFAuditedAttribute(Decl *D);

  void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
                                     SourceLocation PragmaLoc,
                                     attr::ParsedSubjectMatchRuleSet Rules);
  void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
                                     const IdentifierInfo *Namespace);

  /// Called on well-formed '\#pragma clang attribute pop'.
  void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
                               const IdentifierInfo *Namespace);

  /// Adds the attributes that have been specified using the
  /// '\#pragma clang attribute push' directives to the given declaration.
  void AddPragmaAttributes(Scope *S, Decl *D);

  void DiagnoseUnterminatedPragmaAttribute();

  /// Called on well formed \#pragma clang optimize.
  void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

  /// Get the location for the currently active "\#pragma clang optimize
  /// off". If this location is invalid, then the state of the pragma is "on".
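  ///
  /// For example (illustrative):
  /// \code
  ///   #pragma clang optimize off
  ///   void not_optimized() {} // definitions in this region are not optimized
  ///   #pragma clang optimize on
  /// \endcode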
  SourceLocation getOptimizeOffPragmaLocation() const {
    return OptimizeOffPragmaLocation;
  }

  /// Only called on function definitions; if there is a pragma in scope
  /// with the effect of a range-based optnone, consider marking the function
  /// with attribute optnone.
  void AddRangeBasedOptnone(FunctionDecl *FD);

  /// Adds the 'optnone' attribute to the function declaration if there
  /// are no conflicts; Loc represents the location causing the 'optnone'
  /// attribute to be added (usually because of a pragma).
  void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

  /// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
  void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                      bool IsPackExpansion);
  void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
                      bool IsPackExpansion);

  /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
  /// declaration.
  void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                            Expr *OE);

  /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
  /// declaration.
  void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
                         Expr *ParamExpr);

  /// AddAlignValueAttr - Adds an align_value attribute to a particular
  /// declaration.
  void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);

  /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
  /// declaration.
  void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
                           Expr *MaxThreads, Expr *MinBlocks);

  /// AddModeAttr - Adds a mode attribute to a particular declaration.
  void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
                   bool InInstantiation = false);

  void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
                           ParameterABI ABI);

  enum class RetainOwnershipKind { NS, CF, OS };
  void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
                        RetainOwnershipKind K, bool IsTemplateInstantiation);

  /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
  /// attribute to a particular declaration.
  void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
                                      Expr *Min, Expr *Max);
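  // Hedged example (assumed user-code spellings, not normative): the
  // Add*Attr helpers above correspond to source-level attributes such as
  //
  //   void *alloc_page(unsigned n) __attribute__((assume_aligned(4096)));
  //   void *my_memalign(int align, int n) __attribute__((alloc_align(1)));
  //   typedef int di_int __attribute__((mode(DI)));
  //
  // Each helper validates its arguments (for instance, that an alignment is
  // an integral constant expression) before attaching the attribute.

  /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
  /// particular declaration.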
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. 
If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckForDelayedContext = true); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckCaller = true); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Finishes analysis of the deferred functions calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); /// Marks all the functions that might be required for the currently active /// OpenMP context. void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse); public: /// Struct to store the context selectors info for declare variant directive. 
  struct OpenMPDeclareVariantCtsSelectorData {
    OMPDeclareVariantAttr::CtxSelectorSetType CtxSet =
        OMPDeclareVariantAttr::CtxSetUnknown;
    OMPDeclareVariantAttr::CtxSelectorType Ctx =
        OMPDeclareVariantAttr::CtxUnknown;
    StringRef ImplVendor;
    ExprResult CtxScore;
    explicit OpenMPDeclareVariantCtsSelectorData() = default;
    explicit OpenMPDeclareVariantCtsSelectorData(
        OMPDeclareVariantAttr::CtxSelectorSetType CtxSet,
        OMPDeclareVariantAttr::CtxSelectorType Ctx, StringRef ImplVendor,
        ExprResult CtxScore)
        : CtxSet(CtxSet), Ctx(Ctx), ImplVendor(ImplVendor),
          CtxScore(CtxScore) {}
  };

  /// Checks if the variant/multiversion functions are compatible.
  bool areMultiversionVariantFunctionsCompatible(
      const FunctionDecl *OldFD, const FunctionDecl *NewFD,
      const PartialDiagnostic &NoProtoDiagID,
      const PartialDiagnosticAt &NoteCausedDiagIDAt,
      const PartialDiagnosticAt &NoSupportDiagIDAt,
      const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
      bool ConstexprSupported);

  /// Tries to capture a lambda's captured variables in the OpenMP region
  /// before the original lambda is captured.
  void tryCaptureOpenMPLambdas(ValueDecl *V);

  /// Return true if the provided declaration \a D should be captured by
  /// reference.
  /// \param Level Relative level of the nested OpenMP construct for which
  /// the check is performed.
  /// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
  bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
                             unsigned OpenMPCaptureLevel) const;

  /// Check if the specified variable is used in one of the private
  /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
  /// constructs.
  VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
                                unsigned StopAt = 0);
  ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
                                   ExprObjectKind OK, SourceLocation Loc);

  /// If the current region is a loop-based region, mark the start of the loop
  /// construct.
  void startOpenMPLoop();

  /// Check if the specified variable is used in a 'private' clause.
  /// \param Level Relative level of the nested OpenMP construct for which
  /// the check is performed.
  bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;

  /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
  /// for \p FD based on the DSA for the provided corresponding captured
  /// declaration \p D.
  void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);

  /// Check if the specified variable is captured by a 'target' directive.
  /// \param Level Relative level of the nested OpenMP construct for which
  /// the check is performed.
  bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;

  ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                    Expr *Op);

  /// Called on start of a new data sharing attribute block.
  void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                           const DeclarationNameInfo &DirName, Scope *CurScope,
                           SourceLocation Loc);
  /// Start analysis of clauses.
  void StartOpenMPClause(OpenMPClauseKind K);
  /// End analysis of clauses.
  void EndOpenMPClause();
  /// Called on end of a data sharing attribute block.
  void EndOpenMPDSABlock(Stmt *CurDirective);

  /// Check if the current region is an OpenMP loop region and if it is,
  /// mark the loop control variable used in \p Init for loop initialization
  /// as private by default.
  /// \param Init First part of the for loop.
  void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

  // OpenMP directives and clauses.
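  // Rough sketch (illustrative call order only, not actual Clang code) of
  // how the parser drives the hooks above for
  //   #pragma omp parallel private(x)
  //
  //   StartOpenMPDSABlock(OMPD_parallel, DirName, CurScope, Loc);
  //   StartOpenMPClause(OMPC_private);
  //   OMPClause *C =
  //       ActOnOpenMPPrivateClause(Vars, StartLoc, LParenLoc, EndLoc);
  //   EndOpenMPClause();
  //   ... act on the associated statement and remaining clauses ...
  //   EndOpenMPDSABlock(Directive);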
  /// Called on a correct id-expression from the '#pragma omp
  /// threadprivate'.
  ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                     const DeclarationNameInfo &Id,
                                     OpenMPDirectiveKind Kind);
  /// Called on well-formed '#pragma omp threadprivate'.
  DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
                                                   ArrayRef<Expr *> VarList);
  /// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
  OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
                                                  ArrayRef<Expr *> VarList);
  /// Called on well-formed '#pragma omp allocate'.
  DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
                                              ArrayRef<Expr *> VarList,
                                              ArrayRef<OMPClause *> Clauses,
                                              DeclContext *Owner = nullptr);
  /// Called on well-formed '#pragma omp requires'.
  DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
                                              ArrayRef<OMPClause *> ClauseList);
  /// Check restrictions on the 'requires' directive.
  OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
                                        ArrayRef<OMPClause *> Clauses);
  /// Check if the specified type is allowed to be used in an 'omp declare
  /// reduction' construct.
  QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
                                           TypeResult ParsedType);
  /// Called on start of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name,
      ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
      AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
  /// Initialize the declare reduction construct combiner.
  void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
  /// Finish the current declare reduction construct combiner.
  void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
  /// Initialize the declare reduction construct initializer.
  /// \return omp_priv variable.
  VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
  /// Finish the current declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
                                                 VarDecl *OmpPrivParm);
  /// Called at the end of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
      Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
  /// Check a variable declaration in an 'omp declare mapper' construct.
  TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
  /// Check if the specified type is allowed to be used in an 'omp declare
  /// mapper' construct.
  QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
                                        TypeResult ParsedType);
  /// Called on start of '#pragma omp declare mapper'.
  OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
      SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
      Decl *PrevDeclInScope = nullptr);
  /// Build the mapper variable of '#pragma omp declare mapper'.
  void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
                                                Scope *S, QualType MapperType,
                                                SourceLocation StartLoc,
                                                DeclarationName VN);
  /// Called at the end of '#pragma omp declare mapper'.
  DeclGroupPtrTy
  ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
                                       ArrayRef<OMPClause *> ClauseList);
  /// Called on the start of a target region, i.e. '#pragma omp declare
  /// target'.
  bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
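  // Illustrative source form handled by the declare-reduction hooks above
  // (a sketch; the spelling follows the OpenMP spec):
  //
  //   #pragma omp declare reduction(merge : std::vector<int> :            \
  //       omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))    \
  //       initializer(omp_priv = omp_orig)
  //
  // Parsing runs ...DeclareReductionDirectiveStart, the Combiner{Start,End}
  // pair for the combiner expression, the Initializer{Start,End} pair for
  // the initializer clause, and finally ...DeclareReductionDirectiveEnd.

  /// Called at the end of a target region, i.e.
  /// '#pragma omp end declare target'.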
void ActOnFinishOpenMPDeclareTargetDirective(); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl * lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, NamedDeclSetType &SameDirectiveDecls); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. 
  StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Checks correctness of linear modifiers.
  bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
                                 SourceLocation LinLoc);
  /// Checks that the specified declaration matches requirements for the
  /// linear decls.
  bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
                             OpenMPLinearClauseKind LinKind, QualType Type);

  /// Called on well-formed '\#pragma omp declare simd' after parsing of
  /// the associated method/function.
  DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
      DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
      Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
      ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
      ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);

  /// Checks the '\#pragma omp declare variant' variant function and original
  /// functions after parsing of the associated method/function.
  /// \param DG Function declaration to which the declare variant directive
  /// is applied.
  /// \param VariantRef Expression that references the variant function, which
  /// must be used instead of the original one, specified in \p DG.
  /// \returns None if the function and its variant are not compatible with
  /// the pragma; otherwise, the pair of the original function and the variant
  /// ref expression.
  Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(
      DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR);

  /// Called on well-formed '\#pragma omp declare variant' after parsing of
  /// the associated method/function.
  /// \param FD Function declaration to which the declare variant directive
  /// is applied.
  /// \param VariantRef Expression that references the variant function, which
  /// must be used instead of the original one, specified in \p FD.
  /// \param Data Set of context-specific data for the specified context
  /// selector.
  void ActOnOpenMPDeclareVariantDirective(
      FunctionDecl *FD, Expr *VariantRef, SourceRange SR,
      const Sema::OpenMPDeclareVariantCtsSelectorData &Data);

  OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed 'allocator' clause.
  OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'if' clause.
  OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
                                 Expr *Condition, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation NameModifierLoc,
                                 SourceLocation ColonLoc,
                                 SourceLocation EndLoc);
  /// Called on well-formed 'final' clause.
  OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'num_threads' clause.
  OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// Called on well-formed 'safelen' clause.
  OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'simdlen' clause.
  OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
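  // Hedged sketch of the source construct the declare-variant hooks above
  // model (context-selector spelling per OpenMP 5.0; names illustrative):
  //
  //   #pragma omp declare variant(avx512_saxpy) \
  //           match(implementation = {vendor(llvm)})
  //   void saxpy(int n, float a, float *x, float *y);
  //
  // The selector-set/selector pair ('implementation'/'vendor') and an
  // optional score are what OpenMPDeclareVariantCtsSelectorData records.

  /// Called on well-formed 'collapse' clause.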
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. 
  OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'unified_address' clause.
  OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed 'unified_shared_memory' clause.
  OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                  SourceLocation EndLoc);
  /// Called on well-formed 'reverse_offload' clause.
  OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed 'dynamic_allocators' clause.
  OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
  /// Called on well-formed 'atomic_default_mem_order' clause.
  OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
      OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation EndLoc);

  OMPClause *ActOnOpenMPVarListClause(
      OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
      const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
      CXXScopeSpec &ReductionOrMapperIdScopeSpec,
      DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
      OpenMPLinearClauseKind LinKind,
      ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
      ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
      bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
  /// Called on well-formed 'allocate' clause.
  OMPClause *
  ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
                            SourceLocation StartLoc, SourceLocation ColonLoc,
                            SourceLocation LParenLoc, SourceLocation EndLoc);
  /// Called on well-formed 'private' clause.
  OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'firstprivate' clause.
  OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc);
  /// Called on well-formed 'lastprivate' clause.
  OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed 'shared' clause.
  OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'reduction' clause.
  OMPClause *ActOnOpenMPReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'task_reduction' clause.
  OMPClause *ActOnOpenMPTaskReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'in_reduction' clause.
  OMPClause *ActOnOpenMPInReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
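  // Assumed dispatch sketch (simplified; the real logic lives in
  // SemaOpenMP.cpp): every variable-list clause is funneled through
  // ActOnOpenMPVarListClause, which forwards to the kind-specific hook:
  //
  //   switch (Kind) {
  //   case OMPC_private:
  //     return ActOnOpenMPPrivateClause(Vars, Locs.StartLoc, Locs.LParenLoc,
  //                                     Locs.EndLoc);
  //   case OMPC_shared:
  //     return ActOnOpenMPSharedClause(Vars, Locs.StartLoc, Locs.LParenLoc,
  //                                    Locs.EndLoc);
  //   ...
  //   }

  /// Called on well-formed 'linear' clause.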
OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. 
  OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
                                           const OMPVarListLocTy &Locs);
  /// Called on well-formed 'is_device_ptr' clause.
  OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                          const OMPVarListLocTy &Locs);

  /// The kind of conversion being performed.
  enum CheckedConversionKind {
    /// An implicit conversion.
    CCK_ImplicitConversion,
    /// A C-style cast.
    CCK_CStyleCast,
    /// A functional-style cast.
    CCK_FunctionalCast,
    /// A cast other than a C-style cast.
    CCK_OtherCast,
    /// A conversion for an operand of a builtin overloaded operator.
    CCK_ForBuiltinOverloadedOp
  };

  static bool isCast(CheckedConversionKind CCK) {
    return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
           CCK == CCK_OtherCast;
  }

  /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
  /// cast. If there is already an implicit cast, merge into the existing one.
  /// If isLvalue, the result of the cast is an lvalue.
  ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                               ExprValueKind VK = VK_RValue,
                               const CXXCastPath *BasePath = nullptr,
                               CheckedConversionKind CCK
                                  = CCK_ImplicitConversion);

  /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
  /// to the conversion from scalar type ScalarTy to the Boolean type.
  static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

  /// IgnoredValueConversions - Given that an expression's result is
  /// syntactically ignored, perform any conversions that are
  /// required.
  ExprResult IgnoredValueConversions(Expr *E);

  // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
  // functions and arrays to their respective pointers (C99 6.3.2.1).
  ExprResult UsualUnaryConversions(Expr *E);

  /// CallExprUnaryConversions - a special case of a unary conversion
  /// performed on a function designator of a call expression.
  ExprResult CallExprUnaryConversions(Expr *E);

  // DefaultFunctionArrayConversion - converts functions and arrays
  // to their respective pointers (C99 6.3.2.1).
  ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);

  // DefaultFunctionArrayLvalueConversion - converts functions and
  // arrays to their respective pointers and performs the
  // lvalue-to-rvalue conversion.
  ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                  bool Diagnose = true);

  // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
  // the operand. This is DefaultFunctionArrayLvalueConversion,
  // except that it assumes the operand isn't of function or array
  // type.
  ExprResult DefaultLvalueConversion(Expr *E);

  // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
  // do not have a prototype. Integer promotions are performed on each
  // argument, and arguments that have type float are promoted to double.
  ExprResult DefaultArgumentPromotion(Expr *E);

  /// If \p E is a prvalue denoting an unmaterialized temporary, materialize
  /// it as an xvalue. In C++98, the result will still be a prvalue, because
  /// we don't have xvalues there.
  ExprResult TemporaryMaterializationConversion(Expr *E);

  // Used for emitting the right warning by DefaultVariadicArgumentPromotion.
  enum VariadicCallType {
    VariadicFunction,
    VariadicBlock,
    VariadicMethod,
    VariadicConstructor,
    VariadicDoesNotApply
  };

  VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                       const FunctionProtoType *Proto,
                                       Expr *Fn);
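  // Effect sketch for the decay conversions above (illustrative):
  //
  //   int a[4];  void f();
  //   // 'a' : int[4]  --DefaultFunctionArrayConversion-->  int *
  //   // 'f' : void()  --DefaultFunctionArrayConversion-->  void (*)()
  //   // lvalue int    --DefaultLvalueConversion-------->   prvalue int

  // Used for determining in which context a type is allowed to be passed to a
  // vararg function.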
  enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Undefined,
    VAK_MSVCUndefined,
    VAK_Invalid
  };

  // Determines which VarArgKind fits an expression.
  VarArgKind isValidVarArgType(const QualType &Ty);

  /// Check to see if the given expression is a valid argument to a variadic
  /// function, issuing a diagnostic if not.
  void checkVariadicArgument(const Expr *E, VariadicCallType CT);

  /// Check to see if a given expression could have '.c_str()' called on it.
  bool hasCStrMethod(const Expr *E);

  /// GatherArgumentsForCall - Collects argument expressions for various
  /// forms of call prototypes.
  bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                              const FunctionProtoType *Proto,
                              unsigned FirstParam, ArrayRef<Expr *> Args,
                              SmallVectorImpl<Expr *> &AllArgs,
                              VariadicCallType CallType = VariadicDoesNotApply,
                              bool AllowExplicit = false,
                              bool IsListInitialization = false);

  // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
  // will create a runtime trap if the resulting type is not a POD type.
  ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                              FunctionDecl *FDecl);

  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
  // operands and then handles various conversions that are common to binary
  // operators (C99 6.3.1.8). If both operands aren't arithmetic, this
  // routine returns the first non-arithmetic type found. The client is
  // responsible for emitting appropriate error diagnostics.
  QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                      bool IsCompAssign = false);

  /// AssignConvertType - All of the 'assignment' semantic checks return this
  /// enum to indicate whether the assignment was allowed. These checks are
  /// done for simple assignments, as well as initialization, return from
  /// function, argument passing, etc. The query is phrased in terms of a
  /// source and destination type.
  enum AssignConvertType {
    /// Compatible - the types are compatible according to the standard.
    Compatible,

    /// PointerToInt - The assignment converts a pointer to an int, which we
    /// accept as an extension.
    PointerToInt,

    /// IntToPointer - The assignment converts an int to a pointer, which we
    /// accept as an extension.
    IntToPointer,

    /// FunctionVoidPointer - The assignment is between a function pointer and
    /// void*, which the standard doesn't allow, but we accept as an extension.
    FunctionVoidPointer,

    /// IncompatiblePointer - The assignment is between two pointer types that
    /// are not compatible, but we accept them as an extension.
    IncompatiblePointer,

    /// IncompatiblePointerSign - The assignment is between two pointer types
    /// which point to integers which have a different sign, but are otherwise
    /// identical. This is a subset of the above, but broken out because it's
    /// by far the most common case of incompatible pointers.
    IncompatiblePointerSign,

    /// CompatiblePointerDiscardsQualifiers - The assignment discards
    /// c/v/r qualifiers, which we accept as an extension.
    CompatiblePointerDiscardsQualifiers,

    /// IncompatiblePointerDiscardsQualifiers - The assignment
    /// discards qualifiers that we don't permit to be discarded,
    /// like address spaces.
    IncompatiblePointerDiscardsQualifiers,

    /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
    /// changes address spaces in nested pointer types, which is not allowed.
    /// For instance, converting __private int ** to __generic int ** is
    /// illegal even though __private could be converted to __generic.
    IncompatibleNestedPointerAddressSpaceMismatch,

    /// IncompatibleNestedPointerQualifiers - The assignment is between two
    /// nested pointer types, and the qualifiers other than the first two
    /// levels differ, e.g. char ** -> const char **, but we accept them as
    /// an extension.
    IncompatibleNestedPointerQualifiers,

    /// IncompatibleVectors - The assignment is between two vector types that
    /// have the same size, which we accept as an extension.
    IncompatibleVectors,

    /// IntToBlockPointer - The assignment converts an int to a block
    /// pointer. We disallow this.
    IntToBlockPointer,

    /// IncompatibleBlockPointer - The assignment is between two block
    /// pointer types that are not compatible.
    IncompatibleBlockPointer,

    /// IncompatibleObjCQualifiedId - The assignment is between a qualified
    /// id type and something else (that is incompatible with it). For
    /// example, "id <XXX>" = "Foo *", where "Foo *" doesn't implement the
    /// XXX protocol.
    IncompatibleObjCQualifiedId,

    /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
    /// object with __weak qualifier.
    IncompatibleObjCWeakRef,

    /// Incompatible - We reject this conversion outright; it is invalid to
    /// represent it in the AST.
    Incompatible
  };

  /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
  /// assignment conversion type specified by ConvTy. This returns true if the
  /// conversion was invalid or false if the conversion was accepted.
  bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc,
                                QualType DstType, QualType SrcType,
                                Expr *SrcExpr, AssignmentAction Action,
                                bool *Complained = nullptr);

  /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
  /// enum. If AllowMask is true, then we also allow the complement of a valid
  /// value, to be used as a mask.
  bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                         bool AllowMask) const;

  /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
  /// integer not in the range of enum values.
  void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                              Expr *SrcExpr);

  /// CheckAssignmentConstraints - Perform type checking for assignment,
  /// argument passing, variable initialization, and function return values.
  /// C99 6.5.16.
  AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                               QualType LHSType,
                                               QualType RHSType);

  /// Check assignment constraints and optionally prepare for a conversion of
  /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
  /// is true.
  AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                               ExprResult &RHS, CastKind &Kind,
                                               bool ConvertRHS = true);
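  // Typical caller idiom (a sketch of how these checks pair with diagnostics
  // inside Sema; simplified):
  //
  //   AssignConvertType ConvTy =
  //       CheckSingleAssignmentConstraints(LHSType, RHS);
  //   if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHSType, RHS.get(),
  //                                AA_Assigning))
  //     return ExprError();  // the conversion was invalid

  /// Check assignment constraints for an assignment of RHS to LHSType.
  ///
  /// \param LHSType The destination type for the assignment.
  /// \param RHS The source expression for the assignment.
  /// \param Diagnose If \c true, diagnostics may be produced when checking
  ///        for assignability. If a diagnostic is produced, \p RHS will be
  ///        set to ExprError(). Note that this function may still return
  ///        without producing a diagnostic, even for an invalid assignment.
  /// \param DiagnoseCFAudited If \c true, the target is a function parameter
  ///        in an audited Core Foundation API and does not need to be checked
  ///        for ARC retain issues.
  /// \param ConvertRHS If \c true, \p RHS will be updated to model the
  ///        conversions necessary to perform the assignment. If \c false,
  ///        \p Diagnose must also be \c false.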
AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. 
// Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // Returns true if the cast is invalid. bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // Returns the cast expr. ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return the converted condition expression; it is invalid iff there were /// any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Checks the conversion of the condition to /// bool; the result is invalid if the conversion fails. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns the converted expression /// on success, or an invalid result if a diagnostic was issued. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - Verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns an invalid result on failure. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before decrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns.
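/// A hedged sketch of the bookkeeping (the shapes are illustrative, not the
/// exact implementation): when a not-yet-known-emitted Caller calls Callee,
/// the edge is recorded roughly as
///
///   DeviceCallGraph[CanonicalDeclPtr<FunctionDecl>(Caller)]
///       .insert({CanonicalDeclPtr<FunctionDecl>(Callee), CallLoc});
///
/// and replayed once Caller is discovered to be known-emitted.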
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP device errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Indicate that this function (and thus everything it transitively calls) /// will be codegen'ed, and emit any deferred diagnostics on this function and /// its (transitive) callees. void markKnownEmitted( Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee, SourceLocation OrigLoc, const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with the highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var.
// // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure the kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation. PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, 
SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. 
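// For example, given a declaration such as (hypothetical user code):
//
//   void log(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
//
// the FormatAttr records the one-based format-string index (1) and first
// variadic-data index (2); getFormatStringInfo() below is expected to store
// their zero-based equivalents (adjusting for the implicit 'this' argument of
// C++ non-static member functions) into this struct.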
struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
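// For example, SemaBuiltinShuffleVector below checks calls such as
// (hypothetical user code):
//
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float4 r = __builtin_shufflevector(a, b, 0, 1, 4, 5);
//
// where the trailing indices must be integer constant expressions selecting
// lanes from the concatenation of the two vector operands.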
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. 
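/// For example (hypothetical user code):
///
///   struct A { int x; };
///   struct B : A { int x; }; // -Wshadow-field: B::x shadows A::x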
void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains a 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether the receiver is a mutable ObjC container which /// attempts to add itself into the container. void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the number of arguments being passed /// to a function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view it as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics.
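/// A hedged illustration of the pattern this machinery diagnoses
/// (hypothetical user code):
///
///   struct __attribute__((packed)) P { char c; int i; } p;
///   int *q = &p.i; // taking the address of a packed member:
///                  // 'q' may be underaligned for 'int'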
void DiagnoseMisalignedMembers(); /// This function checks whether the expression is in the set of potentially /// misaligned members and is being converted to some pointer type T with lower /// or equal alignment requirements. If so, it removes the expression from the /// set. This is used when we do not want to diagnose such misaligned access /// (e.g. in conversions to void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
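// A hedged usage sketch of the specialization above: it is what allows
// FunctionDeclAndLoc to serve as a key in DenseMap/DenseSet containers such
// as Sema::LocsWithCUDACallDiags (FD and Loc here are hypothetical locals):
//
//   llvm::DenseSet<clang::Sema::FunctionDeclAndLoc> Seen;
//   if (!Seen.insert({FD, Loc}).second)
//     return; // this call site was already diagnosed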
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class 
FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. 
llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of the expected type during expression parsing. The type is /// tied to a particular token; all functions that update or consume the type /// take the start location of the token they are looking at as a parameter. /// This allows us to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this function, as it stores a /// function_ref; clients should make sure all calls to get() with the same /// location happen while the function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; /// Source of additional semantic information. ExternalSemaSource *ExternalSource; /// Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here.
if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method() {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support the // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This is an attribute introduced by \#pragma clang attribute.
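/// For orientation, the modeled pragma looks like this (an illustrative
/// sketch; the attribute and subject set are arbitrary):
/// \code
///   #pragma clang attribute push (__attribute__((annotate("demo"))), apply_to = function)
///   void f();  // f() receives annotate("demo")
///   #pragma clang attribute pop
/// \endcode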
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursion. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list of all the extended vector types. This /// allows us to associate a raw vector type with one of the ext_vector type /// names. This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of the translation unit. /// /// This list contains class members and the locations of delete-expressions /// for which we could not yet prove whether they mismatch the new-expression /// used in the initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations for which we have /// emitted a list of pure virtual functions.
Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. 
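/// A typical client interaction, as a sketch (the variable names are
/// hypothetical, and it is assumed here that sema::DelayedDiagnosticPool's
/// constructor takes the parent pool):
/// \code
///   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
///   Sema::DelayedDiagnosticsState State = S.DelayedDiagnostics.push(Pool);
///   // ... parse; access and deprecation diagnostics collect in Pool ...
///   S.DelayedDiagnostics.popWithoutEmitting(State);
/// \endcode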
sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. 
Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to look up file-scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library reside. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits>. ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// Will hold the 'respondsToSelector:' selector. Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields.
This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. std::unique_ptr<MangleNumberingContext> MangleNumbering; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. 
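/// (For example, the return type of \c f in \c decltype(f(x)) need not be
/// complete while the call is the top-level operand of the decltype; the
/// completeness check is therefore deferred until the operand has been
/// fully parsed.)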
SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// \brief Describes whether we are in an expression context which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering(), ExprContext(ExprContext) {} /// Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext * getCurrentMangleNumberContext(const DeclContext *DC, Decl *&ManglingContextDecl, bool SkpNoODRChk = false, bool *Forced = nullptr); /// SpecialMemberOverloadResult - The overload resolution result for a special /// member function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing.
/// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. 
We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {} ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; } private: Sema& S; FPOptions OldFPFeaturesState; }; void addImplicitTypedef(StringRef Name, QualType T); bool WarnedStackExhausted = false; public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } /// Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// /// \param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed). Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow. void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref<void()> Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well.
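/// A typical use, as a sketch (the diagnostic ID and operands here are
/// illustrative, not real entries from the diagnostic .td files):
/// \code
///   SemaRef.Diag(D->getLocation(), diag::err_hypothetical_mismatch)
///       << D->getDeclName() << SourceRange(StartLoc, EndLoc);
/// \endcode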
class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case anyway. SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during parsing.
Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. 
/// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) 
{ assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but we need to check that the address-of operator is applied to it. /// This requires keeping a container of all pending expressions and checking /// whether their addresses are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate, NC_UndeclaredTemplate, }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case 
NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. 
Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions). CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context.
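// Editor's sketch (hypothetical user code, not part of this header): under
// ARC, a C union with a __strong member is non-trivial to copy and to
// default-initialize, so uses such as
//
//   union U { __strong id obj; int plain; };
//   void take(union U u);     // NTCUC_FunctionParam context
//   union U give(void);       // NTCUC_FunctionReturn context
//
// are the kind of thing the routine below diagnoses.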
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose when a parameter or the return value of a function or /// Objective-C method definition is passed by value and is larger than a /// specified threshold.
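// Editor's sketch (hypothetical user code): with -Wlarge-by-value-copy=<N>,
// a definition such as
//
//   struct Big { char bytes[4096]; };
//   Big roundTrip(Big b) { return b; }  // parameter and return value are
//                                       // both copied by value
//
// is the kind of case the routine below reports when sizeof(Big) exceeds N.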
void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem.
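// Editor's illustrative note: the module callbacks declared above correspond
// to C++20 source constructs roughly as follows (hypothetical user code):
//
//   module;                // ActOnGlobalModuleFragmentDecl
//   export module mylib;   // ActOnModuleDecl, ModuleDeclKind::Interface
//   import other.dep;      // ActOnModuleImport
//   module :private;       // ActOnPrivateModuleFragmentDecl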
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
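// Editor's sketch (hypothetical user code): this classification feeds
// err_tag_reference_non_tag when a tag keyword names a non-tag declaration:
//
//   typedef int T;
//   struct T *p;   // error: 'T' refers to a typedef, i.e. NTK_Typedef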
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. 
/// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. 
ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added.
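// Editor's sketch: given the AvailabilityPriority weights above, an
// explicitly written attribute (hypothetical user code)
//
//   void f() __attribute__((availability(macos, introduced=10.12)));
//
// carries AP_Explicit and therefore takes precedence over the same attribute
// injected for a region of code via '#pragma clang attribute push'
// (AP_PragmaClangAttribute).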
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. 
CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. 
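// Editor's note, continuing the comment above with the concrete literal
// forms each enumerator classifies (hypothetical user code): @[a, b] is
// LK_Array, @{k : v} is LK_Dictionary, @42 is LK_Numeric, @(expr) is
// LK_Boxed, @"s" is LK_String, and ^{ } is LK_Block.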
enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType
*Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate. void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-template candidates // identified by the expression Expr. void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal.
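// Editor's sketch (hypothetical user code): the enable_if attribute that
// CheckEnableIf and the routine below reason about looks like
//
//   int pick(int n) __attribute__((enable_if(n > 0, "n must be positive")));
//
//   pick(0);          // no viable candidate; the failing attribute is noted
//   auto *p = &pick;  // taking the address is also invalid, because the
//                     // enable_if condition is non-tautological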
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions.
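// Editor's illustrative note on the range-for machinery above (hypothetical
// user code): for
//
//   for (auto &x : range) use(x);
//
// BuildForRangeBeginEndCall builds the begin/end calls, using the member
// begin()/end() lookup results when present and argument-dependent lookup
// otherwise; FRS_NoViableFunction tells the caller that no usable begin/end
// was found, and FRS_DiagnosticIssued that an error was already reported.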
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria are specified via the LookupNameKind enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. 
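// Editor's note: these known namespaces seed typo correction, so that an
// unqualified or misspelled reference can be corrected to a qualified name
// (for example, suggesting 'std::vector' for a stray 'vector'), including
// namespaces that were only seen in an AST/PCH file.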
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. 
void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if a method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If the method is a property setter/getter and /// the property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns the ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjCPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks whether two methods' types match and returns /// true or false accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in an interface /// or protocol against those declared in their implementations. 
void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a /// category match those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in the global method pool for the /// given selector. It checks the desired kind first; if none is found and the /// parameter CheckTheOther is set, it then checks the other kind. If no such /// method, or only one method, is found, the function returns false; otherwise, it /// returns true. bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns the method which best matches the given argument list, or /// nullptr if none could be found. ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. 
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// An RAII object to enter the scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops a function scope on exit. 
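/// A minimal illustrative sketch (the variable names here are hypothetical,
/// not part of this header): construct the helper after pushing a function
/// scope so the scope is popped on every early-exit path, and call disable()
/// on the one path that pops the scope itself:
/// \code
///   FunctionScopeRAII PopScopeOnExit(SemaRef);
///   if (Success) {
///     SemaRef.PopFunctionScopeInfo(); // popped explicitly here...
///     PopScopeOnExit.disable();       // ...so the helper must not pop again
///   }
/// \endcode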
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, 
Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
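/// For example (illustrative), in C++11 an initialization such as
/// \code
///   void *p = 0; // the literal 0 is implicitly converted to a null pointer
/// \endcode
/// can be diagnosed with a suggestion to use 'nullptr' instead.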
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
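// For example (illustrative): given 'struct B { virtual void f(); };', the
// qualified call 'b.B::f()' names B::f directly and is a non-virtual call,
// whereas the plain call 'b.f()' dispatches virtually, so the function that
// actually runs cannot be determined from this context alone.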
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely checks whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. 
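/// For example (illustrative), given 'int f();', the bare reference to 'f' in
/// \code
///   int x = f; // missing parentheses
/// \endcode
/// can be recovered as the call 'f()'.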
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
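// For example (illustrative; 'Obj' stands for any record type):
//   struct Handle { Obj *operator->(); };
//   Handle h;
//   h.member; // no member 'member' in Handle; retried as h->member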
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); enum class AtomicArgumentOrder { API, AST }; ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an AltiVec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" // Handle the final expression in a statement expression. 
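// For example (illustrative; 'compute' is an arbitrary function), in the GNU
// statement expression
//   ({ int y = compute(); y + 1; })
// the final expression 'y + 1' supplies the value and type of the whole
// construct.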
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
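/// For example (illustrative), an element-wise conversion between two vector
/// types with the same number of elements:
/// \code
///   typedef int   int4   __attribute__((ext_vector_type(4)));
///   typedef float float4 __attribute__((ext_vector_type(4)));
///   float4 widen(int4 v) { return __builtin_convertvector(v, float4); }
/// \endcode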
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing whether we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: /// Lookup the specified comparison category types in the standard /// library, and check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs. QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
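/// For example (illustrative), the first constructor below is an
/// initializer-list constructor, while the second is not because its extra
/// parameter has no default argument:
/// \code
///   struct S {
///     S(std::initializer_list<int>);      // initializer-list constructor
///     S(std::initializer_list<int>, int); // not an initializer-list constructor
///   };
/// \endcode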
bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// corresponding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Determine what sort of exception specification a defaulted /// default constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const. 
ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. 
/// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. 
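/// For example (illustrative), this check rejects code such as
/// \code
///   struct X { static auto f() -> decltype(this); }; // error: no 'this' here
/// \endcode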
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Check whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed-in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed-in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ).
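/// For example (an illustrative snippet, not part of the original header),
/// a binary right fold over '+' with an init operand:
/// \code
/// template <typename... Ts>
/// auto sum(Ts... ts) {
///   return (ts + ... + 0); // expands to t1 + (t2 + (... + (tN + 0)))
/// }
/// \endcode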
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \returns 'true' on failure, 'false' on success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
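/// As a hedged illustration (example assumed, not from the original header)
/// of why the lookup scope matters: a class-scope allocation function hides
/// the global one unless '::new' is written:
/// \code
/// struct Pooled {
///   static void *operator new(std::size_t); // preferred for 'new Pooled'
/// };
/// Pooled *p = new Pooled;   // uses the class-scope operator new
/// Pooled *q = ::new Pooled; // '::' restricts lookup to the global scope
/// \endcode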
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
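/// A sketch of the situation this handles (illustrative example, not from
/// the original header): a full-expression that creates a temporary with a
/// non-trivial destructor needs a cleanup at the end of the expression:
/// \code
/// std::string makeName();
/// bool empty = makeName().empty(); // temporary destroyed after the ';'
/// \endcode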
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl * startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Optional<std::pair<unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. 
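/// For reference, a C++14 init-capture (illustrative example, not from the
/// original header):
/// \code
/// auto owner = std::make_unique<int>(42);
/// auto f = [ptr = std::move(owner)] { return *ptr; }; // init-capture 'ptr'
/// \endcode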
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
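/// The conversions in question look like this (illustrative snippet; the
/// block form assumes the Objective-C blocks extension is enabled):
/// \code
/// auto l = [](int x) { return x + 1; };
/// int (*fp)(int) = l; // lambda-to-function-pointer conversion
/// int (^bp)(int) = l; // lambda-to-block-pointer conversion
/// \endcode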
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void 
ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. 
The caller must /// ensure that referenceDLLExportedClassMethods is called at some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator. /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed
a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of the base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4.
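/// For example (an illustrative snippet, not from the original header):
/// \code
/// struct Base { virtual void f() final; };
/// struct Derived : Base {
///   void f(); // error: declaration of 'f' overrides a 'final' function
/// };
/// \endcode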
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
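/// For reference, a deduction-guide declaration (illustrative example, not
/// from the original header; presumes <string> for std::string):
/// \code
/// template <typename T> struct Wrapper { Wrapper(T); };
/// Wrapper(const char *) -> Wrapper<std::string>; // 'Wrapper' is the name
/// \endcode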
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, ConceptDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, 
unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations.
/// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
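/// For example (an illustrative snippet, not from the original header):
/// \code
/// template <typename MetaFun, typename T1, typename T2>
/// struct Apply {
///   using type = typename MetaFun::template apply<T1, T2>;
/// };
/// \endcode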
  /// Called when the parser has parsed a C++ typename
  /// specifier that ends in a template-id, e.g.,
  /// "typename MetaFun::template apply<T1, T2>".
  ///
  /// \param S The scope in which this typename type occurs.
  /// \param TypenameLoc the location of the 'typename' keyword
  /// \param SS the nested-name-specifier following the typename (e.g., 'T::').
  /// \param TemplateLoc the location of the 'template' keyword, if any.
  /// \param TemplateName The template name.
  /// \param TemplateII The identifier used to name the template.
  /// \param TemplateIILoc The location of the template name.
  /// \param LAngleLoc The location of the opening angle bracket ('<').
  /// \param TemplateArgs The template arguments.
  /// \param RAngleLoc The location of the closing angle bracket ('>').
  TypeResult
  ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                    const CXXScopeSpec &SS, SourceLocation TemplateLoc,
                    TemplateTy TemplateName, IdentifierInfo *TemplateII,
                    SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
                    ASTTemplateArgsPtr TemplateArgs,
                    SourceLocation RAngleLoc);

  QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                             SourceLocation KeywordLoc,
                             NestedNameSpecifierLoc QualifierLoc,
                             const IdentifierInfo &II,
                             SourceLocation IILoc);

  TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
                                                    SourceLocation Loc,
                                                    DeclarationName Name);
  bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
  ExprResult RebuildExprInCurrentInstantiation(Expr *E);
  bool RebuildTemplateParamsInCurrentInstantiation(
      TemplateParameterList *Params);

  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgumentList &Args);

  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgument *Args,
                                  unsigned NumArgs);

  // Concepts
  Decl *ActOnConceptDefinition(
      Scope *S, MultiTemplateParamsArg TemplateParameterLists,
      IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);

  //===--------------------------------------------------------------------===//
  // C++ Variadic Templates (C++0x [temp.variadic])
  //===--------------------------------------------------------------------===//

  /// Determine whether an unexpanded parameter pack might be permitted in this
  /// location. Useful for error recovery.
  bool isUnexpandedParameterPackPermitted();

  /// The context in which an unexpanded parameter pack is
  /// being diagnosed.
  ///
  /// Note that the values of this enumeration line up with the first
  /// argument to the \c err_unexpanded_parameter_pack diagnostic.
  enum UnexpandedParameterPackContext {
    /// An arbitrary expression.
    UPPC_Expression = 0,

    /// The base type of a class type.
    UPPC_BaseType,

    /// The type of an arbitrary declaration.
    UPPC_DeclarationType,

    /// The type of a data member.
    UPPC_DataMemberType,

    /// The size of a bit-field.
    UPPC_BitFieldWidth,

    /// The expression in a static assertion.
    UPPC_StaticAssertExpression,

    /// The fixed underlying type of an enumeration.
    UPPC_FixedUnderlyingType,

    /// The enumerator value.
    UPPC_EnumeratorValue,

    /// A using declaration.
    UPPC_UsingDeclaration,

    /// A friend declaration.
    UPPC_FriendDeclaration,

    /// A declaration qualifier.
    UPPC_DeclarationQualifier,

    /// An initializer.
    UPPC_Initializer,

    /// A default argument.
    UPPC_DefaultArgument,

    /// The type of a non-type template parameter.
    UPPC_NonTypeTemplateParameterType,

    /// The type of an exception.
    UPPC_ExceptionType,

    /// Partial specialization.
    UPPC_PartialSpecialization,

    /// Microsoft __if_exists.
    UPPC_IfExists,

    /// Microsoft __if_not_exists.
    UPPC_IfNotExists,

    /// Lambda expression.
    UPPC_Lambda,

    /// Block expression.
    UPPC_Block
  };

  /// Diagnose unexpanded parameter packs.
  ///
  /// \param Loc The location at which we should emit the diagnostic.
  ///
  /// \param UPPC The context in which we are diagnosing unexpanded
  /// parameter packs.
  ///
  /// \param Unexpanded the set of unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPacks(
      SourceLocation Loc, UnexpandedParameterPackContext UPPC,
      ArrayRef<UnexpandedParameterPack> Unexpanded);
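  // Illustrative sketch, not part of the original header: the kind of source
  // error the unexpanded-parameter-pack diagnostics above report.
  //
  //   template<typename ...Ts> struct X {
  //     Ts member;             // error: unexpanded parameter pack 'Ts'
  //                            // (UPPC_DataMemberType)
  //     std::tuple<Ts...> ok;  // OK: the pack is expanded with '...'
  //   };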
  /// If the given type contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The source location where a diagnostic should be emitted.
  ///
  /// \param T The type that is being checked for unexpanded parameter
  /// packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                       UnexpandedParameterPackContext UPPC);

  /// If the given expression contains an unexpanded parameter
  /// pack, diagnose the error.
  ///
  /// \param E The expression that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(
      Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression);

  /// If the given nested-name-specifier contains an unexpanded
  /// parameter pack, diagnose the error.
  ///
  /// \param SS The nested-name-specifier that is being checked for
  /// unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
                                       UnexpandedParameterPackContext UPPC);

  /// If the given name contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param NameInfo The name (with source location information) that
  /// is being checked for unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
                                       UnexpandedParameterPackContext UPPC);

  /// If the given template name contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The location of the template name.
  ///
  /// \param Template The template name that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
                                       TemplateName Template,
                                       UnexpandedParameterPackContext UPPC);

  /// If the given template argument contains an unexpanded parameter
  /// pack, diagnose the error.
  ///
  /// \param Arg The template argument that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
                                       UnexpandedParameterPackContext UPPC);

  /// Collect the set of unexpanded parameter packs within the given
  /// template argument.
  ///
  /// \param Arg The template argument that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(
      TemplateArgument Arg,
      SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Collect the set of unexpanded parameter packs within the given
  /// template argument.
  ///
  /// \param Arg The template argument that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(
      TemplateArgumentLoc Arg,
      SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Collect the set of unexpanded parameter packs within the given
  /// type.
  ///
  /// \param T The type that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(
      QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Collect the set of unexpanded parameter packs within the given
  /// type.
  ///
  /// \param TL The type that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(
      TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Collect the set of unexpanded parameter packs within the given
  /// nested-name-specifier.
  ///
  /// \param NNS The nested-name-specifier that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(
      NestedNameSpecifierLoc NNS,
      SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Collect the set of unexpanded parameter packs within the given
  /// name.
  ///
  /// \param NameInfo The name that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(
      const DeclarationNameInfo &NameInfo,
      SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Invoked when parsing a template argument followed by an
  /// ellipsis, which creates a pack expansion.
  ///
  /// \param Arg The template argument preceding the ellipsis, which
  /// may already be invalid.
  ///
  /// \param EllipsisLoc The location of the ellipsis.
  ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
                                            SourceLocation EllipsisLoc);

  /// Invoked when parsing a type followed by an ellipsis, which
  /// creates a pack expansion.
  ///
  /// \param Type The type preceding the ellipsis, which will become
  /// the pattern of the pack expansion.
  ///
  /// \param EllipsisLoc The location of the ellipsis.
  TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);

  /// Construct a pack expansion type from the pattern of the pack
  /// expansion.
  TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
                                     SourceLocation EllipsisLoc,
                                     Optional<unsigned> NumExpansions);

  /// Construct a pack expansion type from the pattern of the pack
  /// expansion.
  QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange,
                              SourceLocation EllipsisLoc,
                              Optional<unsigned> NumExpansions);

  /// Invoked when parsing an expression followed by an ellipsis, which
  /// creates a pack expansion.
  ///
  /// \param Pattern The expression preceding the ellipsis, which will become
  /// the pattern of the pack expansion.
  ///
  /// \param EllipsisLoc The location of the ellipsis.
  ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);

  /// Invoked when parsing an expression followed by an ellipsis, which
  /// creates a pack expansion.
  ///
  /// \param Pattern The expression preceding the ellipsis, which will become
  /// the pattern of the pack expansion.
  ///
  /// \param EllipsisLoc The location of the ellipsis.
  ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
                                Optional<unsigned> NumExpansions);
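  // Illustrative sketch, not part of the original header: pack expansions of
  // the kind the ActOnPackExpansion/CheckPackExpansion routines build. The
  // functions 'f' and 'g' below are hypothetical example code.
  //
  //   template<typename ...Ts> void g(Ts ...args);
  //   template<typename ...Ts> void f(Ts ...args) {
  //     g(args...);        // 'args...' is a pack expansion
  //     g((args + 1)...);  // the pattern '(args + 1)' expands per element
  //   }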
  /// Determine whether we could expand a pack expansion with the
  /// given set of parameter packs into separate arguments by repeatedly
  /// transforming the pattern.
  ///
  /// \param EllipsisLoc The location of the ellipsis that identifies the
  /// pack expansion.
  ///
  /// \param PatternRange The source range that covers the entire pattern of
  /// the pack expansion.
  ///
  /// \param Unexpanded The set of unexpanded parameter packs within the
  /// pattern.
  ///
  /// \param ShouldExpand Will be set to \c true if the transformer should
  /// expand the corresponding pack expansions into separate arguments. When
  /// set, \c NumExpansions must also be set.
  ///
  /// \param RetainExpansion Whether the caller should add an unexpanded
  /// pack expansion after all of the expanded arguments. This is used
  /// when extending explicitly-specified template argument packs per
  /// C++0x [temp.arg.explicit]p9.
  ///
  /// \param NumExpansions The number of separate arguments that will be in
  /// the expanded form of the corresponding pack expansion. This is both an
  /// input and an output parameter, which can be set by the caller if the
  /// number of expansions is known a priori (e.g., due to a prior
  /// substitution) and will be set by the callee when the number of
  /// expansions is known. The callee must set this value when
  /// \c ShouldExpand is \c true; it may set this value in other cases.
  ///
  /// \returns true if an error occurred (e.g., because the parameter packs
  /// are to be instantiated with arguments of different lengths), false
  /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
  /// must be set.
  bool CheckParameterPacksForExpansion(
      SourceLocation EllipsisLoc, SourceRange PatternRange,
      ArrayRef<UnexpandedParameterPack> Unexpanded,
      const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand,
      bool &RetainExpansion, Optional<unsigned> &NumExpansions);

  /// Determine the number of arguments in the given pack expansion
  /// type.
  ///
  /// This routine assumes that the number of arguments in the expansion is
  /// consistent across all of the unexpanded parameter packs in its pattern.
  ///
  /// Returns an empty Optional if the type can't be expanded.
  Optional<unsigned> getNumArgumentsInExpansion(
      QualType T, const MultiLevelTemplateArgumentList &TemplateArgs);

  /// Determine whether the given declarator contains any unexpanded
  /// parameter packs.
  ///
  /// This routine is used by the parser to disambiguate function declarators
  /// with an ellipsis prior to the ')', e.g.,
  ///
  /// \code
  /// void f(T...);
  /// \endcode
  ///
  /// to determine whether we have an (unnamed) function parameter pack or
  /// a variadic function.
  ///
  /// \returns true if the declarator contains any unexpanded parameter packs,
  /// false otherwise.
  bool containsUnexpandedParameterPacks(Declarator &D);

  /// Returns the pattern of the pack expansion for a template argument.
  ///
  /// \param OrigLoc The template argument to expand.
  ///
  /// \param Ellipsis Will be set to the location of the ellipsis.
  ///
  /// \param NumExpansions Will be set to the number of expansions that will
  /// be generated from this pack expansion, if known a priori.
  TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
      TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis,
      Optional<unsigned> &NumExpansions) const;

  /// Given a template argument that contains an unexpanded parameter pack, but
  /// which has already been substituted, attempt to determine the number of
  /// elements that will be produced once this argument is fully-expanded.
  ///
  /// This is intended for use when transforming 'sizeof...(Arg)' in order to
  /// avoid actually expanding the pack where possible.
  Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);

  //===--------------------------------------------------------------------===//
  // C++ Template Argument Deduction (C++ [temp.deduct])
  //===--------------------------------------------------------------------===//

  /// Adjust the type \p ArgFunctionType to match the calling convention,
  /// noreturn, and optionally the exception specification of \p FunctionType.
  /// Deduction often wants to ignore these properties when matching function
  /// types.
  QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
                               bool AdjustExceptionSpec = false);
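  // Illustrative sketch, not part of the original header: the 'sizeof...'
  // computation that getFullyPackExpandedSize above supports without fully
  // expanding the pack. 'count' is hypothetical example code.
  //
  //   template<typename ...Ts> constexpr unsigned count = sizeof...(Ts);
  //   static_assert(count<int, float, char> == 3, "three pack elements");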
  /// Describes the result of template argument deduction.
  ///
  /// The TemplateDeductionResult enumeration describes the result of
  /// template argument deduction, as returned from
  /// DeduceTemplateArguments(). The separate TemplateDeductionInfo
  /// structure provides additional information about the results of
  /// template argument deduction, e.g., the deduced template argument
  /// list (if successful) or the specific template parameters or
  /// deduced arguments that were involved in the failure.
  enum TemplateDeductionResult {
    /// Template argument deduction was successful.
    TDK_Success = 0,

    /// The declaration was invalid; do nothing.
    TDK_Invalid,

    /// Template argument deduction exceeded the maximum template
    /// instantiation depth (which has already been diagnosed).
    TDK_InstantiationDepth,

    /// Template argument deduction did not deduce a value
    /// for every template parameter.
    TDK_Incomplete,

    /// Template argument deduction did not deduce a value for every
    /// expansion of an expanded template parameter pack.
    TDK_IncompletePack,

    /// Template argument deduction produced inconsistent
    /// deduced values for the given template parameter.
    TDK_Inconsistent,

    /// Template argument deduction failed due to inconsistent
    /// cv-qualifiers on a template parameter type that would
    /// otherwise be deduced, e.g., we tried to deduce T in "const T"
    /// but were given a non-const "X".
    TDK_Underqualified,

    /// Substitution of the deduced template argument values
    /// resulted in an error.
    TDK_SubstitutionFailure,

    /// After substituting deduced template arguments, a dependent
    /// parameter type did not match the corresponding argument.
    TDK_DeducedMismatch,

    /// After substituting deduced template arguments, an element of
    /// a dependent parameter type did not match the corresponding element
    /// of the corresponding argument (when deducing from an initializer
    /// list).
    TDK_DeducedMismatchNested,

    /// A non-dependent component of the parameter did not match the
    /// corresponding component of the argument.
    TDK_NonDeducedMismatch,

    /// When performing template argument deduction for a function
    /// template, there were too many call arguments.
    TDK_TooManyArguments,

    /// When performing template argument deduction for a function
    /// template, there were too few call arguments.
    TDK_TooFewArguments,

    /// The explicitly-specified template arguments were not valid
    /// template arguments for the given template.
    TDK_InvalidExplicitArguments,

    /// Checking non-dependent argument conversions failed.
    TDK_NonDependentConversionFailure,

    /// Deduction failed; that's all we know.
    TDK_MiscellaneousDeductionFailure,

    /// CUDA Target attributes do not match.
    TDK_CUDATargetMismatch
  };

  TemplateDeductionResult
  DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult
  DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult SubstituteExplicitTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo &ExplicitTemplateArgs,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
      sema::TemplateDeductionInfo &Info);
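  // Illustrative sketch, not part of the original header: the kind of
  // failure TDK_Inconsistent describes. 'f' is hypothetical example code.
  //
  //   template<typename T> void f(T, T);
  //   f(1, 2.5);  // T would be deduced as both 'int' and 'double', so
  //               // deduction fails with TDK_Inconsistent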
  /// A function argument from which we performed template argument
  /// deduction for a call.
  struct OriginalCallArg {
    OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
                    unsigned ArgIdx, QualType OriginalArgType)
        : OriginalParamType(OriginalParamType),
          DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
          OriginalArgType(OriginalArgType) {}

    QualType OriginalParamType;
    bool DecomposedParam;
    unsigned ArgIdx;
    QualType OriginalArgType;
  };

  TemplateDeductionResult FinishTemplateArgumentDeduction(
      FunctionTemplateDecl *FunctionTemplate,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
      sema::TemplateDeductionInfo &Info,
      SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
      bool PartialOverloading = false,
      llvm::function_ref<bool()> CheckNonDependent = []{ return false; });

  TemplateDeductionResult DeduceTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
      FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
      bool PartialOverloading,
      llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);

  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                          QualType ArgFunctionType,
                          FunctionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info,
                          bool IsAddressOfFunction = false);

  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          QualType ToType,
                          CXXConversionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                          FunctionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info,
                          bool IsAddressOfFunction = false);

  /// Substitute Replacement for \p auto in \p TypeWithAuto
  QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);

  /// Substitute Replacement for auto in TypeWithAuto
  TypeSourceInfo *SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
                                          QualType Replacement);

  /// Completely replace the \c auto in \p TypeWithAuto by
  /// \p Replacement. This does not retain any \c auto type sugar.
  QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);

  /// Result type of DeduceAutoType.
  enum DeduceAutoResult {
    DAR_Succeeded,
    DAR_Failed,
    DAR_FailedAlreadyDiagnosed
  };

  DeduceAutoResult
  DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
                 QualType &Result,
                 Optional<unsigned> DependentDeductionDepth = None);
  DeduceAutoResult
  DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
                 Optional<unsigned> DependentDeductionDepth = None);
  void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
  bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
                        bool Diagnose = true);
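  // Illustrative sketch, not part of the original header: the language-level
  // deductions DeduceAutoType performs.
  //
  //   auto x = 42;   // 'auto' deduced as 'int' (DAR_Succeeded)
  //   auto &r = x;   // 'auto' deduced as 'int'; 'r' has type 'int &'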
  /// Declare implicit deduction guides for a class template if we've
  /// not already done so.
  void DeclareImplicitDeductionGuides(TemplateDecl *Template,
                                      SourceLocation Loc);

  QualType DeduceTemplateSpecializationFromInitializer(
      TypeSourceInfo *TInfo, const InitializedEntity &Entity,
      const InitializationKind &Kind, MultiExprArg Init);

  QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
                                        QualType Type, TypeSourceInfo *TSI,
                                        SourceRange Range, bool DirectInit,
                                        Expr *Init);

  TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;

  bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
                                        SourceLocation ReturnLoc,
                                        Expr *&RetExpr, AutoType *AT);

  FunctionTemplateDecl *getMoreSpecializedTemplate(
      FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2,
      SourceLocation Loc, TemplatePartialOrderingContext TPOC,
      unsigned NumCallArguments1, unsigned NumCallArguments2);
  UnresolvedSetIterator
  getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
                     TemplateSpecCandidateSet &FailedCandidates,
                     SourceLocation Loc, const PartialDiagnostic &NoneDiag,
                     const PartialDiagnostic &AmbigDiag,
                     const PartialDiagnostic &CandidateDiag,
                     bool Complain = true, QualType TargetType = QualType());

  ClassTemplatePartialSpecializationDecl *
  getMoreSpecializedPartialSpecialization(
      ClassTemplatePartialSpecializationDecl *PS1,
      ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);

  bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
                                    sema::TemplateDeductionInfo &Info);

  VarTemplatePartialSpecializationDecl *
  getMoreSpecializedPartialSpecialization(
      VarTemplatePartialSpecializationDecl *PS1,
      VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);

  bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
                                    sema::TemplateDeductionInfo &Info);

  bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
      TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);

  void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
                                  bool OnlyDeduced, unsigned Depth,
                                  llvm::SmallBitVector &Used);
  void
  MarkDeducedTemplateParameters(const FunctionTemplateDecl *FunctionTemplate,
                                llvm::SmallBitVector &Deduced) {
    return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
  }
  static void
  MarkDeducedTemplateParameters(ASTContext &Ctx,
                                const FunctionTemplateDecl *FunctionTemplate,
                                llvm::SmallBitVector &Deduced);

  //===--------------------------------------------------------------------===//
  // C++ Template Instantiation
  //

  MultiLevelTemplateArgumentList
  getTemplateInstantiationArgs(NamedDecl *D,
                               const TemplateArgumentList *Innermost = nullptr,
                               bool RelativeToPrimary = false,
                               const FunctionDecl *Pattern = nullptr);
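  // Illustrative sketch, not part of the original header: class template
  // argument deduction via the implicit guides that
  // DeclareImplicitDeductionGuides produces. 'Box' is hypothetical example
  // code.
  //
  //   template<typename T> struct Box { Box(T); };
  //   Box b(42);  // implicit guide Box(T) -> Box<T> deduces Box<int>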
  /// A context in which code is being synthesized (where a source location
  /// alone is not sufficient to identify the context). This covers template
  /// instantiation and various forms of implicitly-generated functions.
  struct CodeSynthesisContext {
    /// The kind of template instantiation we are performing
    enum SynthesisKind {
      /// We are instantiating a template declaration. The entity is
      /// the declaration we're instantiating (e.g., a CXXRecordDecl).
      TemplateInstantiation,

      /// We are instantiating a default argument for a template
      /// parameter. The Entity is the template parameter whose argument is
      /// being instantiated, the Template is the template, and the
      /// TemplateArgs/NumTemplateArguments provide the template arguments as
      /// specified.
      DefaultTemplateArgumentInstantiation,

      /// We are instantiating a default argument for a function.
      /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
      /// provides the template arguments as specified.
      DefaultFunctionArgumentInstantiation,

      /// We are substituting explicit template arguments provided for
      /// a function template. The entity is a FunctionTemplateDecl.
      ExplicitTemplateArgumentSubstitution,

      /// We are substituting template argument determined as part of
      /// template argument deduction for either a class template
      /// partial specialization or a function template. The
      /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
      /// a TemplateDecl.
      DeducedTemplateArgumentSubstitution,

      /// We are substituting prior template arguments into a new
      /// template parameter. The template parameter itself is either a
      /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
      PriorTemplateArgumentSubstitution,

      /// We are checking the validity of a default template argument that
      /// has been used when naming a template-id.
      DefaultTemplateArgumentChecking,

      /// We are computing the exception specification for a defaulted special
      /// member function.
      ExceptionSpecEvaluation,

      /// We are instantiating the exception specification for a function
      /// template which was deferred until it was needed.
      ExceptionSpecInstantiation,

      /// We are declaring an implicit special member function.
      DeclaringSpecialMember,

      /// We are defining a synthesized function (such as a defaulted special
      /// member).
      DefiningSynthesizedFunction,

      /// Added for Template instantiation observation.
      /// Memoization means we are _not_ instantiating a template because
      /// it is already instantiated (but we entered a context where we
      /// would have had to if it was not already instantiated).
      Memoization
    } Kind;

    /// Was the enclosing context a non-instantiation SFINAE context?
    bool SavedInNonInstantiationSFINAEContext;

    /// The point of instantiation or synthesis within the source code.
    SourceLocation PointOfInstantiation;

    /// The entity that is being synthesized.
    Decl *Entity;

    /// The template (or partial specialization) in which we are
    /// performing the instantiation, for substitutions of prior template
    /// arguments.
    NamedDecl *Template;

    /// The list of template arguments we are substituting, if they
    /// are not part of the entity.
    const TemplateArgument *TemplateArgs;

    // FIXME: Wrap this union around more members, or perhaps store the
    // kind-specific members in the RAII object owning the context.
    union {
      /// The number of template arguments in TemplateArgs.
      unsigned NumTemplateArgs;

      /// The special member being declared or defined.
      CXXSpecialMember SpecialMember;
    };

    ArrayRef<TemplateArgument> template_arguments() const {
      assert(Kind != DeclaringSpecialMember);
      return {TemplateArgs, NumTemplateArgs};
    }

    /// The template deduction info object associated with the
    /// substitution or checking of explicit or deduced template arguments.
    sema::TemplateDeductionInfo *DeductionInfo;

    /// The source range that covers the construct that causes
    /// the instantiation, e.g., the template-id that causes a class
    /// template instantiation.
    SourceRange InstantiationRange;

    CodeSynthesisContext()
        : Kind(TemplateInstantiation),
          SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
          Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
          DeductionInfo(nullptr) {}

    /// Determines whether this template is an actual instantiation
    /// that should be counted toward the maximum instantiation depth.
    bool isInstantiationRecord() const;
  };

  /// List of active code synthesis contexts.
  ///
  /// This vector is treated as a stack. As synthesis of one entity requires
  /// synthesis of another, additional contexts are pushed onto the stack.
  SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

  /// Specializations whose definitions are currently being instantiated.
  llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

  /// Non-dependent types used in templates that have already been instantiated
  /// by some template instantiation.
  llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

  /// Extra modules inspected when performing a lookup during a template
  /// instantiation. Computed lazily.
  SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

  /// Cache of additional modules that should be used for name lookup
  /// within the current template instantiation. Computed lazily; use
  /// getLookupModules() to get a complete set.
  llvm::DenseSet<Module*> LookupModulesCache;

  /// Get the set of additional modules that should be checked during
  /// name lookup. A module and its imports become visible when instantiating
  /// a template defined within it.
  llvm::DenseSet<Module*> &getLookupModules();

  /// Map from the most recent declaration of a namespace to the most
  /// recent visible declaration of that namespace.
  llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

  /// Whether we are in a SFINAE context that is not associated with
  /// template instantiation.
  ///
  /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
  /// of a template instantiation or template argument deduction.
  bool InNonInstantiationSFINAEContext;

  /// The number of \p CodeSynthesisContexts that are not template
  /// instantiations and, therefore, should not be counted as part of the
  /// instantiation depth.
  ///
  /// When the instantiation depth reaches the user-configurable limit
  /// \p LangOptions::InstantiationDepth we will abort instantiation.
  // FIXME: Should we have a similar limit for other forms of synthesis?
  unsigned NonInstantiationEntries;

  /// The depth of the context stack at the point when the most recent
  /// error or warning was produced.
  ///
  /// This value is used to suppress printing of redundant context stacks
  /// when there are multiple errors or warnings in the same instantiation.
  // FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
  unsigned LastEmittedCodeSynthesisContextDepth = 0;

  /// The template instantiation callbacks to trace or track
  /// instantiations (objects can be chained).
  ///
  /// These callbacks are used to print, trace or track template
  /// instantiations as they are being constructed.
  std::vector<std::unique_ptr<TemplateInstantiationCallback>>
      TemplateInstCallbacks;

  /// The current index into pack expansion arguments that will be
  /// used for substitution of parameter packs.
  ///
  /// The pack expansion index will be -1 to indicate that parameter packs
  /// should be instantiated as themselves. Otherwise, the index specifies
  /// which argument within the parameter pack will be used for substitution.
  int ArgumentPackSubstitutionIndex;
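  // Illustrative sketch, not part of the original header: why a pack
  // substitution index is needed. 'Lists' is hypothetical example code.
  //
  //   template<typename ...Ts> struct Lists {
  //     std::tuple<std::vector<Ts>...> lists;  // the pattern std::vector<Ts>
  //     // is substituted once per pack element, with the substitution index
  //     // selecting which element of Ts to use each time
  //   };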
  /// RAII object used to change the argument pack substitution index
  /// within a \c Sema object.
  ///
  /// See \c ArgumentPackSubstitutionIndex for more information.
  class ArgumentPackSubstitutionIndexRAII {
    Sema &Self;
    int OldSubstitutionIndex;

  public:
    ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
        : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
      Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
    }

    ~ArgumentPackSubstitutionIndexRAII() {
      Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
    }
  };

  friend class ArgumentPackSubstitutionRAII;

  /// For each declaration that involved template argument deduction, the
  /// set of diagnostics that were suppressed during that template argument
  /// deduction.
  ///
  /// FIXME: Serialize this structure to the AST file.
  typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
      SuppressedDiagnosticsMap;
  SuppressedDiagnosticsMap SuppressedDiagnostics;

  /// A stack object to be created when performing template
  /// instantiation.
  ///
  /// Construction of an object of type \c InstantiatingTemplate
  /// pushes the current instantiation onto the stack of active
  /// instantiations. If the size of this stack exceeds the maximum
  /// number of recursive template instantiations, construction
  /// produces an error and isInvalid() returns true.
  ///
  /// Destruction of this object will pop the named instantiation off
  /// the stack.
  struct InstantiatingTemplate {
    /// Note that we are instantiating a class template,
    /// function template, variable template, alias template,
    /// or a member thereof.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          Decl *Entity,
                          SourceRange InstantiationRange = SourceRange());

    struct ExceptionSpecification {};
    /// Note that we are instantiating an exception specification
    /// of a function template.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          FunctionDecl *Entity, ExceptionSpecification,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating a default argument in a
    /// template-id.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          TemplateParameter Param, TemplateDecl *Template,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are substituting either explicitly-specified or
    /// deduced template arguments during function template argument deduction.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          FunctionTemplateDecl *FunctionTemplate,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          CodeSynthesisContext::SynthesisKind Kind,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating as part of template
    /// argument deduction for a class template declaration.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          TemplateDecl *Template,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating as part of template
    /// argument deduction for a class template partial
    /// specialization.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ClassTemplatePartialSpecializationDecl *PartialSpec,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating as part of template
    /// argument deduction for a variable template partial
    /// specialization.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          VarTemplatePartialSpecializationDecl *PartialSpec,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating a default argument for a function
    /// parameter.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ParmVarDecl *Param,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are substituting prior template arguments into a
    /// non-type parameter.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          NamedDecl *Template, NonTypeTemplateParmDecl *Param,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange);

    /// Note that we are substituting prior template arguments into a
    /// template template parameter.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          NamedDecl *Template, TemplateTemplateParmDecl *Param,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange);

    /// Note that we are checking the default template argument
    /// against the template parameter for a given template-id.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          TemplateDecl *Template, NamedDecl *Param,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange);

    /// Note that we have finished instantiating this template.
    void Clear();

    ~InstantiatingTemplate() { Clear(); }

    /// Determines whether we have exceeded the maximum
    /// recursive template instantiations.
    bool isInvalid() const { return Invalid; }

    /// Determine whether we are already instantiating this
    /// specialization in some surrounding active instantiation.
    bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

  private:
    Sema &SemaRef;
    bool Invalid;
    bool AlreadyInstantiating;
    bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                                 SourceRange InstantiationRange);

    InstantiatingTemplate(
        Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
        SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
        Decl *Entity, NamedDecl *Template = nullptr,
        ArrayRef<TemplateArgument> TemplateArgs = None,
        sema::TemplateDeductionInfo *DeductionInfo = nullptr);

    InstantiatingTemplate(const InstantiatingTemplate&) = delete;
    InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete;
  };

  void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
  void popCodeSynthesisContext();

  /// Determine whether we are currently performing template instantiation.
  bool inTemplateInstantiation() const {
    return CodeSynthesisContexts.size() > NonInstantiationEntries;
  }

  void PrintContextStack() {
    if (!CodeSynthesisContexts.empty() &&
        CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
      PrintInstantiationStack();
      LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
    }
    if (PragmaAttributeCurrentTargetDecl)
      PrintPragmaAttributeInstantiationPoint();
  }
  void PrintInstantiationStack();

  void PrintPragmaAttributeInstantiationPoint();

  /// Determines whether we are currently in a context where
  /// template argument substitution failures are not considered
  /// errors.
  ///
  /// \returns An empty \c Optional if we're not in a SFINAE context.
  /// Otherwise, contains a pointer that, if non-NULL, contains the nearest
  /// template-deduction context object, which can be used to capture
  /// diagnostics that will be suppressed.
  Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

  /// Determines whether we are currently in a context that
  /// is not evaluated as per C++ [expr] p5.
  bool isUnevaluatedContext() const {
    assert(!ExprEvalContexts.empty() &&
           "Must be in an expression evaluation context");
    return ExprEvalContexts.back().isUnevaluated();
  }

  /// RAII class used to determine whether SFINAE has
  /// trapped any errors that occur during template argument
  /// deduction.
  class SFINAETrap {
    Sema &SemaRef;
    unsigned PrevSFINAEErrors;
    bool PrevInNonInstantiationSFINAEContext;
    bool PrevAccessCheckingSFINAE;
    bool PrevLastDiagnosticIgnored;

  public:
    explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
        : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
          PrevInNonInstantiationSFINAEContext(
              SemaRef.InNonInstantiationSFINAEContext),
          PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
          PrevLastDiagnosticIgnored(
              SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
      if (!SemaRef.isSFINAEContext())
        SemaRef.InNonInstantiationSFINAEContext = true;
      SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
    }

    ~SFINAETrap() {
      SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
      SemaRef.InNonInstantiationSFINAEContext =
          PrevInNonInstantiationSFINAEContext;
      SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
      SemaRef.getDiagnostics().setLastDiagnosticIgnored(
          PrevLastDiagnosticIgnored);
    }

    /// Determine whether any SFINAE errors have been trapped.
    bool hasErrorOccurred() const {
      return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
    }
  };

  /// RAII class used to indicate that we are performing provisional
  /// semantic analysis to determine the validity of a construct, so
  /// typo-correction and diagnostics in the immediate context (not within
  /// implicitly-instantiated templates) should be suppressed.
  class TentativeAnalysisScope {
    Sema &SemaRef;
    // FIXME: Using a SFINAETrap for this is a hack.
    SFINAETrap Trap;
    bool PrevDisableTypoCorrection;

  public:
    explicit TentativeAnalysisScope(Sema &SemaRef)
        : SemaRef(SemaRef), Trap(SemaRef, true),
          PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
      SemaRef.DisableTypoCorrection = true;
    }
    ~TentativeAnalysisScope() {
      SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
    }
  };

  /// The current instantiation scope used to store local
  /// variables.
  LocalInstantiationScope *CurrentInstantiationScope;

  /// Tracks whether we are in a context where typo correction is
  /// disabled.
  bool DisableTypoCorrection;

  /// The number of typos corrected by CorrectTypo.
  unsigned TyposCorrected;

  typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
  typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

  /// A cache containing identifiers for which typo correction failed and
  /// their locations, so that repeated attempts to correct an identifier in a
  /// given location are ignored if typo correction already failed for it.
  IdentifierSourceLocations TypoCorrectionFailures;

  /// Worker object for performing CFG-based warnings.
  sema::AnalysisBasedWarnings AnalysisWarnings;
  threadSafety::BeforeSet *ThreadSafetyDeclCache;
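  // Illustrative sketch, not part of the original header: the language-level
  // behavior the SFINAE machinery above implements. 'pick' is hypothetical
  // example code.
  //
  //   template<typename T> typename T::value_type pick(T);  // (1)
  //   void pick(...);                                       // (2)
  //
  //   pick(0);  // substituting T=int into (1) fails silently ('int' has no
  //             // ::value_type); the error is trapped, overload (2) is
  //             // chosen, and no diagnostic is emitted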
  /// An entity for which implicit template instantiation is required.
  ///
  /// The source location associated with the declaration is the first place in
  /// the source code where the declaration was "used". It is not necessarily
  /// the point of instantiation (which will be either before or after the
  /// namespace-scope declaration that triggered this implicit instantiation).
  /// However, it is the location that diagnostics should generally refer to,
  /// because users will need to know what code triggered the instantiation.
  typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

  /// The queue of implicit template instantiations that are required
  /// but have not yet been performed.
  std::deque<PendingImplicitInstantiation> PendingInstantiations;

  /// Queue of implicit template instantiations that cannot be performed
  /// eagerly.
  SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

  class GlobalEagerInstantiationScope {
  public:
    GlobalEagerInstantiationScope(Sema &S, bool Enabled)
        : S(S), Enabled(Enabled) {
      if (!Enabled) return;

      SavedPendingInstantiations.swap(S.PendingInstantiations);
      SavedVTableUses.swap(S.VTableUses);
    }

    void perform() {
      if (Enabled) {
        S.DefineUsedVTables();
        S.PerformPendingInstantiations();
      }
    }

    ~GlobalEagerInstantiationScope() {
      if (!Enabled) return;

      // Restore the set of pending vtables.
      assert(S.VTableUses.empty() &&
             "VTableUses should be empty before it is discarded.");
      S.VTableUses.swap(SavedVTableUses);

      // Restore the set of pending implicit instantiations.
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    }

  private:
    Sema &S;
    SmallVector<VTableUse, 16> SavedVTableUses;
    std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
    bool Enabled;
  };

  /// The queue of implicit template instantiations that are required
  /// and must be performed within the current local scope.
  ///
  /// This queue is only used for member functions of local classes in
  /// templates, which must be instantiated in the same scope as their
  /// enclosing function, so that they can reference function-local
  /// types, static variables, enumerators, etc.
  std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

  class LocalEagerInstantiationScope {
  public:
    LocalEagerInstantiationScope(Sema &S) : S(S) {
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

    void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

    ~LocalEagerInstantiationScope() {
      assert(S.PendingLocalImplicitInstantiations.empty() &&
             "there shouldn't be any pending local implicit instantiations");
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

  private:
    Sema &S;
    std::deque<PendingImplicitInstantiation>
        SavedPendingLocalImplicitInstantiations;
  };

  /// A helper class for building up ExtParameterInfos.
  class ExtParameterInfoBuilder {
    SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
    bool HasInteresting = false;

  public:
    /// Set the ExtParameterInfo for the parameter at the given index.
    void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
      assert(Infos.size() <= index);
      Infos.resize(index);
      Infos.push_back(info);

      if (!HasInteresting)
        HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
    }

    /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
    /// ExtParameterInfo array we've built up.
    const FunctionProtoType::ExtParameterInfo *
    getPointerOrNull(unsigned numParams) {
      if (!HasInteresting) return nullptr;
      Infos.resize(numParams);
      return Infos.data();
    }
  };

  void PerformPendingInstantiations(bool LocalOnly = false);

  TypeSourceInfo *SubstType(TypeSourceInfo *T,
                            const MultiLevelTemplateArgumentList &TemplateArgs,
                            SourceLocation Loc, DeclarationName Entity,
                            bool AllowDeducedTST = false);

  QualType SubstType(QualType T,
                     const MultiLevelTemplateArgumentList &TemplateArgs,
                     SourceLocation Loc, DeclarationName Entity);

  TypeSourceInfo *SubstType(TypeLoc TL,
                            const MultiLevelTemplateArgumentList &TemplateArgs,
                            SourceLocation Loc, DeclarationName Entity);

  TypeSourceInfo *SubstFunctionDeclType(
      TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs,
      SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext,
      Qualifiers ThisTypeQuals);
  void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                          const MultiLevelTemplateArgumentList &Args);
  bool SubstExceptionSpec(SourceLocation Loc,
                          FunctionProtoType::ExceptionSpecInfo &ESI,
                          SmallVectorImpl<QualType> &ExceptionStorage,
                          const MultiLevelTemplateArgumentList &Args);
  ParmVarDecl *SubstParmVarDecl(
      ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs,
      int indexAdjustment, Optional<unsigned> NumExpansions,
      bool ExpectParameterPack);
  bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
                      const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
                      const MultiLevelTemplateArgumentList &TemplateArgs,
                      SmallVectorImpl<QualType> &ParamTypes,
                      SmallVectorImpl<ParmVarDecl *> *OutParams,
                      ExtParameterInfoBuilder &ParamInfos);
  ExprResult SubstExpr(Expr *E,
                       const MultiLevelTemplateArgumentList &TemplateArgs);

  /// Substitute the given template arguments into a list of
  /// expressions, expanding pack expansions if required.
  ///
  /// \param Exprs The list of expressions to substitute into.
  ///
  /// \param IsCall Whether this is some form of call, in which case
  /// default arguments will be dropped.
  ///
  /// \param TemplateArgs The set of template arguments to substitute.
  ///
  /// \param Outputs Will receive all of the substituted arguments.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
                  const MultiLevelTemplateArgumentList &TemplateArgs,
                  SmallVectorImpl<Expr *> &Outputs);

  StmtResult SubstStmt(Stmt *S,
                       const MultiLevelTemplateArgumentList &TemplateArgs);

  TemplateParameterList *
  SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
                      const MultiLevelTemplateArgumentList &TemplateArgs);

  Decl *SubstDecl(Decl *D, DeclContext *Owner,
                  const MultiLevelTemplateArgumentList &TemplateArgs);

  ExprResult SubstInitializer(Expr *E,
                              const MultiLevelTemplateArgumentList &TemplateArgs,
                              bool CXXDirectInit);

  bool
  SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
                      const MultiLevelTemplateArgumentList &TemplateArgs);

  bool InstantiateClass(SourceLocation PointOfInstantiation,
                        CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
                        const MultiLevelTemplateArgumentList &TemplateArgs,
                        TemplateSpecializationKind TSK,
                        bool Complain = true);

  bool InstantiateEnum(SourceLocation PointOfInstantiation,
                       EnumDecl *Instantiation, EnumDecl *Pattern,
                       const MultiLevelTemplateArgumentList &TemplateArgs,
                       TemplateSpecializationKind TSK);

  bool InstantiateInClassInitializer(
      SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
      FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);

  struct LateInstantiatedAttribute {
    const Attr *TmplAttr;
    LocalInstantiationScope *Scope;
    Decl *NewDecl;

    LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                              Decl *D)
        : TmplAttr(A), Scope(S), NewDecl(D) { }
  };
  typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;

  void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
                        const Decl *Pattern, Decl *Inst,
                        LateInstantiatedAttrVec *LateAttrs = nullptr,
                        LocalInstantiationScope *OuterMostScope = nullptr);

  void
  InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
                          const Decl *Pattern, Decl *Inst,
                          LateInstantiatedAttrVec *LateAttrs = nullptr,
                          LocalInstantiationScope *OuterMostScope = nullptr);

  bool usesPartialOrExplicitSpecialization(
      SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);

  bool InstantiateClassTemplateSpecialization(
      SourceLocation PointOfInstantiation,
      ClassTemplateSpecializationDecl *ClassTemplateSpec,
      TemplateSpecializationKind TSK, bool Complain = true);

  void InstantiateClassMembers(SourceLocation PointOfInstantiation,
                               CXXRecordDecl *Instantiation,
                               const MultiLevelTemplateArgumentList &TemplateArgs,
                               TemplateSpecializationKind TSK);

  void InstantiateClassTemplateSpecializationMembers(
      SourceLocation PointOfInstantiation,
      ClassTemplateSpecializationDecl *ClassTemplateSpec,
      TemplateSpecializationKind TSK);

  NestedNameSpecifierLoc
  SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
                              const MultiLevelTemplateArgumentList &TemplateArgs);

  DeclarationNameInfo
  SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
                           const MultiLevelTemplateArgumentList &TemplateArgs);
  TemplateName
  SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
                    SourceLocation Loc,
                    const MultiLevelTemplateArgumentList &TemplateArgs);
  bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
             TemplateArgumentListInfo &Result,
             const MultiLevelTemplateArgumentList &TemplateArgs);

  void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
                                FunctionDecl *Function);
  FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
                                               const TemplateArgumentList *Args,
                                               SourceLocation Loc);
  void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
                                     FunctionDecl *Function,
                                     bool Recursive = false,
                                     bool DefinitionRequired = false,
                                     bool AtEndOfTU = false);
  VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
      VarTemplateDecl *VarTemplate, VarDecl *FromVar,
      const TemplateArgumentList &TemplateArgList,
      const TemplateArgumentListInfo &TemplateArgsInfo,
      SmallVectorImpl<TemplateArgument> &Converted,
      SourceLocation PointOfInstantiation, void *InsertPos,
      LateInstantiatedAttrVec *LateAttrs = nullptr,
      LocalInstantiationScope *StartingScope = nullptr);
  VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
      VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
      const MultiLevelTemplateArgumentList &TemplateArgs);
  void
  BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
                             const MultiLevelTemplateArgumentList &TemplateArgs,
                             LateInstantiatedAttrVec *LateAttrs,
                             DeclContext *Owner,
                             LocalInstantiationScope *StartingScope,
                             bool InstantiatingVarTemplate = false,
                             VarTemplateSpecializationDecl *PrevVTSD = nullptr);

  VarDecl *getVarTemplateSpecialization(
      VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
      const DeclarationNameInfo &MemberNameInfo,
      SourceLocation TemplateKWLoc);

  void InstantiateVariableInitializer(
      VarDecl *Var, VarDecl *OldVar,
      const MultiLevelTemplateArgumentList &TemplateArgs);
  void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
                                     VarDecl *Var, bool Recursive = false,
                                     bool DefinitionRequired = false,
                                     bool AtEndOfTU = false);

  void InstantiateMemInitializers(CXXConstructorDecl *New,
                                  const CXXConstructorDecl *Tmpl,
                                  const MultiLevelTemplateArgumentList &TemplateArgs);

  NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
                                  const MultiLevelTemplateArgumentList &TemplateArgs,
                                  bool FindingInstantiatedContext = false);
  DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
                                       const MultiLevelTemplateArgumentList &TemplateArgs);
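  // Illustrative sketch, not part of the original header: an implicit
  // instantiation of a function template definition. 'twice' is hypothetical
  // example code.
  //
  //   template<typename T> T twice(T v) { return v + v; }
  //   int n = twice(21);  // 'used' here; the definition of twice<int> is
  //                       // queued as a pending implicit instantiation and
  //                       // performed later (e.g., at end of the TU)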
  // Objective-C declarations.
  enum ObjCContainerKind {
    OCK_None = -1,
    OCK_Interface = 0,
    OCK_Protocol,
    OCK_Category,
    OCK_ClassExtension,
    OCK_Implementation,
    OCK_CategoryImplementation
  };
  ObjCContainerKind getObjCContainerKind() const;

  DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance,
                                SourceLocation varianceLoc, unsigned index,
                                IdentifierInfo *paramName,
                                SourceLocation paramLoc,
                                SourceLocation colonLoc, ParsedType typeBound);

  ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
                                            ArrayRef<Decl *> typeParams,
                                            SourceLocation rAngleLoc);

  void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);

  Decl *ActOnStartClassInterface(
      Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
      SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
      IdentifierInfo *SuperName, SourceLocation SuperLoc,
      ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
      Decl *const *ProtoRefs, unsigned NumProtoRefs,
      const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
      const ParsedAttributesView &AttrList);

  void ActOnSuperClassOfClassInterface(Scope *S,
                                       SourceLocation AtInterfaceLoc,
                                       ObjCInterfaceDecl *IDecl,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *SuperName,
                                       SourceLocation SuperLoc,
                                       ArrayRef<ParsedType> SuperTypeArgs,
                                       SourceRange SuperTypeArgsRange);

  void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                               SmallVectorImpl<SourceLocation> &ProtocolLocs,
                               IdentifierInfo *SuperName,
                               SourceLocation SuperLoc);

  Decl *ActOnCompatibilityAlias(
      SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName,
      SourceLocation AliasLocation, IdentifierInfo *ClassName,
      SourceLocation ClassLocation);

  bool CheckForwardProtocolDeclarationForCircularDependency(
      IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc,
      const ObjCList<ObjCProtocolDecl> &PList);

  Decl *ActOnStartProtocolInterface(
      SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
      SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
      unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
      SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);

  Decl *ActOnStartCategoryInterface(
      SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
      SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
      IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
      Decl *const *ProtoRefs, unsigned NumProtoRefs,
      const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
      const ParsedAttributesView &AttrList);

  Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                      IdentifierInfo *ClassName,
                                      SourceLocation ClassLoc,
                                      IdentifierInfo *SuperClassname,
                                      SourceLocation SuperClassLoc,
                                      const ParsedAttributesView &AttrList);

  Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                         IdentifierInfo *ClassName,
                                         SourceLocation ClassLoc,
                                         IdentifierInfo *CatName,
                                         SourceLocation CatLoc,
                                         const ParsedAttributesView &AttrList);

  DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                               ArrayRef<Decl *> Decls);

  DeclGroupPtrTy ActOnForwardClassDeclaration(
      SourceLocation Loc, IdentifierInfo **IdentList,
      SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists,
      unsigned NumElts);

  DeclGroupPtrTy
  ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
                                  ArrayRef<IdentifierLocPair> IdentList,
                                  const ParsedAttributesView &attrList);

  void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                               ArrayRef<IdentifierLocPair> ProtocolId,
                               SmallVectorImpl<Decl *> &Protocols);
  void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
                                    SourceLocation ProtocolLoc,
                                    IdentifierInfo *TypeArgId,
                                    SourceLocation TypeArgLoc,
                                    bool SelectProtocolFirst = false);

  /// Given a list of identifiers (and their locations), resolve the
  /// names to either Objective-C protocol qualifiers or type
  /// arguments, as appropriate.
  void actOnObjCTypeArgsOrProtocolQualifiers(
      Scope *S, ParsedType baseType, SourceLocation lAngleLoc,
      ArrayRef<IdentifierInfo *> identifiers,
      ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc,
      SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs,
      SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc,
      SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc,
      bool warnOnIncompleteProtocols);

  /// Build an Objective-C protocol-qualified 'id' type where no
  /// base type was specified.
  TypeResult actOnObjCProtocolQualifierType(
      SourceLocation lAngleLoc, ArrayRef<Decl *> protocols,
      ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc);

  /// Build a specialized and/or protocol-qualified Objective-C type.
  TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
      Scope *S, SourceLocation Loc, ParsedType BaseType,
      SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs,
      SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc,
      ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs,
      SourceLocation ProtocolRAngleLoc);

  /// Build an Objective-C type parameter type.
  QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                  SourceLocation ProtocolLAngleLoc,
                                  ArrayRef<ObjCProtocolDecl *> Protocols,
                                  ArrayRef<SourceLocation> ProtocolLocs,
                                  SourceLocation ProtocolRAngleLoc,
                                  bool FailOnError = false);

  /// Build an Objective-C object pointer type.
  QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc,
                               SourceLocation TypeArgsLAngleLoc,
                               ArrayRef<TypeSourceInfo *> TypeArgs,
                               SourceLocation TypeArgsRAngleLoc,
                               SourceLocation ProtocolLAngleLoc,
                               ArrayRef<ObjCProtocolDecl *> Protocols,
                               ArrayRef<SourceLocation> ProtocolLocs,
                               SourceLocation ProtocolRAngleLoc,
                               bool FailOnError = false);

  /// Ensure attributes are consistent with type.
  ///
  /// \param [in, out] Attributes The attributes to check; they will
  /// be modified to be consistent with \p PropertyTy.
  void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc,
                                   unsigned &Attributes,
                                   bool propertyInPrimaryClass);
/// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
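/// For illustration: in '[NSString string]' the receiver 'NSString' is a
/// type name, so the send is parsed as a class message (a sketch).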
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . 
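/// For illustration (a sketch; the function name is illustrative):
/// \code
/// #pragma GCC visibility push(hidden)
/// void InternalHelper();
/// #pragma GCC visibility pop
/// \endcode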
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on".
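/// For illustration (a sketch; the function name is illustrative):
/// \code
/// #pragma clang optimize off
/// void NotOptimized();   // declared while the pragma is active
/// #pragma clang optimize on
/// \endcode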
SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration.
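/// For illustration (a sketch; the declaration is illustrative):
/// \code
/// __attribute__((amdgpu_waves_per_eu(1, 4)))  // min 1, max 4 waves per EU
/// void Kernel();
/// \endcode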
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. 
If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckForDelayedContext = true); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckCaller = true); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Finishes analysis of the deferred function calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); /// Marks all the functions that might be required for the currently active /// OpenMP context. void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse); public: /// Struct to store the context selectors info for declare variant directive.
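/// For illustration, the directive whose selectors this data describes
/// (a sketch; 'base' and 'base_avx512' are illustrative names):
/// \code
/// #pragma omp declare variant(base_avx512) \
///     match(implementation = {vendor(llvm)})
/// void base(void);
/// \endcode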
struct OpenMPDeclareVariantCtsSelectorData { OMPDeclareVariantAttr::CtxSelectorSetType CtxSet = OMPDeclareVariantAttr::CtxSetUnknown; OMPDeclareVariantAttr::CtxSelectorType Ctx = OMPDeclareVariantAttr::CtxUnknown; StringRef ImplVendor; ExprResult CtxScore; explicit OpenMPDeclareVariantCtsSelectorData() = default; explicit OpenMPDeclareVariantCtsSelectorData( OMPDeclareVariantAttr::CtxSelectorSetType CtxSet, OMPDeclareVariantAttr::CtxSelectorType Ctx, StringRef ImplVendor, ExprResult CtxScore) : CtxSet(CtxSet), Ctx(Ctx), ImplVendor(ImplVendor), CtxScore(CtxScore) { } }; /// Checks if the variant/multiversion functions are compatible. bool areMultiversionVariantFunctionsCompatible( const FunctionDecl *OldFD, const FunctionDecl *NewFD, const PartialDiagnostic &NoProtoDiagID, const PartialDiagnosticAt &NoteCausedDiagIDAt, const PartialDiagnosticAt &NoSupportDiagIDAt, const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported, bool ConstexprSupported); /// Function tries to capture lambda's captured variables in the OpenMP region /// before the original lambda is captured. void tryCaptureOpenMPLambdas(ValueDecl *V); /// Return true if the provided declaration \a D should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. /// \param OpenMPCaptureLevel Capture level within an OpenMP construct. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level, unsigned OpenMPCaptureLevel) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for which the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses.
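// For illustration, a combined directive such as
//   #pragma omp parallel for if(n > 64) schedule(static, 8) reduction(+ : sum)
// is handled as one ActOnOpenMP*Directive call plus one ActOnOpenMP*Clause
// call per clause (a sketch; the clause values are illustrative).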
/// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on the 'requires' directive. OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '#pragma omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Searches for the provided declaration name for OpenMP declare target /// directive.
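/// For illustration (a sketch; the declarations are illustrative):
/// \code
/// #pragma omp declare target
/// int DeviceData[64];
/// void DeviceFn();
/// #pragma omp end declare target
/// \endcode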
NamedDecl * lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, NamedDeclSetType &SameDirectiveDecls); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement.
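/// For illustration (a sketch; the arrays are illustrative):
/// \code
/// #pragma omp parallel for
/// for (int I = 0; I < N; ++I)
///   A[I] = B[I] + C[I];
/// \endcode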
StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement.
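/// For illustration (a sketch; the call is illustrative):
/// \code
/// #pragma omp target
/// #pragma omp teams num_teams(4)
/// Body();
/// \endcode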
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement.
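/// For illustration (a sketch; the arrays are illustrative):
/// \code
/// #pragma omp target
/// #pragma omp teams distribute parallel for
/// for (int I = 0; I < N; ++I)
///   A[I] += B[I];
/// \endcode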
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); /// Checks '\#pragma omp declare variant' variant function and original /// functions after parsing of the associated method/function. /// \param DG Function declaration to which the declare variant directive is /// applied. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \returns None, if the function/variant function are not compatible with /// the pragma, pair of original function/variant ref expression otherwise. Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction( DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. /// \param FD Function declaration to which the declare variant directive is /// applied. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param Data Set of context-specific data for the specified context /// selector. void ActOnOpenMPDeclareVariantDirective( FunctionDecl *FD, Expr *VariantRef, SourceRange SR, const Sema::OpenMPDeclareVariantCtsSelectorData &Data); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause.
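/// For illustration (a sketch; 'Buf' is an illustrative variable):
/// \code
/// int Buf[1024];
/// #pragma omp allocate(Buf) allocator(omp_high_bw_mem_alloc)
/// \endcode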
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. 
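/// For illustration (a sketch; the call is illustrative):
/// \code
/// #pragma omp task untied
/// Work();
/// \endcode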
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_address' clause. OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'unified_shared_memory' clause. OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'reverse_offload' clause. OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'dynamic_allocators' clause. OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause.
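/// For illustration (a sketch; the variables are illustrative):
/// \code
/// #pragma omp parallel shared(Total) private(I)
/// { /* ... */ }
/// \endcode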
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. 
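/// For illustration (a sketch; the call is illustrative):
/// \code
/// #pragma omp task priority(10)
/// Work();
/// \endcode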
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. CCK_ForBuiltinOverloadedOp }; static bool isCast(CheckedConversionKind CCK) { return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast || CCK == CCK_OtherCast; } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of a unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion.
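// For illustration: given 'int A[4]; int F(void);', using 'A' or 'F' as an
// expression operand decays them to 'int *' and 'int (*)(void)' respectively
// (a sketch; the declarations are illustrative).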
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects argument expressions for the various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension.
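/// For illustration: 'char *P = 42;' (a sketch; 'P' is illustrative).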
IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. 
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
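  // Illustrative sketch (not part of the original header): C assignments
  // that the AssignConvertType enum above classifies. Each extension case
  // below is accepted with a warning rather than an error:
  //
  //   int i; char *p; unsigned *u; int *s; const char **cpp; char **pp;
  //   i = p;    // PointerToInt
  //   p = 42;   // IntToPointer
  //   u = s;    // IncompatiblePointerSign
  //   cpp = pp; // IncompatibleNestedPointerQualifiers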
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
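  // Illustrative sketch (not part of the original header) of the vector
  // casts validated by CheckVectorCast below, written with the GCC/Clang
  // vector extension; the bitcast is permitted because both types occupy
  // the same number of bytes (16):
  //
  //   typedef int   v4i __attribute__((vector_size(16)));
  //   typedef short v8s __attribute__((vector_size(16)));
  //   v8s bits(v4i x) { return (v8s)x; }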
  QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                               SourceLocation Loc, bool IsCompAssign,
                               bool AllowBothBool, bool AllowBoolConversion);
  QualType GetSignedVectorType(QualType V);
  QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc,
                                      BinaryOperatorKind Opc);
  QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc);

  bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
  bool isLaxVectorConversion(QualType srcType, QualType destType);

  /// type checking declaration initializers (C99 6.7.8)
  bool CheckForConstantInitializer(Expr *e, QualType t);

  // type checking C++ declaration initializers (C++ [dcl.init]).

  /// ReferenceCompareResult - Expresses the result of comparing two
  /// types (cv1 T1 and cv2 T2) to determine their compatibility for the
  /// purposes of initialization by reference (C++ [dcl.init.ref]p4).
  enum ReferenceCompareResult {
    /// Ref_Incompatible - The two types are incompatible, so direct
    /// reference binding is not possible.
    Ref_Incompatible = 0,
    /// Ref_Related - The two types are reference-related, which means
    /// that their unqualified forms (T1 and T2) are either the same
    /// or T1 is a base class of T2.
    Ref_Related,
    /// Ref_Compatible - The two types are reference-compatible.
    Ref_Compatible
  };

  ReferenceCompareResult
  CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
                               bool &DerivedToBase, bool &ObjCConversion,
                               bool &ObjCLifetimeConversion);

  ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                                 Expr *CastExpr, CastKind &CastKind,
                                 ExprValueKind &VK, CXXCastPath &Path);

  /// Force an expression with unknown-type to an expression of the
  /// given type.
  ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

  /// Type-check an expression that's being passed to an
  /// __unknown_anytype parameter.
  ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result,
                                QualType &paramType);

  // CheckVectorCast - check type constraints for vectors.
  // Since vectors are an extension, there is no C standard reference for
  // this. We allow casting between vectors and integer datatypes of the
  // same size.
  // returns true if the cast is invalid
  bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                       CastKind &Kind);

  /// Prepare `SplattedExpr` for a vector splat operation, adding
  /// implicit casts if necessary.
  ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);

  // CheckExtVectorCast - check type constraints for extended vectors.
  // Since vectors are an extension, there is no C standard reference for
  // this. We allow casting between vectors and integer datatypes of the
  // same size, or vectors and the element type of that vector.
  // returns the cast expr
  ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                                CastKind &Kind);

  ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
                                        SourceLocation LParenLoc,
                                        Expr *CastExpr,
                                        SourceLocation RParenLoc);

  enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };

  /// Checks for invalid conversions and casts between
  /// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. 
}; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. 
  /// Can optionally return whether the bit-field is of width 0
  ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                            QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                            bool *ZeroWidth = nullptr);

private:
  unsigned ForceCUDAHostDeviceDepth = 0;

public:
  /// Increments our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. So long as this count is greater
  /// than zero, all functions encountered will be __host__ __device__.
  void PushForceCUDAHostDevice();

  /// Decrements our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. Returns false if the count is 0
  /// before decrementing, so you can emit an error.
  bool PopForceCUDAHostDevice();

  /// Diagnostics that are emitted only if we discover that the given function
  /// must be codegen'ed. Because handling these correctly adds overhead to
  /// compilation, this is currently only enabled for CUDA compilations.
  llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
                 std::vector<PartialDiagnosticAt>>
      DeviceDeferredDiags;

  /// A pair of a canonical FunctionDecl and a SourceLocation. When used as
  /// the key in a hashtable, both the FD and location are hashed.
  struct FunctionDeclAndLoc {
    CanonicalDeclPtr<FunctionDecl> FD;
    SourceLocation Loc;
  };

  /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
  /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting
  /// the same deferred diag twice.
  llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

  /// An inverse call graph, mapping known-emitted functions to one of their
  /// known-emitted callers (plus the location of the call).
  ///
  /// Functions that we can tell a priori must be emitted aren't added to this
  /// map.
  llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Caller = */ FunctionDeclAndLoc>
      DeviceKnownEmittedFns;

  /// A partial call graph maintained during CUDA/OpenMP device code
  /// compilation to support deferred diagnostics.
  ///
  /// Functions are only added here if, at the time they're considered, they
  /// are not known-emitted. As soon as we discover that a function is
  /// known-emitted, we remove it and everything it transitively calls from
  /// this set and add those functions to DeviceKnownEmittedFns.
  llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
                                                 SourceLocation>>
      DeviceCallGraph;

  /// Diagnostic builder for CUDA/OpenMP devices errors which may or may not
  /// be deferred.
  ///
  /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
  /// which are not allowed to appear inside __device__ functions and are
  /// allowed to appear in __host__ __device__ functions only if the
  /// host+device function is never codegen'ed.
  ///
  /// To handle this, we use the notion of "deferred diagnostics", where we
  /// attach a diagnostic to a FunctionDecl that's emitted iff it's
  /// codegen'ed.
  ///
  /// This class lets you emit either a regular diagnostic, a deferred
  /// diagnostic, or no diagnostic at all, according to an argument you pass
  /// to its constructor, thus simplifying the process of creating these
  /// "maybe deferred" diagnostics.
  class DeviceDiagBuilder {
  public:
    enum Kind {
      /// Emit no diagnostics.
      K_Nop,
      /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
      K_Immediate,
      /// Emit the diagnostic immediately, and, if it's a warning or error,
      /// also emit a call stack showing how this function can be reached by
      /// an a priori known-emitted function.
      K_ImmediateWithCallStack,
      /// Create a deferred diagnostic, which is emitted only if the function
      /// it's attached to is codegen'ed. Also emit a call stack as with
      /// K_ImmediateWithCallStack.
      K_Deferred
    };

    DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                      FunctionDecl *Fn, Sema &S);
    DeviceDiagBuilder(DeviceDiagBuilder &&D);
    DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
    ~DeviceDiagBuilder();

    /// Convertible to bool: True if we immediately emitted an error, false if
    /// we didn't emit an error or we created a deferred error.
    ///
    /// Example usage:
    ///
    ///   if (DeviceDiagBuilder(...) << foo << bar)
    ///     return ExprError();
    ///
    /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you
    /// probably want to use these instead of creating a DeviceDiagBuilder
    /// yourself.
    operator bool() const { return ImmediateDiag.hasValue(); }

    template <typename T>
    friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                               const T &Value) {
      if (Diag.ImmediateDiag.hasValue())
        *Diag.ImmediateDiag << Value;
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
            << Value;
      return Diag;
    }

  private:
    Sema &S;
    SourceLocation Loc;
    unsigned DiagID;
    FunctionDecl *Fn;
    bool ShowCallStack;

    // Invariant: At most one of these Optionals has a value.
    // FIXME: Switch these to a Variant once that exists.
    llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
    llvm::Optional<unsigned> PartialDiagId;
  };

  /// Indicate that this function (and thus everything it transitively calls)
  /// will be codegen'ed, and emit any deferred diagnostics on this function
  /// and its (transitive) callees.
  void markKnownEmitted(
      Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
      SourceLocation OrigLoc,
      const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);

  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurContext is a __host__ function, does not emit any diagnostics.
  /// - If CurContext is a __device__ or __global__ function, emits the
  ///   diagnostics immediately.
  /// - If CurContext is a __host__ __device__ function and we are compiling
  ///   for the device, creates a diagnostic which is emitted if and when we
  ///   realize that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in CUDA device code.
  ///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as host code".
  ///
  /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
  DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);

  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurContext is a `declare target` function or it is known that the
  ///   function is emitted for the device, emits the diagnostics immediately.
  /// - If CurContext is a non-`declare target` function and we are compiling
  ///   for the device, creates a diagnostic which is emitted if and when we
  ///   realize that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in NVPTX device code.
  ///  if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc,
                                           unsigned DiagID);

  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as host code".
  ///
  /// - If CurContext is a `declare target` function or it is known that the
  ///   function is emitted for the host, emits the diagnostics immediately.
  /// - If CurContext is a non-host function, just ignore it.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in NVPTX device code.
  ///  if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);

  DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);

  enum CUDAFunctionTarget {
    CFT_Device,
    CFT_Global,
    CFT_Host,
    CFT_HostDevice,
    CFT_InvalidTarget
  };

  /// Determines whether the given function is a CUDA device/host/kernel/etc.
  /// function.
  ///
  /// Use this rather than examining the function's attributes yourself -- you
  /// will get it wrong. Returns CFT_Host if D is null.
  CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
                                        bool IgnoreImplicitHDAttr = false);
  CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);

  /// Gets the CUDA target for the current context.
  CUDAFunctionTarget CurrentCUDATarget() {
    return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
  }

  // CUDA function call preference. Must be ordered numerically from
  // worst to best.
  enum CUDAFunctionPreference {
    CFP_Never,      // Invalid caller/callee combination.
    CFP_WrongSide,  // Calls from host-device to host or device
                    // function that do not match current compilation
                    // mode.
    CFP_HostDevice, // Any calls to host/device functions.
    CFP_SameSide,   // Calls from host-device to host or device
                    // function matching current compilation mode.
    CFP_Native,     // host-to-host or device-to-device calls.
  };

  /// Identifies relative preference of a given Caller/Callee
  /// combination, based on their host/device attributes.
  /// \param Caller function which needs address of \p Callee.
  ///               nullptr in case of global context.
  /// \param Callee target function
  ///
  /// \returns preference value for particular Caller/Callee combination.
  CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee);

  /// Determines whether Caller may invoke Callee, based on their CUDA
  /// host/device attributes. Returns false if the call is not allowed.
  ///
  /// Note: Will return true for CFP_WrongSide calls. These may appear in
  /// semantically correct CUDA programs, but only if they're never
  /// codegen'ed.
  bool IsAllowedCUDACall(const FunctionDecl *Caller,
                         const FunctionDecl *Callee) {
    return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
  }

  /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
  /// depending on FD and the current compilation settings.
  void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                   const LookupResult &Previous);

public:
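  // Illustrative sketch (not part of the original header) of how the
  // preference ordering above applies in CUDA source, assuming we are
  // compiling for the device:
  //
  //   __device__ void d();
  //   __host__ void h();
  //   __host__ __device__ void hd() {
  //     d();  // CFP_SameSide: matches the current (device) compilation mode.
  //     h();  // CFP_WrongSide: legal only if hd() is never codegen'ed
  //           // for the device.
  //   }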
  /// Check whether we're allowed to call Callee from the current context.
  ///
  /// - If the call is never allowed in a semantically-correct program
  ///   (CFP_Never), emits an error and returns false.
  ///
  /// - If the call is allowed in semantically-correct programs, but only if
  ///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic
  ///   to be emitted if and when the caller is codegen'ed, and returns true.
  ///
  ///   Will only create deferred diagnostics for a given SourceLocation once,
  ///   so you can safely call this multiple times without generating
  ///   duplicate deferred errors.
  ///
  /// - Otherwise, returns true without emitting any diagnostics.
  bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);

  /// Set __device__ or __host__ __device__ attributes on the given lambda
  /// operator() method.
  ///
  /// CUDA lambdas declared inside __device__ or __global__ functions inherit
  /// the __device__ attribute. Similarly, lambdas inside __host__ __device__
  /// functions become __host__ __device__ themselves.
  void CUDASetLambdaAttrs(CXXMethodDecl *Method);

  /// Finds a function in \p Matches with highest calling priority
  /// from \p Caller context and erases all functions with lower
  /// calling priority.
  void EraseUnwantedCUDAMatches(
      const FunctionDecl *Caller,
      SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);

  /// Given an implicit special member, infer its CUDA target from the
  /// calls it needs to make to underlying base/field special members.
  /// \param ClassDecl the class for which the member is being created.
  /// \param CSM the kind of special member.
  /// \param MemberDecl the special member itself.
  /// \param ConstRHS true if this is a copy operation with a const object on
  ///        its RHS.
  /// \param Diagnose true if this call should emit diagnostics.
  /// \return true if there was an error inferring.
  /// The result of this call is implicit CUDA target attribute(s) attached to
  /// the member declaration.
  bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                               CXXSpecialMember CSM,
                                               CXXMethodDecl *MemberDecl,
                                               bool ConstRHS,
                                               bool Diagnose);

  /// \return true if \p CD can be considered empty according to CUDA
  /// (E.2.3.1 in CUDA 7.5 Programming guide).
  bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
  bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);

  // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
  // case of error emits appropriate diagnostic and invalidates \p Var.
  //
  // \details CUDA allows only empty constructors as initializers for global
  // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to
  // all __shared__ variables whether they are local or not (they all are
  // implicitly static in CUDA). One exception is that CUDA allows constant
  // initializers for __constant__ and __device__ variables.
  void checkAllowedCUDAInitializer(VarDecl *VD);

  /// Check whether NewFD is a valid overload for CUDA. Emits
  /// diagnostics and invalidates NewFD if not.
  void checkCUDATargetOverload(FunctionDecl *NewFD,
                               const LookupResult &Previous);

  /// Copies target attributes from the template TD to the function FD.
  void inheritCUDATargetAttrs(FunctionDecl *FD,
                              const FunctionTemplateDecl &TD);

  /// Returns the name of the launch configuration function. This is the name
  /// of the function that will be called to configure kernel call, with the
  /// parameters specified via <<<>>>.
  std::string getCudaConfigureFuncName() const;

  /// \name Code completion
  //@{
  /// Describes the context in which code completion occurs.
enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void 
CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
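  // Illustrative sketch (not part of the original header) of the source
  // construct SemaBuiltinShuffleVector below checks: __builtin_shufflevector
  // selects elements from its vector operands by constant indices.
  //
  //   typedef int v4 __attribute__((vector_size(16)));
  //   v4 reverse(v4 x) { return __builtin_shufflevector(x, x, 3, 2, 1, 0); }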
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. 
  void CheckShadowInheritedFields(const SourceLocation &Loc,
                                  DeclarationName FieldName,
                                  const CXXRecordDecl *RD,
                                  bool DeclIsField = true);

  /// Check if the given expression contains a 'break' or 'continue'
  /// statement that produces control flow different from GCC.
  void CheckBreakContinueBinding(Expr *E);

  /// Check whether the receiver is a mutable ObjC container which
  /// attempts to add itself into the container.
  void CheckObjCCircularContainer(ObjCMessageExpr *Message);

  void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
  void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                                 bool DeleteWasArrayForm);

public:
  /// Register a magic integral constant to be used as a type tag.
  void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                  uint64_t MagicValue, QualType Type,
                                  bool LayoutCompatible, bool MustBeNull);

  struct TypeTagData {
    TypeTagData() {}

    TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
        : Type(Type), LayoutCompatible(LayoutCompatible),
          MustBeNull(MustBeNull) {}

    QualType Type;

    /// If true, \c Type should be compared with other expression's types for
    /// layout-compatibility.
    unsigned LayoutCompatible : 1;
    unsigned MustBeNull : 1;
  };

  /// A pair of ArgumentKind identifier and magic value. This uniquely
  /// identifies the magic value.
  typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
  /// A map from magic value to type information.
  std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
      TypeTagForDatatypeMagicValues;

  /// Perform checks on a call of a function with argument_with_type_tag
  /// or pointer_with_type_tag attributes.
  void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                const ArrayRef<const Expr *> ExprArgs,
                                SourceLocation CallSiteLoc);

  /// Check if we are taking the address of a packed field
  /// as this may be a problem if the pointer value is dereferenced.
  void CheckAddressOfPackedMember(Expr *rhs);

  /// The parser's current scope.
  ///
  /// The parser maintains this state here.
  Scope *CurScope;

  mutable IdentifierInfo *Ident_super;
  mutable IdentifierInfo *Ident___float128;

  /// Nullability type specifiers.
  IdentifierInfo *Ident__Nonnull = nullptr;
  IdentifierInfo *Ident__Nullable = nullptr;
  IdentifierInfo *Ident__Null_unspecified = nullptr;

  IdentifierInfo *Ident_NSError = nullptr;

  /// The handler for the FileChanged preprocessor events.
  ///
  /// Used for diagnostics that implement custom semantic analysis for
  /// #include directives, like -Wpragma-pack.
  sema::SemaPPCallbacks *SemaPPCallbackHandler;

protected:
  friend class Parser;
  friend class InitializationSequence;
  friend class ASTReader;
  friend class ASTDeclReader;
  friend class ASTWriter;

public:
  /// Retrieve the keyword associated with the given nullability kind.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

  /// The struct behind the CFErrorRef pointer.
  RecordDecl *CFError = nullptr;

  /// Retrieve the identifier "NSError".
  IdentifierInfo *getNSErrorIdent();

  /// Retrieve the parser's current scope.
  ///
  /// This routine must only be used when it is certain that semantic analysis
  /// and the parser are in precisely the same context, which is not the case
  /// when, e.g., we are performing any kind of template instantiation.
  /// Therefore, the only safe places to use this scope are in the parser
  /// itself and in routines directly invoked from the parser and *never* from
  /// template substitution or instantiation.
  Scope *getCurScope() const { return CurScope; }

  void incrementMSManglingNumber() const {
    return CurScope->incrementMSManglingNumber();
  }

  IdentifierInfo *getSuperIdentifier() const;
  IdentifierInfo *getFloat128Identifier() const;

  Decl *getObjCDeclContext() const;

  DeclContext *getCurLexicalContext() const {
    return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
  }

  const DeclContext *getCurObjCLexicalContext() const {
    const DeclContext *DC = getCurLexicalContext();
    // A category implicitly has the attribute of the interface.
    if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
      DC = CatD->getClassInterface();
    return DC;
  }

  /// To be used for checking whether the arguments being passed to a
  /// function exceed the number of parameters expected for it.
  static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                               bool PartialOverloading = false) {
    // We check whether we're just after a comma in code-completion.
    if (NumArgs > 0 && PartialOverloading)
      return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
    return NumArgs > NumParams;
  }

  // Emitting members of dllexported classes is delayed until the class
  // (including field initializers) is fully parsed.
  SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
  SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;

private:
  class SavePendingParsedClassStateRAII {
  public:
    SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

    ~SavePendingParsedClassStateRAII() {
      assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedDllExportClasses.empty() &&
             "there shouldn't be any pending delayed DLL export classes");
      swapSavedState();
    }

  private:
    Sema &S;
    decltype(DelayedOverridingExceptionSpecChecks)
        SavedOverridingExceptionSpecChecks;
    decltype(DelayedEquivalentExceptionSpecChecks)
        SavedEquivalentExceptionSpecChecks;
    decltype(DelayedDllExportClasses) SavedDllExportClasses;

    void swapSavedState() {
      SavedOverridingExceptionSpecChecks.swap(
          S.DelayedOverridingExceptionSpecChecks);
      SavedEquivalentExceptionSpecChecks.swap(
          S.DelayedEquivalentExceptionSpecChecks);
      SavedDllExportClasses.swap(S.DelayedDllExportClasses);
    }
  };

  /// Helper class that collects misaligned member designations and
  /// their location info for delayed diagnostics.
  struct MisalignedMember {
    Expr *E;
    RecordDecl *RD;
    ValueDecl *MD;
    CharUnits Alignment;

    MisalignedMember() : E(), RD(), MD(), Alignment() {}
    MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                     CharUnits Alignment)
        : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
    explicit MisalignedMember(Expr *E)
        : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

    bool operator==(const MisalignedMember &m) { return this->E == m.E; }
  };
  /// Small set of gathered accesses to potentially misaligned members
  /// due to the packed attribute.
  SmallVector<MisalignedMember, 4> MisalignedMembers;

  /// Adds an expression to the set of gathered misaligned members.
  void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                     CharUnits Alignment);

public:
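  // Illustrative sketch (not part of the original header) of code that feeds
  // MisalignedMembers above: taking the address of a member of a packed
  // struct can produce a pointer with weaker alignment than its pointee type
  // implies, which is what -Waddress-of-packed-member diagnoses:
  //
  //   struct __attribute__((packed)) S { char c; int i; };
  //   int *addr(struct S *s) { return &s->i; }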
  /// Diagnoses the current set of gathered accesses. This typically
  /// happens at full expression level. The set is cleared after emitting the
  /// diagnostics.
  void DiagnoseMisalignedMembers();

  /// This function checks if the expression is in the set of potentially
  /// misaligned members, and if it is converted to some pointer type T with
  /// lower or equal alignment requirements, removes it. This is used when
  /// we do not want to diagnose such misaligned access (e.g. in conversions
  /// to void*).
  void DiscardMisalignedMemberAddress(const Type *T, Expr *E);

  /// This function calls Action when it determines that E designates a
  /// misaligned member due to the packed attribute. This is used to emit
  /// local diagnostics like in reference binding.
  void RefersToMemberWithReducedAlignment(
      Expr *E,
      llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
          Action);

  /// Describes the reason a calling convention specification was ignored,
  /// used for diagnostics.
  enum class CallingConventionIgnoredReason {
    ForThisTarget = 0,
    VariadicFunction,
    ConstructorDestructor,
    BuiltinFunction
  };
};

/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  bool Entered = true;

public:
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
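// Illustrative note (not part of the original header): the specialization
// above is what allows FunctionDeclAndLoc to serve as a key in LLVM's hashed
// containers, e.g. the LocsWithCUDACallDiags member declared earlier:
//
//   llvm::DenseSet<clang::Sema::FunctionDeclAndLoc> Locs;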
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class 
FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. 
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Keeps track of the expected type during expression parsing. The type is
/// tied to a particular token; all functions that update or consume the type
/// take a start location of the token they are looking at as a parameter.
/// This allows us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref; clients should make sure all calls to get() with the same
  /// location happen while the function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema {
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  /// Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  /// Whether Sema has generated a multiplexer and has to delete it.
  bool isMultiplexExternalSource;

  static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);

  bool isVisibleSlow(const NamedDecl *D);

  /// Determine whether two declarations should be linked together, given that
  /// the old declaration might not be visible and the new declaration might
  /// not have external linkage.
  bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                    const NamedDecl *New) {
    if (isVisible(Old))
      return true;
    // See comment in below overload for why it's safe to compute the linkage
    // of the new declaration here.
if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
    PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
    PSK_Pop_Set = PSK_Pop | PSK_Set,   // #pragma (pop[, id], value)
  };

  template<typename ValueType>
  struct PragmaStack {
    struct Slot {
      llvm::StringRef StackSlotLabel;
      ValueType Value;
      SourceLocation PragmaLocation;
      SourceLocation PragmaPushLocation;
      Slot(llvm::StringRef StackSlotLabel, ValueType Value,
           SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
          : StackSlotLabel(StackSlotLabel), Value(Value),
            PragmaLocation(PragmaLocation),
            PragmaPushLocation(PragmaPushLocation) {}
    };
    void Act(SourceLocation PragmaLocation,
             PragmaMsStackAction Action,
             llvm::StringRef StackSlotLabel,
             ValueType Value);

    // MSVC seems to add artificial slots to #pragma stacks on entering a C++
    // method body to restore the stacks on exit, so it works like this:
    //
    //   struct S {
    //     #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
    //     void Method {}
    //     #pragma <name>(pop, InternalPragmaSlot)
    //   };
    //
    // It works even with #pragma vtordisp, although MSVC doesn't support
    //   #pragma vtordisp(push [, id], n)
    // syntax.
    //
    // Push / pop a named sentinel slot.
    void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
      assert((Action == PSK_Push || Action == PSK_Pop) &&
             "Can only push / pop #pragma stack sentinels!");
      Act(CurrentPragmaLocation, Action, Label, CurrentValue);
    }

    // Constructors.
    explicit PragmaStack(const ValueType &Default)
        : DefaultValue(Default), CurrentValue(Default) {}

    bool hasValue() const { return CurrentValue != DefaultValue; }

    SmallVector<Slot, 2> Stack;
    ValueType DefaultValue; // Value used for PSK_Reset action.
    ValueType CurrentValue;
    SourceLocation CurrentPragmaLocation;
  };
  // FIXME: We should serialize / deserialize these if they occur in a PCH (but
  // we shouldn't do so if they're in a module).

  /// Whether to insert vtordisps prior to virtual bases in the Microsoft
  /// C++ ABI.  Possible values are 0, 1, and 2, which mean:
  ///
  /// 0: Suppress all vtordisps
  /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
  ///    structors
  /// 2: Always insert vtordisps to support RTTI on partially constructed
  ///    objects
  PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
  // #pragma pack.
  // Sentinel to represent when the stack is set to mac68k alignment.
  static const unsigned kMac68kAlignmentSentinel = ~0U;
  PragmaStack<unsigned> PackStack;
  // The current #pragma pack values and locations at each #include.
  struct PackIncludeState {
    unsigned CurrentValue;
    SourceLocation CurrentPragmaLocation;
    bool HasNonDefaultValue, ShouldWarnOnInclude;
  };
  SmallVector<PackIncludeState, 8> PackIncludeStack;
  // Segment #pragmas.
  PragmaStack<StringLiteral *> DataSegStack;
  PragmaStack<StringLiteral *> BSSSegStack;
  PragmaStack<StringLiteral *> ConstSegStack;
  PragmaStack<StringLiteral *> CodeSegStack;

  // RAII object to push / pop sentinel slots for all MS #pragma stacks.
  // Actions should be performed only if we enter / exit a C++ method body.
  class PragmaStackSentinelRAII {
  public:
    PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
    ~PragmaStackSentinelRAII();

  private:
    Sema &S;
    StringRef SlotLabel;
    bool ShouldAct;
  };

  /// A mapping that describes the nullability we've seen in each header file.
  FileNullabilityMap NullabilityMap;

  /// Last section used with #pragma init_seg.
  StringLiteral *CurInitSeg;
  SourceLocation CurInitSegLoc;

  /// VisContext - Manages the stack for \#pragma GCC visibility.
  void *VisContext; // Really a "PragmaVisStack*"
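  // Illustrative sketch (not part of this header): how #pragma handling code
  // might drive a PragmaStack. The stack object, label, and values are
  // assumptions for the example; Act() interprets the PragmaMsStackAction
  // bits, so push+set is a single call:
  //
  //   PragmaStack<unsigned> Pack(/*Default=*/8);
  //   Pack.Act(Loc, PSK_Push_Set, "vec", 4u);  // #pragma pack(push, vec, 4)
  //   unsigned Cur = Pack.CurrentValue;        // now 4
  //   Pack.Act(Loc, PSK_Pop, "vec", 0u);       // #pragma pack(pop, vec)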
  /// This is an attribute introduced by \#pragma clang attribute.
  struct PragmaAttributeEntry {
    SourceLocation Loc;
    ParsedAttr *Attribute;
    SmallVector<attr::SubjectMatchRule, 4> MatchRules;
    bool IsUsed;
  };

  /// A push'd group of PragmaAttributeEntries.
  struct PragmaAttributeGroup {
    /// The location of the push attribute.
    SourceLocation Loc;
    /// The namespace of this push group.
    const IdentifierInfo *Namespace;
    SmallVector<PragmaAttributeEntry, 2> Entries;
  };

  SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;

  /// The declaration that is currently receiving an attribute from the
  /// #pragma attribute stack.
  const Decl *PragmaAttributeCurrentTargetDecl;

  /// This represents the last location of a "#pragma clang optimize off"
  /// directive if such a directive has not been closed by an "on" yet. If
  /// optimizations are currently "on", this is set to an invalid location.
  SourceLocation OptimizeOffPragmaLocation;

  /// Flag indicating if Sema is building a recovery call expression.
  ///
  /// This flag is used to avoid building recovery call expressions
  /// if Sema is already doing so, which would cause infinite recursions.
  bool IsBuildingRecoveryCallExpr;

  /// Used to control the generation of ExprWithCleanups.
  CleanupInfo Cleanup;

  /// ExprCleanupObjects - This is the stack of objects requiring
  /// cleanup that are created by the current full expression.  The
  /// element type here is ExprWithCleanups::Object.
  SmallVector<BlockDecl*, 8> ExprCleanupObjects;

  /// Store a set of either DeclRefExprs or MemberExprs that contain a
  /// reference to a variable (constant) that may or may not be odr-used in
  /// this Expr, and we won't know until all lvalue-to-rvalue and discarded
  /// value conversions have been applied to all subexpressions of the
  /// enclosing full expression. This is cleared at the end of each full
  /// expression.
  using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
  MaybeODRUseExprSet MaybeODRUseExprs;

  std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;

  /// Stack containing information about each of the nested
  /// function, block, and method scopes that are currently active.
  SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;

  typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadExtVectorDecls, 2, 2>
    ExtVectorDeclsType;

  /// ExtVectorDecls - This is a list of all the extended vector types. This
  /// allows us to associate a raw vector type with one of the ext_vector type
  /// names. This is only necessary for issuing pretty diagnostics.
  ExtVectorDeclsType ExtVectorDecls;

  /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
  std::unique_ptr<CXXFieldCollector> FieldCollector;

  typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;

  /// Set containing all declared private fields that are not used.
  NamedDeclSetType UnusedPrivateFields;

  /// Set containing all typedefs that are likely unused.
  llvm::SmallSetVector<const TypedefNameDecl *, 4>
      UnusedLocalTypedefNameCandidates;

  /// Delete-expressions to be analyzed at the end of the translation unit.
  ///
  /// This list contains class members, and locations of delete-expressions
  /// that could not be proven as to whether they mismatch with new-expression
  /// used in initializer of the field.
  typedef std::pair<SourceLocation, bool> DeleteExprLoc;
  typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
  llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;

  typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;

  /// PureVirtualClassDiagSet - a set of class declarations for which we have
  /// emitted a list of pure virtual functions.
Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. 
sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. 
  /// Used in Solaris system headers to define functions that occur in
  /// multiple standards to call the version in the currently selected
  /// standard.
  llvm::DenseMap<IdentifierInfo*, AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

  /// Load weak undeclared identifiers from the external source.
  void LoadExternalWeakUndeclaredIdentifiers();

  /// WeakTopLevelDecl - Translation-unit scoped declarations generated by
  /// \#pragma weak during processing of other Decls.
  /// I couldn't figure out a clean way to generate these in-line, so
  /// we store them here and handle separately -- which is a hack.
  /// It would be best to refactor this.
  SmallVector<Decl*, 2> WeakTopLevelDecl;

  IdentifierResolver IdResolver;

  /// Translation Unit Scope - useful to Objective-C actions that need
  /// to look up file scope declarations in the "ordinary" C decl namespace.
  /// For example, user-defined classes, built-in "id" type, etc.
  Scope *TUScope;

  /// The C++ "std" namespace, where the standard library resides.
  LazyDeclPtr StdNamespace;

  /// The C++ "std::bad_alloc" class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdBadAlloc;

  /// The C++ "std::align_val_t" enum class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdAlignValT;

  /// The C++ "std::experimental" namespace, where the experimental parts
  /// of the standard library reside.
  NamespaceDecl *StdExperimentalNamespaceCache;

  /// The C++ "std::initializer_list" template, which is defined in
  /// \<initializer_list>.
  ClassTemplateDecl *StdInitializerList;

  /// The C++ "std::coroutine_traits" template, which is defined in
  /// \<coroutine_traits>.
  ClassTemplateDecl *StdCoroutineTraitsCache;

  /// The C++ "type_info" declaration, which is defined in \<typeinfo>.
  RecordDecl *CXXTypeInfoDecl;

  /// The MSVC "_GUID" struct, which is defined in MSVC header files.
  RecordDecl *MSVCGuidDecl;

  /// Caches identifiers/selectors for NSFoundation APIs.
  std::unique_ptr<NSAPI> NSAPIObj;

  /// The declaration of the Objective-C NSNumber class.
  ObjCInterfaceDecl *NSNumberDecl;

  /// The declaration of the Objective-C NSValue class.
  ObjCInterfaceDecl *NSValueDecl;

  /// Pointer to NSNumber type (NSNumber *).
  QualType NSNumberPointer;

  /// Pointer to NSValue type (NSValue *).
  QualType NSValuePointer;

  /// The Objective-C NSNumber methods used to create NSNumber literals.
  ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

  /// The declaration of the Objective-C NSString class.
  ObjCInterfaceDecl *NSStringDecl;

  /// Pointer to NSString type (NSString *).
  QualType NSStringPointer;

  /// The declaration of the stringWithUTF8String: method.
  ObjCMethodDecl *StringWithUTF8StringMethod;

  /// The declaration of the valueWithBytes:objCType: method.
  ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

  /// The declaration of the Objective-C NSArray class.
  ObjCInterfaceDecl *NSArrayDecl;

  /// The declaration of the arrayWithObjects:count: method.
  ObjCMethodDecl *ArrayWithObjectsMethod;

  /// The declaration of the Objective-C NSDictionary class.
  ObjCInterfaceDecl *NSDictionaryDecl;

  /// The declaration of the dictionaryWithObjects:forKeys:count: method.
  ObjCMethodDecl *DictionaryWithObjectsMethod;

  /// id<NSCopying> type.
  QualType QIDNSCopying;

  /// Will hold 'respondsToSelector:'.
  Selector RespondsToSelectorSel;

  /// A flag to remember whether the implicit forms of operator new and delete
  /// have been declared.
  bool GlobalNewDeleteDeclared;

  /// A flag to indicate that we're in a context that permits abstract
  /// references to fields.
  bool AllowAbstractFieldReference;

  /// Describes how the expressions currently being parsed are
  /// evaluated at run-time, if at all.
  enum class ExpressionEvaluationContext {
    /// The current expression and its subexpressions occur within an
    /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
    /// \c sizeof, where the type of the expression may be significant but
    /// no code will be generated to evaluate the value of the expression at
    /// run time.
    Unevaluated,

    /// The current expression occurs within a braced-init-list within
    /// an unevaluated operand. This is mostly like a regular unevaluated
    /// context, except that we still instantiate constexpr functions that are
    /// referenced here so that we can perform narrowing checks correctly.
    UnevaluatedList,

    /// The current expression occurs within a discarded statement.
    /// This behaves largely similarly to an unevaluated operand in preventing
    /// definitions from being required, but not in other ways.
    DiscardedStatement,

    /// The current expression occurs within an unevaluated
    /// operand that unconditionally permits abstract references to
    /// fields, such as a SIZE operator in MS-style inline assembly.
    UnevaluatedAbstract,

    /// The current context is "potentially evaluated" in C++11 terms,
    /// but the expression is evaluated at compile-time (like the values of
    /// cases in a switch statement).
    ConstantEvaluated,

    /// The current expression is potentially evaluated at run time,
    /// which means that code may be generated to evaluate the value of the
    /// expression at run time.
    PotentiallyEvaluated,

    /// The current expression is potentially evaluated, but any
    /// declarations referenced inside that expression are only used if
    /// in fact the current expression is used.
    ///
    /// This value is used when parsing default function arguments, for which
    /// we would like to provide diagnostics (e.g., passing non-POD arguments
    /// through varargs) but do not want to mark declarations as "referenced"
    /// until the default argument is used.
    PotentiallyEvaluatedIfUsed
  };

  /// Data structure used to record current or nested
  /// expression evaluation contexts.
  struct ExpressionEvaluationContextRecord {
    /// The expression evaluation context.
    ExpressionEvaluationContext Context;

    /// Whether the enclosing context needed a cleanup.
    CleanupInfo ParentCleanup;

    /// Whether we are in a decltype expression.
    bool IsDecltype;

    /// The number of active cleanup objects when we entered
    /// this expression evaluation context.
    unsigned NumCleanupObjects;

    /// The number of typos encountered during this expression evaluation
    /// context (i.e. the number of TypoExprs created).
    unsigned NumTypos;

    MaybeODRUseExprSet SavedMaybeODRUseExprs;

    /// The lambdas that are present within this context, if it
    /// is indeed an unevaluated context.
    SmallVector<LambdaExpr *, 2> Lambdas;

    /// The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;

    /// The context information used to mangle lambda expressions
    /// and block literals within this context.
    ///
    /// This mangling information is allocated lazily, since most contexts
    /// do not have lambda expressions or block literals.
    std::unique_ptr<MangleNumberingContext> MangleNumbering;
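    // Illustrative examples (not part of this header) of how source
    // constructs map onto the ExpressionEvaluationContext values above:
    //
    //   sizeof(f())           // Unevaluated: f() is never executed
    //   case SomeConstant:    // ConstantEvaluated: evaluated at compile time
    //   return g();           // PotentiallyEvaluated: code is generated
    //   void h(int n = g());  // PotentiallyEvaluatedIfUsed until the default
    //                         // argument is actually used at a call site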
    /// If we are processing a decltype type, a set of call expressions
    /// for which we have deferred checking the completeness of the return
    /// type.
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

    /// \brief Describes whether we are in an expression context which we have
    /// to handle differently.
    enum ExpressionKind {
      EK_Decltype, EK_TemplateArgument, EK_Other
    } ExprContext;

    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      CleanupInfo ParentCleanup,
                                      Decl *ManglingContextDecl,
                                      ExpressionKind ExprContext)
        : Context(Context), ParentCleanup(ParentCleanup),
          NumCleanupObjects(NumCleanupObjects), NumTypos(0),
          ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
          ExprContext(ExprContext) {}

    /// Retrieve the mangling numbering context, used to consistently
    /// number constructs like lambdas for mangling.
    MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

    bool isUnevaluated() const {
      return Context == ExpressionEvaluationContext::Unevaluated ||
             Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
             Context == ExpressionEvaluationContext::UnevaluatedList;
    }
    bool isConstantEvaluated() const {
      return Context == ExpressionEvaluationContext::ConstantEvaluated;
    }
  };

  /// A stack of expression evaluation contexts.
  SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

  /// Emit a warning for all pending noderef expressions that we recorded.
  void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

  /// Compute the mangling number context for a lambda expression or
  /// block literal.
  ///
  /// \param DC - The DeclContext containing the lambda expression or
  /// block literal.
  /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
  /// associated with the context, if relevant.
  MangleNumberingContext *
  getCurrentMangleNumberContext(const DeclContext *DC,
                                Decl *&ManglingContextDecl,
                                bool SkpNoODRChk = false,
                                bool *Forced = nullptr);

  /// SpecialMemberOverloadResult - The overloading result for a special
  /// member function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of
  /// the integer are used to determine whether overload resolution succeeded.
  class SpecialMemberOverloadResult {
  public:
    enum Kind {
      NoMemberOrDeleted,
      Ambiguous,
      Success
    };

  private:
    llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

  public:
    SpecialMemberOverloadResult() : Pair() {}
    SpecialMemberOverloadResult(CXXMethodDecl *MD)
        : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };

  class SpecialMemberOverloadResultEntry
      : public llvm::FastFoldingSetNode,
        public SpecialMemberOverloadResult {
  public:
    SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
        : FastFoldingSetNode(ID) {}
  };

  /// A cache of special member function overload resolution results
  /// for C++ records.
  llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

  /// A cache of the flags available in enumerations with the flag_bits
  /// attribute.
  mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

  /// The kind of translation unit we are processing.
/// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. 
  /// We want to keep track of them
  /// to ensure that we don't emit a "redefinition" error if we encounter a
  /// correctly named definition after the renamed definition.
  llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

  /// Stack of types that correspond to the parameter entities that are
  /// currently being copy-initialized. Can be empty.
  llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

  void ReadMethodPool(Selector Sel);
  void updateOutOfDateSelector(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the FP_CONTRACT state on entry/exit of compound
  /// statements.
  class FPContractStateRAII {
  public:
    FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
    ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

  private:
    Sema& S;
    FPOptions OldFPFeaturesState;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

  bool WarnedStackExhausted = false;

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  /// Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  /// \param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// Warn that the stack is nearly exhausted.
  void warnStackExhausted(SourceLocation Loc);

  /// Run some code with "sufficient" stack space. (Currently, at least 256K
  /// is guaranteed.) Produces a warning if we're low on stack space and
  /// allocates more in that case. Use this in code that may recurse deeply
  /// (for example, in template instantiation) to avoid stack overflow.
  void runWithSufficientStackSpace(SourceLocation Loc,
                                   llvm::function_ref<void()> Fn);

  /// Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class SemaDiagnosticBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef,
                          unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe
    // no-op in that case.
    SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;

    ~SemaDiagnosticBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive())
        return;

      // Otherwise, we need to emit the diagnostic. First flush the underlying
      // DiagnosticBuilder data, and clear the diagnostic builder itself so it
      // won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor
      // will do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      FlushCounts();
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template <typename T>
    friend const SemaDiagnosticBuilder &
    operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }
  };

  /// Emit a diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
    DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
    return SemaDiagnosticBuilder(DB, *this, DiagID);
  }

  /// Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD);

  /// Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// Get a string to suggest for zero-initialization of a type.
  std::string getFixItZeroInitializerForType(QualType T,
                                             SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

  /// Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  void emitAndClearUnusedLocalTypedefWarnings();

  enum TUFragmentKind {
    /// The global module fragment, between 'module;' and a module-declaration.
    Global,
    /// A normal translation unit fragment. For a non-module unit, this is the
    /// entire translation unit. Otherwise, it runs from the module-declaration
    /// to the private-module-fragment (if any) or the end of the TU (if not).
    Normal,
    /// The private module fragment, between 'module :private;' and the end of
    /// the translation unit.
    Private
  };

  void ActOnStartOfTranslationUnit();
  void ActOnEndOfTranslationUnit();
  void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

  void CheckDelegatingCtorCycles();

  Scope *getScopeForContext(DeclContext *Ctx);

  void PushFunctionScope();
  void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
  sema::LambdaScopeInfo *PushLambdaScope();
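  // Illustrative sketch (not part of this header): Diag() returns a
  // SemaDiagnosticBuilder, so callers stream arguments with operator<< and
  // the diagnostic is emitted when the temporary is destroyed. The diagnostic
  // ID and variables here are assumptions for the example:
  //
  //   SemaRef.Diag(D->getLocation(), diag::err_example_bad_decl)
  //       << D->getDeclName()
  //       << FixItHint::CreateRemoval(D->getSourceRange());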
  /// This is used to inform Sema what the current TemplateParameterDepth
  /// is during parsing. Currently it is used to pass on the depth
  /// when parsing generic lambda 'auto' parameters.
  void RecordParsingTemplateParameterDepth(unsigned Depth);

  void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                               RecordDecl *RD, CapturedRegionKind K,
                               unsigned OpenMPCaptureLevel = 0);

  /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
  /// time after they've been popped.
  class PoppedFunctionScopeDeleter {
    Sema *Self;

  public:
    explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
    void operator()(sema::FunctionScopeInfo *Scope) const;
  };

  using PoppedFunctionScopePtr =
      std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

  PoppedFunctionScopePtr
  PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                       const Decl *D = nullptr,
                       QualType BlockType = QualType());

  sema::FunctionScopeInfo *getCurFunction() const {
    return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
  }

  sema::FunctionScopeInfo *getEnclosingFunction() const;

  void setFunctionHasBranchIntoScope();
  void setFunctionHasBranchProtectedScope();
  void setFunctionHasIndirectGoto();

  void PushCompoundScope(bool IsStmtExpr);
  void PopCompoundScope();

  sema::CompoundScopeInfo &getCurCompoundScope() const;

  bool hasAnyUnrecoverableErrorsInThisFunction() const;

  /// Retrieve the current block, if any.
  sema::BlockScopeInfo *getCurBlock();

  /// Get the innermost lambda enclosing the current location, if any. This
  /// looks through intervening non-lambda scopes such as local functions and
  /// blocks.
  sema::LambdaScopeInfo *getEnclosingLambda() const;

  /// Retrieve the current lambda scope info, if any.
  /// \param IgnoreNonLambdaCapturingScope true if should find the top-most
  /// lambda scope info ignoring all inner capturing scopes that are not
  /// lambda scopes.
  sema::LambdaScopeInfo *
  getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

  /// Retrieve the current generic lambda info, if any.
  sema::LambdaScopeInfo *getCurGenericLambda();

  /// Retrieve the current captured region, if any.
  sema::CapturedRegionScopeInfo *getCurCapturedRegion();

  /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
  SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

  void ActOnComment(SourceRange Comment);

  //===--------------------------------------------------------------------===//
  // Type Analysis / Processing: SemaType.cpp.
  //

  QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                              const DeclSpec *DS = nullptr);
  QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                              const DeclSpec *DS = nullptr);
  QualType BuildPointerType(QualType T,
                            SourceLocation Loc, DeclarationName Entity);
  QualType BuildReferenceType(QualType T, bool LValueRef,
                              SourceLocation Loc, DeclarationName Entity);
  QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                          Expr *ArraySize, unsigned Quals,
                          SourceRange Brackets, DeclarationName Entity);
  QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
  QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                              SourceLocation AttrLoc);
  QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
                                 SourceLocation AttrLoc);

  /// Same as above, but constructs the AddressSpace index if not provided.
  QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                                 SourceLocation AttrLoc);

  bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);

  bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

  /// Build a function type.
/// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) 
{ assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) 
bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
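// (Illustrative note on the completeness checks declared above: the variadic
// RequireCompleteType overloads bundle their extra arguments into a
// BoundTypeDiagnoser so the arguments can be streamed into the diagnostic
// before the offending type. A hedged usage sketch, where the diagnostic ID
// is only a plausible placeholder:
//   if (S.RequireCompleteType(Loc, T, diag::err_incomplete_type))
//     return true;  // diagnostic already emitted with T appended
// )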
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate, NC_UndeclaredTemplate, }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case 
NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. 
Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions). CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context.
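///
/// Illustrative call (a hedged sketch: `S`, `VD`, and the chosen
/// context/kind flags are assumptions made for the example):
/// \code
///   // A default-initialized local of union type needs both the
///   // initialization and the destruction check:
///   S.checkNonTrivialCUnion(VD->getType(), VD->getLocation(),
///                           Sema::NTCUC_AutoVar,
///                           Sema::NTCUK_Init | Sema::NTCUK_Destruct);
/// \endcode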
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. 
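///
/// Typically invoked once per function or method definition, e.g.
/// (illustrative sketch; `S` and `FD` are assumed):
/// \code
///   S.DiagnoseSizeOfParametersAndReturnValue(FD->parameters(),
///                                            FD->getReturnType(), FD);
/// \endcode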
void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem.
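///
/// Illustrative call (hedged; `S`, `UseLoc`, and `ND` come from the caller
/// in this sketch):
/// \code
///   S.diagnoseMissingImport(UseLoc, ND, Sema::MissingImportKind::Definition,
///                           /*Recover=*/true);
/// \endcode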
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
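///
/// Example of feeding the result into a %select diagnostic (illustrative
/// sketch; the exact diagnostic arguments are placeholders):
/// \code
///   Sema::NonTagKind NTK = S.getNonTagTypeDeclKind(PrevDecl, TTK_Struct);
///   S.Diag(NameLoc, diag::err_tag_reference_non_tag) << PrevDecl << NTK;
/// \endcode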
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. 
/// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. 
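///
/// Illustrative pattern for the two accessors (hedged; `S` is assumed):
/// \code
///   if (ObjCMethodDecl *MD = S.getCurMethodDecl()) {
///     // parsing an Objective-C method body
///   } else if (FunctionDecl *FD = S.getCurFunctionDecl()) {
///     // parsing a C/C++ function (or block) body
///   }
/// \endcode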
ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added.
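///
/// Illustrative merge call (a hedged sketch; every argument value below is
/// a placeholder chosen for the example, not a recommended default):
/// \code
///   AvailabilityAttr *Merged = S.mergeAvailabilityAttr(
///       ND, CI, PlatformII, /*Implicit=*/false,
///       /*Introduced=*/VersionTuple(10, 12), /*Deprecated=*/VersionTuple(),
///       /*Obsoleted=*/VersionTuple(), /*IsUnavailable=*/false,
///       /*Message=*/"", /*IsStrict=*/false, /*Replacement=*/"",
///       Sema::AMK_Redeclaration, Sema::AP_Explicit);
///   if (Merged)
///     ND->addAttr(Merged);
/// \endcode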
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. 
CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. 
enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType
*Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
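///
/// Illustrative guard before taking a function's address (hedged; `S`,
/// `FD`, and `Loc` are assumed):
/// \code
///   if (!S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true, Loc))
///     return ExprError(); // not takable; a diagnostic was already issued
/// \endcode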
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
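// (Illustrative use of the overload-address machinery above; `S`,
// `AddrExpr`, and `TargetTy` are assumptions made for this sketch:
//   DeclAccessPair Found;
//   if (FunctionDecl *Fn = S.ResolveAddressOfOverloadedFunction(
//           AddrExpr, TargetTy, /*Complain=*/true, Found))
//     AddrExpr = S.FixOverloadedFunctionReference(AddrExpr, Found, Fn);
// FixOverloadedFunctionReference then rewrites the reference to name the
// selected candidate.)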
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. 
LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. 
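// A minimal sketch of pairing a lookup kind with the redeclaration kind
// returned by forRedeclarationInCurContext() when declaring a name (hedged:
// `SemaRef`, `NameInfo`, and `S` are placeholders assumed to be in scope):
//
// \code
//   LookupResult Previous(SemaRef, NameInfo, Sema::LookupOrdinaryName,
//                         SemaRef.forRedeclarationInCurContext());
//   SemaRef.LookupName(Previous, S);
// \endcode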
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind, Scope *S,
                           CXXScopeSpec *SS, CorrectionCandidateCallback &CCC,
                           DeclContext *MemberContext, bool EnteringContext,
                           const ObjCObjectPointerType *OPT,
                           bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);

/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                            SourceLocation Loc, LookupNameKind NameKind,
                            RedeclarationKind Redecl = NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                      bool AllowBuiltinCreation = false,
                      bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                 RedeclarationKind Redecl = NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                  QualType T1, QualType T2,
                                  UnresolvedSetImpl &Functions);

LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                               SourceLocation GnuLabelLoc = SourceLocation());

DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                             unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                       bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                            unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                      bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
                      bool AllowRaw, bool AllowTemplate,
                      bool AllowStringTemplate, bool DiagnoseMissing);
bool isKnownName(StringRef name);

void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                             ArrayRef<Expr *> Args, ADLResult &Functions);

void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                        VisibleDeclConsumer &Consumer,
                        bool IncludeGlobalScope = true,
                        bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                        VisibleDeclConsumer &Consumer,
                        bool IncludeGlobalScope = true,
                        bool IncludeDependentBases = false,
                        bool LoadExternal = true);

enum CorrectTypoKind {
  CTK_NonError,     // CorrectTypo used in a non-error-recovery situation.
  CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. 
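// A hedged sketch of consuming the delayed-typo machinery above after
// parsing an expression (`ParsedExpr` is a placeholder):
//
// \code
//   ExprResult Res = CorrectDelayedTyposInExpr(ParsedExpr);
//   if (Res.isInvalid())
//     return ExprError(); // diagnostics for uncorrectable typos were emitted
// \endcode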
void ProcessDeclAttributeDelayed(Decl *D,
                                 const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D,
                              const ParsedAttributesView &AL,
                              bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                    const ParsedAttributesView &AttrList);

void checkUnusedDeclAttributes(Declarator &D);

/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);

bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
                          const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
                                    StringRef &Str,
                                    SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
    CXXRecordDecl *RD, SourceRange Range, bool BestCase,
    MSInheritanceAttr::Spelling SemanticSpelling);

void CheckAlignasUnderalignment(Decl *D);

/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
                            SourceLocation Loc);

// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);

/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;

/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
                                 const ParsedAttributesView &Attrs,
                                 SourceRange Range);

void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                 ObjCMethodDecl *MethodDecl,
                                 bool IsProtocolMethodDecl);

void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                      ObjCMethodDecl *Overridden,
                                      bool IsProtocolMethodDecl);

/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl,
                           bool IsProtocolMethodDecl);

typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                              ObjCIvarDecl **Fields, unsigned nIvars,
                              SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl *IMPDecl,
                               ObjCContainerDecl *IDecl,
                               bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                     ObjCContainerDecl *CDecl,
                                     bool SynthesizeProperties);

/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                 ObjCInterfaceDecl *IDecl,
                                 SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                    ObjCMethodDecl *Method, ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                         const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(
    const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const;

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(
    Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc,
    FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc,
    Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite,
    unsigned &Attributes, const unsigned AttributesAsWritten, QualType T,
    TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(
    Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc,
    SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel,
    SourceLocation GetterNameLoc, Selector SetterSel,
    SourceLocation SetterNameLoc, const bool isReadWrite,
    const unsigned Attributes, const unsigned AttributesAsWritten, QualType T,
    TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind,
    DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl *IMPDecl,
                                     ObjCInterfaceDecl *IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(
    const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};

/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                const ObjCMethodDecl *PrevMethod,
                                MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                const SelectorSet &ClsMap,
                                SelectorSet &InsMapSeen,
                                SelectorSet &ClsMapSeen,
                                ObjCImplDecl *IMPDecl,
                                ObjCContainerDecl *IDecl,
                                bool &IncompleteImpl, bool ImmediateClass,
                                bool WarnCategoryMethodImpl = false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass,
                                         bool instance);

public:
/// - Returns instance or factory methods in the global method pool for
/// the given selector. It checks the desired kind first; if none is found
/// and the parameter checkTheOther is set, it then checks the other kind.
/// If no such method or only one method is found, the function returns
/// false; otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                   bool InstanceFirst, bool CheckTheOther,
                                   const ObjCObjectType *TypeBound = nullptr);

bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                               SourceRange R, bool receiverIdOrClass,
                               SmallVectorImpl<ObjCMethodDecl*>& Methods);

void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                   Selector Sel, SourceRange R,
                                   bool receiverIdOrClass);

private:
/// - Returns the method that best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                 bool IsInstance,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
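///
/// A hedged usage sketch (the selector, location, and the follow-up helper
/// `checkMessageType` are placeholders, not part of this interface):
/// \code
///   if (ObjCMethodDecl *M =
///           LookupInstanceMethodInGlobalPool(Sel, SourceRange(Loc, Loc)))
///     checkMessageType(M);
/// \endcode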
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(
    Selector Sel, QualType ObjectType = QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() {
    return E;
  }

  Expr *get() const { return E; }

  Expr *operator->() {
    return E;
  }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
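///
/// A hedged sketch of the intended error-recovery pattern (assuming an
/// enclosing Sema member function; the condition is a placeholder):
/// \code
///   FunctionScopeRAII FuncScope(*this);
///   if (ParseSucceeded)
///     FuncScope.disable(); // keep the function scope on the success path
///   // otherwise the destructor pops the function scope
/// \endcode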
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, 
Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
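///
/// For example (illustrative), in C++11 this covers initializations like:
/// \code
///   int *p = 0; // 0 is implicitly converted to a null pointer value
/// \endcode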
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                const ObjCInterfaceDecl *UnknownObjCClass,
                                bool ObjCPropertyAccess,
                                bool AvoidPartialAvailabilityChecks = false,
                                ObjCInterfaceDecl *ClassReceiver = nullptr);

bool makeUnavailableInSystemHeader(SourceLocation loc,
                                   UnavailableAttr::ImplicitReason reason);

/// Issue any -Wunguarded-availability warnings in \c FD.
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.

bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                       const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
                       bool ObjCPropertyAccess = false,
                       bool AvoidPartialAvailabilityChecks = false,
                       ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
                                      ObjCMethodDecl *Getter,
                                      SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
                           ArrayRef<Expr *> Args);

void PushExpressionEvaluationContext(
    ExpressionEvaluationContext NewContext,
    Decl *LambdaContextDecl = nullptr,
    ExpressionEvaluationContextRecord::ExpressionKind Type =
        ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
    ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
    ExpressionEvaluationContextRecord::ExpressionKind Type =
        ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();

void DiscardCleanupsInEvaluationContext();

ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

ExprResult ActOnConstantExpression(ExprResult Res);

// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                            bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
                                       unsigned CapturingScopeIndex);

ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();

enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};

/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur, without performing the capture itself or
/// complaining if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
                        SourceLocation EllipsisLoc, bool BuildAndDiagnose,
                        QualType &CaptureType, QualType &DeclRefType,
                        const unsigned *const FunctionScopeIndexToStopAt);

/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                        TryCaptureKind Kind = TryCapture_Implicit,
                        SourceLocation EllipsisLoc = SourceLocation());

/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
                                      bool SkipLocalVariables = false);

/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
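///
/// A hedged calling sketch (the expression and the diagnostic ID are
/// placeholders):
/// \code
///   ExprResult Res = SomeExpr;
///   if (tryToRecoverWithCall(Res, PDiag(diag::err_ovl_unresolvable)))
///     return Res; // recovery attempted; Res may now be a call or invalid
/// \endcode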
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
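///
/// For example (illustrative), the three adjacent tokens in
/// \code
///   const wchar_t *S = "foo" "bar" L"baz";
/// \endcode
/// are concatenated into a single wide string literal.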
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
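///
/// A hedged parser-side sketch (`Actions` is the usual Sema reference held
/// by the parser; the expression and location names are placeholders):
/// \code
///   ExprResult Call = Actions.ActOnCallExpr(getCurScope(), Fn.get(),
///                                           LParLoc, ArgExprs, RParLoc);
/// \endcode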
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr,
                         bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
                SourceLocation RParenLoc, MultiExprArg Args,
                AtomicExpr::AtomicOp Op,
                AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
                      ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
                      Expr *Config = nullptr, bool IsExecConfig = false,
                      ADLCallKind UsesADL = ADLCallKind::NotADL);

ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                   MultiExprArg ExecConfig,
                                   SourceLocation GGGLoc);

ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
                         Declarator &D, ParsedType &Ty,
                         SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
                               SourceLocation RParenLoc, Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);

/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                              SourceLocation RParenLoc, Expr *E,
                              TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
                                SourceLocation RParenLoc, Expr *InitExpr);

ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                    TypeSourceInfo *TInfo,
                                    SourceLocation RParenLoc,
                                    Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                      SourceLocation EqualOrColonLoc,
                                      bool GNUSyntax, ExprResult Init);

private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                      tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                      BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                              Expr *LHSExpr, Expr *RHSExpr);

void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                              SourceLocation ColonLoc,
                              Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                          LabelDecl *TheDecl);

void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
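///
/// Source-level form handled here (illustrative, using OpenCL-style vector
/// typedefs):
/// \code
///   typedef float float4 __attribute__((ext_vector_type(4)));
///   typedef int   int4   __attribute__((ext_vector_type(4)));
///   float4 F = __builtin_convertvector(IVec, float4); // IVec has type int4
/// \endcode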
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                  SourceLocation BuiltinLoc,
                                  SourceLocation RParenLoc);

//===---------------------------- OpenCL Features -----------------------===//

/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                           SourceLocation BuiltinLoc,
                           SourceLocation RParenLoc);

//===---------------------------- C++ Features --------------------------===//

// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                             SourceLocation NamespaceLoc,
                             SourceLocation IdentLoc, IdentifierInfo *Ident,
                             SourceLocation LBrace,
                             const ParsedAttributesView &AttrList,
                             UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();

NamespaceDecl *lookupStdExperimentalNamespace();

CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;

private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;

ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                       CXXScopeSpec &SS,
                                       ParsedType TemplateTypeTy,
                                       IdentifierInfo *MemberOrBase);

public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
///   specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                     SourceLocation Loc);

/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);

/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
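///
/// For example (illustrative), this is true for the constructor in:
/// \code
///   struct S { S(std::initializer_list<int>); };
/// \endcode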
bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// corresponding base class constructor, find or create the implicitly /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable?
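///
/// \par Example
/// A sketch of an elidable construction (names are illustrative only): the
/// copy built for the return value below may be elided, which is what the
/// Elidable flag on the following overload records.
/// \code
///   struct Big { int Data[64]; };
///
///   Big MakeBig() {
///     Big B = {};
///     return B; // copy/move construction of the result is elidable
///   }
/// \endcode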
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Determine what sort of exception specification a defaulted /// default constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const.
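///
/// \par Example
/// A minimal sketch (types are illustrative only) of how the computed
/// specification is observable from user code:
/// \code
///   #include <type_traits>
///
///   struct Safe  { Safe(const Safe &) noexcept; };
///   struct Risky { Risky(const Risky &); };
///
///   struct A { Safe S; };  // defaulted copy ctor is noexcept
///   struct B { Risky R; }; // defaulted copy ctor is noexcept(false)
///
///   static_assert(std::is_nothrow_copy_constructible<A>::value, "");
///   static_assert(!std::is_nothrow_copy_constructible<B>::value, "");
/// \endcode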
ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. 
/// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. 
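///
/// \par Example
/// A sketch of the rejected pattern (names are illustrative only): 'this'
/// may appear in the trailing return type of a non-static member function,
/// but not of a static one.
/// \code
///   struct S {
///     int M;
///     auto Get() -> decltype(this->M);            // OK: non-static
///     // static auto Bad() -> decltype(this->M);  // error: no 'this' in
///     //                                          // a static member
///   };
/// \endcode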
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ).
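///
/// \par Example
/// A minimal sketch (the function name is illustrative only):
/// \code
///   template <typename... Ts>
///   constexpr auto Sum(Ts... Vals) {
///     return (Vals + ... + 0); // binary fold; '+ 0' keeps Sum() well-formed
///   }
///
///   static_assert(Sum(1, 2, 3) == 6, "expands to 1 + (2 + (3 + 0))");
/// \endcode
/// A unary fold over an empty pack is valid only for '&&', '||', and ',';
/// BuildEmptyCXXFoldExpr below produces the corresponding value.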
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return 'true' on failure, 'false' on success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions.
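///
/// \par Example
/// A minimal sketch (the exception type is illustrative only):
/// \code
///   #include <stdexcept>
///
///   int Checked(int X) {
///     if (X < 0)
///       throw std::invalid_argument("negative"); // throw with operand
///     return X;
///   }
///
///   void Rethrow() {
///     try { Checked(-1); }
///     catch (...) { throw; } // rethrow: no operand
///   }
/// \endcode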
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
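///
/// \par Example
/// A minimal sketch (the class is illustrative only): a class-scope
/// 'operator new' is found at AFS_Class, while '::new' forces AFS_Global.
/// \code
///   #include <cstdlib>
///   #include <new>
///
///   struct Pooled {
///     static void *operator new(std::size_t N) { return std::malloc(N); }
///     static void operator delete(void *P) { std::free(P); }
///   };
///
///   Pooled *A = new Pooled;   // uses Pooled::operator new
///   Pooled *B = ::new Pooled; // bypasses the class-scope overloads
/// \endcode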
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression. ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with an ExprWithCleanups node. /// Otherwise, just returns the passed-in expression.
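///
/// \par Example
/// A minimal sketch (the type is illustrative only): the Tracer temporary
/// below has a non-trivial destructor, so it is bound with a
/// CXXBindTemporaryExpr and destroyed at the end of the full-expression.
/// \code
///   struct Tracer {
///     ~Tracer();        // non-trivial destructor
///     int Value() const;
///   };
///
///   int Use() {
///     return Tracer().Value(); // temporary destroyed after the call
///   }
/// \endcode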
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
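///
/// \par Example
/// The annotation mechanism round-trips specifiers such as the ones in
/// this sketch (names are illustrative only):
/// \code
///   namespace NS { struct Outer { struct Inner { static int X; }; }; }
///
///   int Y = NS::Outer::Inner::X; // 'NS::Outer::Inner::' is one
///                                // nested-name-specifier chain
/// \endcode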
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl * startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Optional<std::pair<unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. 
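///
/// \par Example
/// A minimal sketch of the init-captures handled here (names are
/// illustrative only):
/// \code
///   #include <memory>
///   #include <utility>
///
///   auto MakeCounter() {
///     auto P = std::make_unique<int>(0);
///     // 'Step = 2' copy-initializes; 'Q = std::move(P)' moves into the
///     // closure, making it move-only.
///     return [Step = 2, Q = std::move(P)]() mutable { return *Q += Step; };
///   }
/// \endcode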
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
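///
/// \par Example
/// A sketch of the conversions being defined (illustrative only); only
/// captureless lambdas have them:
/// \code
///   int (*FnPtr)(int) = [](int X) { return 2 * X; }; // OK: no captures
///
///   // int K = 1;
///   // int (*Bad)(int) = [K](int X) { return K * X; }; // error: capture
/// \endcode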
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void 
ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. 
The caller must /// ensure that referenceDLLExportedClassMethods is called at some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator. /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed
a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
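///
/// \par Example
/// A sketch of the rules being checked (types are illustrative only):
/// \code
///   struct Base {
///     virtual void Step() final;
///     virtual Base *Clone() const;
///   };
///
///   struct Derived : Base {
///     Derived *Clone() const override; // OK: covariant return type
///     // void Step() override;         // error: 'Step' is marked 'final'
///   };
/// \endcode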
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
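///
/// \par Example
/// A minimal sketch of a deduction-guide declaration (names are
/// illustrative only):
/// \code
///   template <typename T> struct Pair { T First; T Second; };
///
///   template <typename T> Pair(T, T) -> Pair<T>; // deduction guide
///
///   Pair P{1, 2}; // deduces Pair<int>
/// \endcode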
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, ConceptDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, 
                                         unsigned TagSpec,
                                         SourceLocation KWLoc,
                                         CXXScopeSpec &SS,
                                         IdentifierInfo *Name,
                                         SourceLocation NameLoc,
                                         const ParsedAttributesView &Attr);
  DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
                                        SourceLocation TemplateLoc,
                                        Declarator &D);

  TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(
      TemplateDecl *Template, SourceLocation TemplateLoc,
      SourceLocation RAngleLoc, Decl *Param,
      SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg);

  /// Specifies the context in which a particular template
  /// argument is being checked.
  enum CheckTemplateArgumentKind {
    /// The template argument was specified in the code or was
    /// instantiated with some deduced template arguments.
    CTAK_Specified,

    /// The template argument was deduced via template argument
    /// deduction.
    CTAK_Deduced,

    /// The template argument was deduced from an array bound
    /// via template argument deduction.
    CTAK_DeducedFromArrayBound
  };

  bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg,
                             NamedDecl *Template, SourceLocation TemplateLoc,
                             SourceLocation RAngleLoc,
                             unsigned ArgumentPackIndex,
                             SmallVectorImpl<TemplateArgument> &Converted,
                             CheckTemplateArgumentKind CTAK = CTAK_Specified);

  /// Check that the given template arguments can be provided to
  /// the given template, converting the arguments along the way.
  ///
  /// \param Template The template to which the template arguments are being
  /// provided.
  ///
  /// \param TemplateLoc The location of the template name in the source.
  ///
  /// \param TemplateArgs The list of template arguments. If the template is
  /// a template template parameter, this function may extend the set of
  /// template arguments to also include substituted, defaulted template
  /// arguments.
  ///
  /// \param PartialTemplateArgs True if the list of template arguments is
  /// intentionally partial, e.g., because we're checking just the initial
  /// set of template arguments.
  ///
  /// \param Converted Will receive the converted, canonicalized template
  /// arguments.
  ///
  /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
  /// contain the converted forms of the template arguments as written.
  /// Otherwise, \p TemplateArgs will not be modified.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool CheckTemplateArgumentList(TemplateDecl *Template,
                                 SourceLocation TemplateLoc,
                                 TemplateArgumentListInfo &TemplateArgs,
                                 bool PartialTemplateArgs,
                                 SmallVectorImpl<TemplateArgument> &Converted,
                                 bool UpdateArgsWithConversions = true);

  bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
                                 TemplateArgumentLoc &Arg,
                                 SmallVectorImpl<TemplateArgument> &Converted);

  bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg);
  ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
                                   QualType InstantiatedParamType, Expr *Arg,
                                   TemplateArgument &Converted,
                                   CheckTemplateArgumentKind CTAK = CTAK_Specified);
  bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
                                     TemplateArgumentLoc &Arg);

  ExprResult
  BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
                                          QualType ParamType,
                                          SourceLocation Loc);
  ExprResult
  BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
                                              SourceLocation Loc);

  /// Enumeration describing how template parameter lists are compared
  /// for equality.
  enum TemplateParameterListEqualKind {
    /// We are matching the template parameter lists of two templates
    /// that might be redeclarations.
/// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
  TypeResult
  ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                    const CXXScopeSpec &SS, SourceLocation TemplateLoc,
                    TemplateTy TemplateName, IdentifierInfo *TemplateII,
                    SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
                    ASTTemplateArgsPtr TemplateArgs,
                    SourceLocation RAngleLoc);

  QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                             SourceLocation KeywordLoc,
                             NestedNameSpecifierLoc QualifierLoc,
                             const IdentifierInfo &II, SourceLocation IILoc);

  TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
                                                    SourceLocation Loc,
                                                    DeclarationName Name);
  bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
  ExprResult RebuildExprInCurrentInstantiation(Expr *E);
  bool RebuildTemplateParamsInCurrentInstantiation(
      TemplateParameterList *Params);

  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgumentList &Args);
  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgument *Args,
                                  unsigned NumArgs);

  // Concepts
  Decl *ActOnConceptDefinition(
      Scope *S, MultiTemplateParamsArg TemplateParameterLists,
      IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);

  //===--------------------------------------------------------------------===//
  // C++ Variadic Templates (C++0x [temp.variadic])
  //===--------------------------------------------------------------------===//

  /// Determine whether an unexpanded parameter pack might be permitted in this
  /// location. Useful for error recovery.
  bool isUnexpandedParameterPackPermitted();

  /// The context in which an unexpanded parameter pack is
  /// being diagnosed.
  ///
  /// Note that the values of this enumeration line up with the first
  /// argument to the \c err_unexpanded_parameter_pack diagnostic.
  enum UnexpandedParameterPackContext {
    /// An arbitrary expression.
    UPPC_Expression = 0,

    /// The base type of a class type.
    UPPC_BaseType,

    /// The type of an arbitrary declaration.
    UPPC_DeclarationType,

    /// The type of a data member.
    UPPC_DataMemberType,

    /// The size of a bit-field.
    UPPC_BitFieldWidth,

    /// The expression in a static assertion.
    UPPC_StaticAssertExpression,

    /// The fixed underlying type of an enumeration.
    UPPC_FixedUnderlyingType,

    /// The enumerator value.
    UPPC_EnumeratorValue,

    /// A using declaration.
    UPPC_UsingDeclaration,

    /// A friend declaration.
    UPPC_FriendDeclaration,

    /// A declaration qualifier.
    UPPC_DeclarationQualifier,

    /// An initializer.
    UPPC_Initializer,

    /// A default argument.
    UPPC_DefaultArgument,

    /// The type of a non-type template parameter.
    UPPC_NonTypeTemplateParameterType,

    /// The type of an exception.
    UPPC_ExceptionType,

    /// Partial specialization.
    UPPC_PartialSpecialization,

    /// Microsoft __if_exists.
    UPPC_IfExists,

    /// Microsoft __if_not_exists.
    UPPC_IfNotExists,

    /// Lambda expression.
    UPPC_Lambda,

    /// Block expression.
    UPPC_Block
  };

  /// Diagnose unexpanded parameter packs.
  ///
  /// \param Loc The location at which we should emit the diagnostic.
  ///
  /// \param UPPC The context in which we are diagnosing unexpanded
  /// parameter packs.
  ///
  /// \param Unexpanded the set of unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
                                        UnexpandedParameterPackContext UPPC,
                                        ArrayRef<UnexpandedParameterPack> Unexpanded);

  /// If the given type contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. 
/// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. 
  enum TemplateDeductionResult {
    /// Template argument deduction was successful.
    TDK_Success = 0,
    /// The declaration was invalid; do nothing.
    TDK_Invalid,
    /// Template argument deduction exceeded the maximum template
    /// instantiation depth (which has already been diagnosed).
    TDK_InstantiationDepth,
    /// Template argument deduction did not deduce a value
    /// for every template parameter.
    TDK_Incomplete,
    /// Template argument deduction did not deduce a value for every
    /// expansion of an expanded template parameter pack.
    TDK_IncompletePack,
    /// Template argument deduction produced inconsistent
    /// deduced values for the given template parameter.
    TDK_Inconsistent,
    /// Template argument deduction failed due to inconsistent
    /// cv-qualifiers on a template parameter type that would
    /// otherwise be deduced, e.g., we tried to deduce T in "const T"
    /// but were given a non-const "X".
    TDK_Underqualified,
    /// Substitution of the deduced template argument values
    /// resulted in an error.
    TDK_SubstitutionFailure,
    /// After substituting deduced template arguments, a dependent
    /// parameter type did not match the corresponding argument.
    TDK_DeducedMismatch,
    /// After substituting deduced template arguments, an element of
    /// a dependent parameter type did not match the corresponding element
    /// of the corresponding argument (when deducing from an initializer list).
    TDK_DeducedMismatchNested,
    /// A non-dependent component of the parameter did not match the
    /// corresponding component of the argument.
    TDK_NonDeducedMismatch,
    /// When performing template argument deduction for a function
    /// template, there were too many call arguments.
    TDK_TooManyArguments,
    /// When performing template argument deduction for a function
    /// template, there were too few call arguments.
    TDK_TooFewArguments,
    /// The explicitly-specified template arguments were not valid
    /// template arguments for the given template.
    TDK_InvalidExplicitArguments,
    /// Checking non-dependent argument conversions failed.
    TDK_NonDependentConversionFailure,
    /// Deduction failed; that's all we know.
    TDK_MiscellaneousDeductionFailure,
    /// CUDA Target attributes do not match.
    TDK_CUDATargetMismatch
  };

  TemplateDeductionResult
  DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult
  DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult SubstituteExplicitTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo &ExplicitTemplateArgs,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
      sema::TemplateDeductionInfo &Info);
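  // Illustrative sketch (not part of the original header): a common way for
  // a caller to invoke one of the deduction entry points above and branch on
  // the result, assuming a Sema instance `S`, a partial specialization
  // `Partial`, and a deduction info object constructed at the point of
  // instantiation:
  //
  //   sema::TemplateDeductionInfo Info(PointOfInstantiation);
  //   Sema::TemplateDeductionResult TDK =
  //       S.DeduceTemplateArguments(Partial, TemplateArgs, Info);
  //   if (TDK != Sema::TDK_Success) {
  //     // TDK says why deduction failed; Info carries the specifics
  //     // (e.g., the deduced arguments involved in the failure).
  //   }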
  /// A function argument from which we performed template argument
  /// deduction for a call.
  struct OriginalCallArg {
    OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
                    unsigned ArgIdx, QualType OriginalArgType)
        : OriginalParamType(OriginalParamType),
          DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
          OriginalArgType(OriginalArgType) {}

    QualType OriginalParamType;
    bool DecomposedParam;
    unsigned ArgIdx;
    QualType OriginalArgType;
  };

  TemplateDeductionResult FinishTemplateArgumentDeduction(
      FunctionTemplateDecl *FunctionTemplate,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
      sema::TemplateDeductionInfo &Info,
      SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
      bool PartialOverloading = false,
      llvm::function_ref<bool()> CheckNonDependent = []{ return false; });

  TemplateDeductionResult DeduceTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
      FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
      bool PartialOverloading,
      llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);

  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                          QualType ArgFunctionType,
                          FunctionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info,
                          bool IsAddressOfFunction = false);

  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          QualType ToType, CXXConversionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                          FunctionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info,
                          bool IsAddressOfFunction = false);

  /// Substitute Replacement for \p auto in \p TypeWithAuto
  QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);

  /// Substitute Replacement for auto in TypeWithAuto
  TypeSourceInfo *SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
                                          QualType Replacement);

  /// Completely replace the \c auto in \p TypeWithAuto by
  /// \p Replacement. This does not retain any \c auto type sugar.
  QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);

  /// Result type of DeduceAutoType.
  enum DeduceAutoResult {
    DAR_Succeeded,
    DAR_Failed,
    DAR_FailedAlreadyDiagnosed
  };

  DeduceAutoResult
  DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
                 Optional<unsigned> DependentDeductionDepth = None);
  DeduceAutoResult
  DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
                 Optional<unsigned> DependentDeductionDepth = None);
  void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
  bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
                        bool Diagnose = true);

  /// Declare implicit deduction guides for a class template if we've
  /// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. 
      /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
      /// provides the template arguments as specified.
      DefaultFunctionArgumentInstantiation,

      /// We are substituting explicit template arguments provided for
      /// a function template. The entity is a FunctionTemplateDecl.
      ExplicitTemplateArgumentSubstitution,

      /// We are substituting a template argument determined as part of
      /// template argument deduction for either a class template
      /// partial specialization or a function template. The
      /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
      /// a TemplateDecl.
      DeducedTemplateArgumentSubstitution,

      /// We are substituting prior template arguments into a new
      /// template parameter. The template parameter itself is either a
      /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
      PriorTemplateArgumentSubstitution,

      /// We are checking the validity of a default template argument that
      /// has been used when naming a template-id.
      DefaultTemplateArgumentChecking,

      /// We are computing the exception specification for a defaulted special
      /// member function.
      ExceptionSpecEvaluation,

      /// We are instantiating the exception specification for a function
      /// template which was deferred until it was needed.
      ExceptionSpecInstantiation,

      /// We are declaring an implicit special member function.
      DeclaringSpecialMember,

      /// We are defining a synthesized function (such as a defaulted special
      /// member).
      DefiningSynthesizedFunction,

      /// Added for Template instantiation observation.
      /// Memoization means we are _not_ instantiating a template because
      /// it is already instantiated (but we entered a context where we
      /// would have had to if it was not already instantiated).
      Memoization
    } Kind;

    /// Was the enclosing context a non-instantiation SFINAE context?
    bool SavedInNonInstantiationSFINAEContext;

    /// The point of instantiation or synthesis within the source code.
    SourceLocation PointOfInstantiation;

    /// The entity that is being synthesized.
    Decl *Entity;

    /// The template (or partial specialization) in which we are
    /// performing the instantiation, for substitutions of prior template
    /// arguments.
    NamedDecl *Template;

    /// The list of template arguments we are substituting, if they
    /// are not part of the entity.
    const TemplateArgument *TemplateArgs;

    // FIXME: Wrap this union around more members, or perhaps store the
    // kind-specific members in the RAII object owning the context.
    union {
      /// The number of template arguments in TemplateArgs.
      unsigned NumTemplateArgs;

      /// The special member being declared or defined.
      CXXSpecialMember SpecialMember;
    };

    ArrayRef<TemplateArgument> template_arguments() const {
      assert(Kind != DeclaringSpecialMember);
      return {TemplateArgs, NumTemplateArgs};
    }

    /// The template deduction info object associated with the
    /// substitution or checking of explicit or deduced template arguments.
    sema::TemplateDeductionInfo *DeductionInfo;

    /// The source range that covers the construct that caused
    /// the instantiation, e.g., the template-id that causes a class
    /// template instantiation.
    SourceRange InstantiationRange;

    CodeSynthesisContext()
        : Kind(TemplateInstantiation),
          SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
          Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
          DeductionInfo(nullptr) {}

    /// Determines whether this template is an actual instantiation
    /// that should be counted toward the maximum instantiation depth.
    bool isInstantiationRecord() const;
  };
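  // Illustrative sketch (not part of the original header): walking the active
  // synthesis contexts, e.g. from a diagnostic note emitter, assuming a Sema
  // instance `S` (the `noteSynthesisStep` helper is hypothetical):
  //
  //   for (const Sema::CodeSynthesisContext &Ctx :
  //        llvm::reverse(S.CodeSynthesisContexts)) {
  //     if (Ctx.Kind == Sema::CodeSynthesisContext::TemplateInstantiation)
  //       noteSynthesisStep(Ctx.PointOfInstantiation, Ctx.Entity);
  //   }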
  /// List of active code synthesis contexts.
  ///
  /// This vector is treated as a stack. As synthesis of one entity requires
  /// synthesis of another, additional contexts are pushed onto the stack.
  SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

  /// Specializations whose definitions are currently being instantiated.
  llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

  /// Non-dependent types used in templates that have already been instantiated
  /// by some template instantiation.
  llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

  /// Extra modules inspected when performing a lookup during a template
  /// instantiation. Computed lazily.
  SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

  /// Cache of additional modules that should be used for name lookup
  /// within the current template instantiation. Computed lazily; use
  /// getLookupModules() to get a complete set.
  llvm::DenseSet<Module*> LookupModulesCache;

  /// Get the set of additional modules that should be checked during
  /// name lookup. A module and its imports become visible when instantiating
  /// a template defined within it.
  llvm::DenseSet<Module*> &getLookupModules();

  /// Map from the most recent declaration of a namespace to the most
  /// recent visible declaration of that namespace.
  llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

  /// Whether we are in a SFINAE context that is not associated with
  /// template instantiation.
  ///
  /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
  /// of a template instantiation or template argument deduction.
  bool InNonInstantiationSFINAEContext;

  /// The number of \p CodeSynthesisContexts that are not template
  /// instantiations and, therefore, should not be counted as part of the
  /// instantiation depth.
  ///
  /// When the instantiation depth reaches the user-configurable limit
  /// \p LangOptions::InstantiationDepth we will abort instantiation.
  // FIXME: Should we have a similar limit for other forms of synthesis?
  unsigned NonInstantiationEntries;

  /// The depth of the context stack at the point when the most recent
  /// error or warning was produced.
  ///
  /// This value is used to suppress printing of redundant context stacks
  /// when there are multiple errors or warnings in the same instantiation.
  // FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
  unsigned LastEmittedCodeSynthesisContextDepth = 0;

  /// The template instantiation callbacks to trace or track
  /// instantiations (objects can be chained).
  ///
  /// These callbacks are used to print, trace, or track template
  /// instantiations as they are being constructed.
  std::vector<std::unique_ptr<TemplateInstantiationCallback>>
      TemplateInstCallbacks;

  /// The current index into pack expansion arguments that will be
  /// used for substitution of parameter packs.
  ///
  /// The pack expansion index will be -1 to indicate that parameter packs
  /// should be instantiated as themselves. Otherwise, the index specifies
  /// which argument within the parameter pack will be used for substitution.
  int ArgumentPackSubstitutionIndex;
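  // Illustrative sketch (not part of the original header): substituting the
  // I'th element of each parameter pack while expanding a pack expansion,
  // using the RAII helper declared below on a Sema instance `S`:
  //
  //   for (unsigned I = 0; I != *NumExpansions; ++I) {
  //     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
  //     // Substitutions performed in this scope use element I of each
  //     // pack; the previous index (e.g., -1) is restored on scope exit.
  //   }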
  /// RAII object used to change the argument pack substitution index
  /// within a \c Sema object.
  ///
  /// See \c ArgumentPackSubstitutionIndex for more information.
  class ArgumentPackSubstitutionIndexRAII {
    Sema &Self;
    int OldSubstitutionIndex;

  public:
    ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
        : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
      Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
    }

    ~ArgumentPackSubstitutionIndexRAII() {
      Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
    }
  };

  friend class ArgumentPackSubstitutionRAII;

  /// For each declaration that involved template argument deduction, the
  /// set of diagnostics that were suppressed during that template argument
  /// deduction.
  ///
  /// FIXME: Serialize this structure to the AST file.
  typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
    SuppressedDiagnosticsMap;
  SuppressedDiagnosticsMap SuppressedDiagnostics;

  /// A stack object to be created when performing template
  /// instantiation.
  ///
  /// Construction of an object of type \c InstantiatingTemplate
  /// pushes the current instantiation onto the stack of active
  /// instantiations. If the size of this stack exceeds the maximum
  /// number of recursive template instantiations, construction
  /// produces an error and \c isInvalid() returns true.
  ///
  /// Destruction of this object will pop the named instantiation off
  /// the stack.
  struct InstantiatingTemplate {
    /// Note that we are instantiating a class template,
    /// function template, variable template, alias template,
    /// or a member thereof.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          Decl *Entity,
                          SourceRange InstantiationRange = SourceRange());

    struct ExceptionSpecification {};
    /// Note that we are instantiating an exception specification
    /// of a function template.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          FunctionDecl *Entity, ExceptionSpecification,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating a default argument in a
    /// template-id.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          TemplateParameter Param, TemplateDecl *Template,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are substituting either explicitly-specified or
    /// deduced template arguments during function template argument deduction.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          FunctionTemplateDecl *FunctionTemplate,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          CodeSynthesisContext::SynthesisKind Kind,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating as part of template
    /// argument deduction for a class template declaration.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          TemplateDecl *Template,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating as part of template
    /// argument deduction for a class template partial
    /// specialization.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ClassTemplatePartialSpecializationDecl *PartialSpec,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating as part of template
    /// argument deduction for a variable template partial
    /// specialization.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          VarTemplatePartialSpecializationDecl *PartialSpec,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating a default argument for a function
    /// parameter.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ParmVarDecl *Param,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are substituting prior template arguments into a
    /// non-type parameter.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          NamedDecl *Template, NonTypeTemplateParmDecl *Param,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange);

    /// Note that we are substituting prior template arguments into a
    /// template template parameter.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          NamedDecl *Template, TemplateTemplateParmDecl *Param,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange);

    /// Note that we are checking the default template argument
    /// against the template parameter for a given template-id.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          TemplateDecl *Template, NamedDecl *Param,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange);

    /// Note that we have finished instantiating this template.
    void Clear();

    ~InstantiatingTemplate() { Clear(); }

    /// Determines whether we have exceeded the maximum number of
    /// recursive template instantiations.
    bool isInvalid() const { return Invalid; }

    /// Determine whether we are already instantiating this
    /// specialization in some surrounding active instantiation.
    bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

  private:
    Sema &SemaRef;
    bool Invalid;
    bool AlreadyInstantiating;
    bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                                 SourceRange InstantiationRange);

    InstantiatingTemplate(
        Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
        SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
        Decl *Entity, NamedDecl *Template = nullptr,
        ArrayRef<TemplateArgument> TemplateArgs = None,
        sema::TemplateDeductionInfo *DeductionInfo = nullptr);

    InstantiatingTemplate(const InstantiatingTemplate&) = delete;
    InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete;
  };

  void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
  void popCodeSynthesisContext();

  /// Determine whether we are currently performing template instantiation.
  bool inTemplateInstantiation() const {
    return CodeSynthesisContexts.size() > NonInstantiationEntries;
  }

  void PrintContextStack() {
    if (!CodeSynthesisContexts.empty() &&
        CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
      PrintInstantiationStack();
      LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
    }
    if (PragmaAttributeCurrentTargetDecl)
      PrintPragmaAttributeInstantiationPoint();
  }
  void PrintInstantiationStack();

  void PrintPragmaAttributeInstantiationPoint();
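  // Illustrative sketch (not part of the original header): the usual shape of
  // an instantiation guarded by the InstantiatingTemplate RAII object above,
  // assuming a Sema instance `S`:
  //
  //   Sema::InstantiatingTemplate Inst(S, PointOfInstantiation, Entity);
  //   if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
  //     return;  // depth limit exceeded (already diagnosed), or recursion
  //   // ... perform the instantiation; the synthesis context is popped
  //   // when Inst goes out of scope.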
  /// Determines whether we are currently in a context where
  /// template argument substitution failures are not considered
  /// errors.
  ///
  /// \returns An empty \c Optional if we're not in a SFINAE context.
  /// Otherwise, contains a pointer that, if non-NULL, contains the nearest
  /// template-deduction context object, which can be used to capture
  /// diagnostics that will be suppressed.
  Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

  /// Determines whether we are currently in a context that
  /// is not evaluated as per C++ [expr] p5.
  bool isUnevaluatedContext() const {
    assert(!ExprEvalContexts.empty() &&
           "Must be in an expression evaluation context");
    return ExprEvalContexts.back().isUnevaluated();
  }

  /// RAII class used to determine whether SFINAE has
  /// trapped any errors that occur during template argument
  /// deduction.
  class SFINAETrap {
    Sema &SemaRef;
    unsigned PrevSFINAEErrors;
    bool PrevInNonInstantiationSFINAEContext;
    bool PrevAccessCheckingSFINAE;
    bool PrevLastDiagnosticIgnored;

  public:
    explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
        : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
          PrevInNonInstantiationSFINAEContext(
              SemaRef.InNonInstantiationSFINAEContext),
          PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
          PrevLastDiagnosticIgnored(
              SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
      if (!SemaRef.isSFINAEContext())
        SemaRef.InNonInstantiationSFINAEContext = true;
      SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
    }

    ~SFINAETrap() {
      SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
      SemaRef.InNonInstantiationSFINAEContext =
          PrevInNonInstantiationSFINAEContext;
      SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
      SemaRef.getDiagnostics().setLastDiagnosticIgnored(
          PrevLastDiagnosticIgnored);
    }

    /// Determine whether any SFINAE errors have been trapped.
    bool hasErrorOccurred() const {
      return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
    }
  };

  /// RAII class used to indicate that we are performing provisional
  /// semantic analysis to determine the validity of a construct, so
  /// typo-correction and diagnostics in the immediate context (not within
  /// implicitly-instantiated templates) should be suppressed.
  class TentativeAnalysisScope {
    Sema &SemaRef;
    // FIXME: Using a SFINAETrap for this is a hack.
    SFINAETrap Trap;
    bool PrevDisableTypoCorrection;

  public:
    explicit TentativeAnalysisScope(Sema &SemaRef)
        : SemaRef(SemaRef), Trap(SemaRef, true),
          PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
      SemaRef.DisableTypoCorrection = true;
    }
    ~TentativeAnalysisScope() {
      SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
    }
  };

  /// The current instantiation scope used to store local
  /// variables.
  LocalInstantiationScope *CurrentInstantiationScope;

  /// Tracks whether we are in a context where typo correction is
  /// disabled.
  bool DisableTypoCorrection;

  /// The number of typos corrected by CorrectTypo.
  unsigned TyposCorrected;

  typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
  typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

  /// A cache containing identifiers for which typo correction failed and
  /// their locations, so that repeated attempts to correct an identifier in a
  /// given location are ignored if typo correction already failed for it.
  IdentifierSourceLocations TypoCorrectionFailures;

  /// Worker object for performing CFG-based warnings.
  sema::AnalysisBasedWarnings AnalysisWarnings;
  threadSafety::BeforeSet *ThreadSafetyDeclCache;
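  // Illustrative sketch (not part of the original header): trapping errors
  // during a provisional substitution with the SFINAETrap class above,
  // assuming a Sema instance `S`:
  //
  //   Sema::SFINAETrap Trap(S);
  //   ExprResult E = S.SubstExpr(Pattern, TemplateArgs);
  //   if (Trap.hasErrorOccurred() || E.isInvalid())
  //     return Sema::TDK_SubstitutionFailure;  // errors were suppressed,
  //                                            // not emitted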
  /// An entity for which implicit template instantiation is required.
  ///
  /// The source location associated with the declaration is the first place in
  /// the source code where the declaration was "used". It is not necessarily
  /// the point of instantiation (which will be either before or after the
  /// namespace-scope declaration that triggered this implicit instantiation).
  /// However, it is the location that diagnostics should generally refer to,
  /// because users will need to know what code triggered the instantiation.
  typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

  /// The queue of implicit template instantiations that are required
  /// but have not yet been performed.
  std::deque<PendingImplicitInstantiation> PendingInstantiations;

  /// Queue of implicit template instantiations that cannot be performed
  /// eagerly.
  SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

  class GlobalEagerInstantiationScope {
  public:
    GlobalEagerInstantiationScope(Sema &S, bool Enabled)
        : S(S), Enabled(Enabled) {
      if (!Enabled) return;

      SavedPendingInstantiations.swap(S.PendingInstantiations);
      SavedVTableUses.swap(S.VTableUses);
    }

    void perform() {
      if (Enabled) {
        S.DefineUsedVTables();
        S.PerformPendingInstantiations();
      }
    }

    ~GlobalEagerInstantiationScope() {
      if (!Enabled) return;

      // Restore the set of pending vtables.
      assert(S.VTableUses.empty() &&
             "VTableUses should be empty before it is discarded.");
      S.VTableUses.swap(SavedVTableUses);

      // Restore the set of pending implicit instantiations.
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    }

  private:
    Sema &S;
    SmallVector<VTableUse, 16> SavedVTableUses;
    std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
    bool Enabled;
  };

  /// The queue of implicit template instantiations that are required
  /// and must be performed within the current local scope.
  ///
  /// This queue is only used for member functions of local classes in
  /// templates, which must be instantiated in the same scope as their
  /// enclosing function, so that they can reference function-local
  /// types, static variables, enumerators, etc.
  std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

  class LocalEagerInstantiationScope {
  public:
    LocalEagerInstantiationScope(Sema &S) : S(S) {
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

    void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

    ~LocalEagerInstantiationScope() {
      assert(S.PendingLocalImplicitInstantiations.empty() &&
             "there shouldn't be any pending local implicit instantiations");
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

  private:
    Sema &S;
    std::deque<PendingImplicitInstantiation>
        SavedPendingLocalImplicitInstantiations;
  };

  /// A helper class for building up ExtParameterInfos.
  class ExtParameterInfoBuilder {
    SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
    bool HasInteresting = false;

  public:
    /// Set the ExtParameterInfo for the parameter at the given index.
    void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
      assert(Infos.size() <= index);
      Infos.resize(index);
      Infos.push_back(info);

      if (!HasInteresting)
        HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
    }

    /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
    /// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl 
*Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;

DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance,
                              SourceLocation varianceLoc, unsigned index,
                              IdentifierInfo *paramName,
                              SourceLocation paramLoc,
                              SourceLocation colonLoc, ParsedType typeBound);

ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
                                          ArrayRef<Decl *> typeParams,
                                          SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);

Decl *ActOnStartClassInterface(
    Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *SuperName, SourceLocation SuperLoc,
    ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc,
                                     ObjCInterfaceDecl *IDecl,
                                     IdentifierInfo *ClassName,
                                     SourceLocation ClassLoc,
                                     IdentifierInfo *SuperName,
                                     SourceLocation SuperLoc,
                                     ArrayRef<ParsedType> SuperTypeArgs,
                                     SourceRange SuperTypeArgsRange);

void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                             SmallVectorImpl<SourceLocation> &ProtocolLocs,
                             IdentifierInfo *SuperName,
                             SourceLocation SuperLoc);

Decl *ActOnCompatibilityAlias(SourceLocation AtCompatibilityAliasLoc,
                              IdentifierInfo *AliasName,
                              SourceLocation AliasLocation,
                              IdentifierInfo *ClassName,
                              SourceLocation ClassLocation);

bool CheckForwardProtocolDeclarationForCircularDependency(
    IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc,
    const ObjCList<ObjCProtocolDecl> &PList);

Decl *ActOnStartProtocolInterface(
    SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
    SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
    unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
    SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);

Decl *ActOnStartCategoryInterface(
    SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                    IdentifierInfo *ClassName,
                                    SourceLocation ClassLoc,
                                    IdentifierInfo *SuperClassname,
                                    SourceLocation SuperClassLoc,
                                    const ParsedAttributesView &AttrList);

Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *CatName,
                                       SourceLocation CatLoc,
                                       const ParsedAttributesView &AttrList);

DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                             ArrayRef<Decl *> Decls);

DeclGroupPtrTy ActOnForwardClassDeclaration(
    SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs,
    ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts);

DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
                                ArrayRef<IdentifierLocPair> IdentList,
                                const ParsedAttributesView &attrList);

void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                             ArrayRef<IdentifierLocPair> ProtocolId,
                             SmallVectorImpl<Decl *> &Protocols);

void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
                                  SourceLocation ProtocolLoc,
                                  IdentifierInfo *TypeArgId,
                                  SourceLocation TypeArgLoc,
                                  bool SelectProtocolFirst = false);

/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
    Scope *S, ParsedType baseType, SourceLocation lAngleLoc,
    ArrayRef<IdentifierInfo *> identifiers,
    ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc,
    SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs,
    SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc,
    SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc,
    bool warnOnIncompleteProtocols);

/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
    SourceLocation lAngleLoc, ArrayRef<Decl *> protocols,
    ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc);

/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
    Scope *S, SourceLocation Loc, ParsedType BaseType,
    SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs,
    SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc,
    ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs,
    SourceLocation ProtocolRAngleLoc);

/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                SourceLocation ProtocolLAngleLoc,
                                ArrayRef<ObjCProtocolDecl *> Protocols,
                                ArrayRef<SourceLocation> ProtocolLocs,
                                SourceLocation ProtocolRAngleLoc,
                                bool FailOnError = false);

/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc,
                             SourceLocation TypeArgsLAngleLoc,
                             ArrayRef<TypeSourceInfo *> TypeArgs,
                             SourceLocation TypeArgsRAngleLoc,
                             SourceLocation ProtocolLAngleLoc,
                             ArrayRef<ObjCProtocolDecl *> Protocols,
                             ArrayRef<SourceLocation> ProtocolLocs,
                             SourceLocation ProtocolRAngleLoc,
                             bool FailOnError = false);

/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc,
                                 unsigned &Attributes,
                                 bool propertyInPrimaryClass);

/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. 
enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . 
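///
/// The source-level construct handled here looks like this (illustrative
/// user code, not part of this header):
/// \code
///   #pragma GCC visibility push(hidden)
///   void InternalHelper();   // receives hidden visibility
///   #pragma GCC visibility pop
/// \endcode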
void ActOnPragmaVisibility(const IdentifierInfo *VisType,
                           SourceLocation PragmaLoc);

NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                               SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo *WeakName, SourceLocation PragmaLoc,
                       SourceLocation WeakNameLoc);

/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo *WeakName,
                                IdentifierInfo *AliasName,
                                SourceLocation PragmaLoc,
                                SourceLocation WeakNameLoc,
                                SourceLocation AliasNameLoc);

/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo *WeakName, IdentifierInfo *AliasName,
                          SourceLocation PragmaLoc,
                          SourceLocation WeakNameLoc,
                          SourceLocation AliasNameLoc);

/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);

/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);

/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);

/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);

/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();

/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                 SourceLocation Loc);

/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);

/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();

/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);

void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
                                   SourceLocation PragmaLoc,
                                   attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
                                   const IdentifierInfo *Namespace);

/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
                             const IdentifierInfo *Namespace);

/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);

void DiagnoseUnterminatedPragmaAttribute();

/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}

/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                    bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
                    bool IsPackExpansion);

/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                          Expr *OE);

/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
                       Expr *ParamExpr);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
                         Expr *MaxThreads, Expr *MinBlocks);

/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
                 bool InInstantiation = false);

void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
                         ParameterABI ABI);

enum class RetainOwnershipKind { NS, CF, OS };
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
                      RetainOwnershipKind K, bool IsTemplateInstantiation);

/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
                                    Expr *Min, Expr *Max);

/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. 
If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckForDelayedContext = true); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee, bool CheckCaller = true); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Finishes analysis of the deferred functions calls that may be declared as /// host/nohost during device/host compilation. void finalizeOpenMPDelayedAnalysis(); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); /// Marks all the functions that might be required for the currently active /// OpenMP context. void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse); public: /// Struct to store the context selectors info for declare variant directive. 
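///
/// For instance, a declare variant directive of the form (illustrative user
/// code; the function names are made up):
/// \code
///   #pragma omp declare variant(vendor_saxpy) \
///       match(implementation = {vendor(score(100) : ibm)})
///   void saxpy(int n, float a, const float *x, float *y);
/// \endcode
/// would be described with the 'implementation' selector set as CtxSet,
/// 'vendor' as Ctx, "ibm" as ImplVendor, and the expression 100 as CtxScore.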
struct OpenMPDeclareVariantCtsSelectorData {
  OMPDeclareVariantAttr::CtxSelectorSetType CtxSet =
      OMPDeclareVariantAttr::CtxSetUnknown;
  OMPDeclareVariantAttr::CtxSelectorType Ctx =
      OMPDeclareVariantAttr::CtxUnknown;
  StringRef ImplVendor;
  ExprResult CtxScore;
  explicit OpenMPDeclareVariantCtsSelectorData() = default;
  explicit OpenMPDeclareVariantCtsSelectorData(
      OMPDeclareVariantAttr::CtxSelectorSetType CtxSet,
      OMPDeclareVariantAttr::CtxSelectorType Ctx, StringRef ImplVendor,
      ExprResult CtxScore)
      : CtxSet(CtxSet), Ctx(Ctx), ImplVendor(ImplVendor), CtxScore(CtxScore) {}
};

/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
    const FunctionDecl *OldFD, const FunctionDecl *NewFD,
    const PartialDiagnostic &NoProtoDiagID,
    const PartialDiagnosticAt &NoteCausedDiagIDAt,
    const PartialDiagnosticAt &NoSupportDiagIDAt,
    const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
    bool ConstexprSupported);

/// Tries to capture the lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);

/// Return true if the provided declaration \p D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
                           unsigned OpenMPCaptureLevel) const;

/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
                              unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
                                 ExprObjectKind OK, SourceLocation Loc);

/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();

/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;

/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);

/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;

ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                  Expr *Op);

/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                         const DeclarationNameInfo &DirName, Scope *CurScope,
                         SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);

/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

// OpenMP directives and clauses.
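// A rough sketch of the order in which the parser is expected to drive the
// callbacks below for a single executable directive (hypothetical driver
// code; error handling omitted):
//
//   Actions.StartOpenMPDSABlock(DKind, DirName, CurScope, Loc);
//   //   ... one ActOnOpenMP*Clause() call per parsed clause ...
//   Actions.ActOnOpenMPRegionStart(DKind, CurScope);
//   //   ... parse the associated statement into InnerStmt ...
//   StmtResult AStmt = Actions.ActOnOpenMPRegionEnd(InnerStmt, Clauses);
//   StmtResult Dir = Actions.ActOnOpenMPExecutableDirective(
//       DKind, DirName, CancelRegion, Clauses, AStmt.get(), StartLoc, EndLoc);
//   Actions.EndOpenMPDSABlock(Dir.get());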
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                   const DeclarationNameInfo &Id,
                                   OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
                                                 ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
                                                ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
                                            ArrayRef<Expr *> VarList,
                                            ArrayRef<OMPClause *> Clauses,
                                            DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
                                            ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the Requires directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
                                      ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
                                         TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
    Scope *S, DeclContext *DC, DeclarationName Name,
    ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
    AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
                                               VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
    Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
                                      TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
    Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
    SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
    Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
                                              Scope *S, QualType MapperType,
                                              SourceLocation StartLoc,
                                              DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(
    OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList);
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
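///
/// The bracketed source form these two callbacks delimit (illustrative user
/// code):
/// \code
///   #pragma omp declare target
///   int DeviceData[64];   // mapped into the device data environment
///   void DeviceFn();      // also compiled for the device
///   #pragma omp end declare target
/// \endcode
/// Each '#pragma omp declare target' increments DeclareTargetNestingLevel,
/// so isInOpenMPDeclareTargetContext() is true between the two pragmas.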
void ActOnFinishOpenMPDeclareTargetDirective(); /// Searches for the provided declaration name for OpenMP declare target /// directive. NamedDecl * lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, NamedDeclSetType &SameDirectiveDecls); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT, OMPDeclareTargetDeclAttr::DevTypeTy DT); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
                               SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
                           OpenMPLinearClauseKind LinKind, QualType Type);

/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
    DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
    Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
    ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
    ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);

/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \returns None if the function and the variant function are not compatible
/// with the pragma; otherwise, the pair of the original function and the
/// variant ref expression.
Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(
    DeclGroupPtrTy DG, Expr *VariantRef, SourceRange SR);

/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param Data Set of context-specific data for the specified context
/// selector.
void ActOnOpenMPDeclareVariantDirective(
    FunctionDecl *FD, Expr *VariantRef, SourceRange SR,
    const Sema::OpenMPDeclareVariantCtsSelectorData &Data);

OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
                               Expr *Condition, SourceLocation StartLoc,
                               SourceLocation LParenLoc,
                               SourceLocation NameModifierLoc,
                               SourceLocation ColonLoc,
                               SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
  /// Called on well-formed 'collapse' clause.
  OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'ordered' clause.
  OMPClause *
  ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
                           SourceLocation LParenLoc = SourceLocation(),
                           Expr *NumForLoops = nullptr);
  /// Called on well-formed 'grainsize' clause.
  OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'num_tasks' clause.
  OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'hint' clause.
  OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

  OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument,
                                     SourceLocation ArgumentLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'default' clause.
  OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
                                      SourceLocation KindLoc,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'proc_bind' clause.
  OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
                                       SourceLocation KindLoc,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  OMPClause *ActOnOpenMPSingleExprWithArgClause(
      OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
      SourceLocation EndLoc);
  /// Called on well-formed 'schedule' clause.
  OMPClause *ActOnOpenMPScheduleClause(
      OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
      OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
      SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);

  OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                               SourceLocation EndLoc);
  /// Called on well-formed 'nowait' clause.
  OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'untied' clause.
  OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'mergeable' clause.
  OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'read' clause.
  OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'write' clause.
  OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'update' clause.
  OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'capture' clause.
  OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'seq_cst' clause.
  OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'threads' clause.
  OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'simd' clause.
  OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
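  // Editorial sketch, not upstream code: building a 'schedule(static, N)'
  // clause through the hook above. Every variable here is a hypothetical
  // local of the calling parser:
  //
  //   OMPClause *Sched = Actions.ActOnOpenMPScheduleClause(
  //       OMPC_SCHEDULE_MODIFIER_unknown, OMPC_SCHEDULE_MODIFIER_unknown,
  //       OMPC_SCHEDULE_static, ChunkSizeExpr, StartLoc, LParenLoc,
  //       SourceLocation(), SourceLocation(), KindLoc, CommaLoc, EndLoc);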
  /// Called on well-formed 'nogroup' clause.
  OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'unified_address' clause.
  OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed 'unified_shared_memory' clause.
  OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                  SourceLocation EndLoc);
  /// Called on well-formed 'reverse_offload' clause.
  OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed 'dynamic_allocators' clause.
  OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
  /// Called on well-formed 'atomic_default_mem_order' clause.
  OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
      OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation EndLoc);

  OMPClause *ActOnOpenMPVarListClause(
      OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
      const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
      CXXScopeSpec &ReductionOrMapperIdScopeSpec,
      DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
      OpenMPLinearClauseKind LinKind,
      ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
      ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
      bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
  /// Called on well-formed 'allocate' clause.
  OMPClause *
  ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
                            SourceLocation StartLoc, SourceLocation ColonLoc,
                            SourceLocation LParenLoc, SourceLocation EndLoc);
  /// Called on well-formed 'private' clause.
  OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'firstprivate' clause.
  OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc);
  /// Called on well-formed 'lastprivate' clause.
  OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed 'shared' clause.
  OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'reduction' clause.
  OMPClause *ActOnOpenMPReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'task_reduction' clause.
  OMPClause *ActOnOpenMPTaskReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'in_reduction' clause.
  OMPClause *ActOnOpenMPInReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);
  /// Called on well-formed 'linear' clause.
  OMPClause *
  ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
                          SourceLocation StartLoc, SourceLocation LParenLoc,
                          OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
                          SourceLocation ColonLoc, SourceLocation EndLoc);
  /// Called on well-formed 'aligned' clause.
  OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
                                      Expr *Alignment, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation ColonLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'copyin' clause.
  OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'copyprivate' clause.
  OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed 'flush' pseudo clause.
  OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'depend' clause.
  OMPClause *
  ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
                          SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
                          SourceLocation StartLoc, SourceLocation LParenLoc,
                          SourceLocation EndLoc);
  /// Called on well-formed 'device' clause.
  OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'map' clause.
  OMPClause *
  ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
                       ArrayRef<SourceLocation> MapTypeModifiersLoc,
                       CXXScopeSpec &MapperIdScopeSpec,
                       DeclarationNameInfo &MapperId,
                       OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
                       SourceLocation MapLoc, SourceLocation ColonLoc,
                       ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
                       ArrayRef<Expr *> UnresolvedMappers = llvm::None);
  /// Called on well-formed 'num_teams' clause.
  OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'thread_limit' clause.
  OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// Called on well-formed 'priority' clause.
  OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'dist_schedule' clause.
  OMPClause *ActOnOpenMPDistScheduleClause(
      OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
  /// Called on well-formed 'defaultmap' clause.
  OMPClause *ActOnOpenMPDefaultmapClause(
      OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
      SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
      SourceLocation KindLoc, SourceLocation EndLoc);
  /// Called on well-formed 'to' clause.
  OMPClause *
  ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
                      DeclarationNameInfo &MapperId,
                      const OMPVarListLocTy &Locs,
                      ArrayRef<Expr *> UnresolvedMappers = llvm::None);
  /// Called on well-formed 'from' clause.
  OMPClause *ActOnOpenMPFromClause(
      ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
      DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
      ArrayRef<Expr *> UnresolvedMappers = llvm::None);
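  // Editorial sketch, not upstream code: a var-list clause such as
  // 'private(a, b)' is built from the already-parsed variable references.
  // 'Vars' and the location arguments are hypothetical parser locals:
  //
  //   OMPClause *Priv =
  //       Actions.ActOnOpenMPPrivateClause(Vars, StartLoc, LParenLoc, EndLoc);
  //   if (Priv)
  //     Clauses.push_back(Priv);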
  /// Called on well-formed 'use_device_ptr' clause.
  OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
                                           const OMPVarListLocTy &Locs);
  /// Called on well-formed 'is_device_ptr' clause.
  OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                          const OMPVarListLocTy &Locs);

  /// The kind of conversion being performed.
  enum CheckedConversionKind {
    /// An implicit conversion.
    CCK_ImplicitConversion,
    /// A C-style cast.
    CCK_CStyleCast,
    /// A functional-style cast.
    CCK_FunctionalCast,
    /// A cast other than a C-style cast.
    CCK_OtherCast,
    /// A conversion for an operand of a builtin overloaded operator.
    CCK_ForBuiltinOverloadedOp
  };

  static bool isCast(CheckedConversionKind CCK) {
    return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
           CCK == CCK_OtherCast;
  }

  /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
  /// cast. If there is already an implicit cast, merge into the existing one.
  /// If isLvalue, the result of the cast is an lvalue.
  ExprResult
  ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                    ExprValueKind VK = VK_RValue,
                    const CXXCastPath *BasePath = nullptr,
                    CheckedConversionKind CCK = CCK_ImplicitConversion);

  /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
  /// to the conversion from scalar type ScalarTy to the Boolean type.
  static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

  /// IgnoredValueConversions - Given that an expression's result is
  /// syntactically ignored, perform any conversions that are
  /// required.
  ExprResult IgnoredValueConversions(Expr *E);

  // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
  // functions and arrays to their respective pointers (C99 6.3.2.1).
  ExprResult UsualUnaryConversions(Expr *E);

  /// CallExprUnaryConversions - a special case of a unary conversion
  /// performed on a function designator of a call expression.
  ExprResult CallExprUnaryConversions(Expr *E);

  // DefaultFunctionArrayConversion - converts functions and arrays
  // to their respective pointers (C99 6.3.2.1).
  ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);

  // DefaultFunctionArrayLvalueConversion - converts functions and
  // arrays to their respective pointers and performs the
  // lvalue-to-rvalue conversion.
  ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                  bool Diagnose = true);

  // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
  // the operand. This is DefaultFunctionArrayLvalueConversion,
  // except that it assumes the operand isn't of function or array
  // type.
  ExprResult DefaultLvalueConversion(Expr *E);

  // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
  // do not have a prototype. Integer promotions are performed on each
  // argument, and arguments that have type float are promoted to double.
  ExprResult DefaultArgumentPromotion(Expr *E);

  /// If \p E is a prvalue denoting an unmaterialized temporary, materialize
  /// it as an xvalue. In C++98, the result will still be a prvalue, because
  /// we don't have xvalues there.
  ExprResult TemporaryMaterializationConversion(Expr *E);

  // Used for emitting the right warning by DefaultVariadicArgumentPromotion
  enum VariadicCallType {
    VariadicFunction,
    VariadicBlock,
    VariadicMethod,
    VariadicConstructor,
    VariadicDoesNotApply
  };

  VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                       const FunctionProtoType *Proto,
                                       Expr *Fn);
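  // Editorial sketch, not upstream code: the usual pattern for forcing an
  // expression to a specific type with ImpCastExprToType. 'S' is a Sema
  // reference and 'E' an already-analyzed integer expression:
  //
  //   ExprResult Conv =
  //       S.ImpCastExprToType(E, S.Context.BoolTy, CK_IntegralToBoolean);
  //   if (Conv.isInvalid())
  //     return ExprError();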
  // Used for determining in which context a type is allowed to be passed to
  // a vararg function.
  enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Undefined,
    VAK_MSVCUndefined,
    VAK_Invalid
  };

  // Determines which VarArgKind fits an expression.
  VarArgKind isValidVarArgType(const QualType &Ty);

  /// Check to see if the given expression is a valid argument to a variadic
  /// function, issuing a diagnostic if not.
  void checkVariadicArgument(const Expr *E, VariadicCallType CT);

  /// Check to see if a given expression could have '.c_str()' called on it.
  bool hasCStrMethod(const Expr *E);

  /// GatherArgumentsForCall - Collects argument expressions for various
  /// forms of call prototypes.
  bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                              const FunctionProtoType *Proto,
                              unsigned FirstParam, ArrayRef<Expr *> Args,
                              SmallVectorImpl<Expr *> &AllArgs,
                              VariadicCallType CallType = VariadicDoesNotApply,
                              bool AllowExplicit = false,
                              bool IsListInitialization = false);

  // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
  // will create a runtime trap if the resulting type is not a POD type.
  ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                              FunctionDecl *FDecl);

  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
  // operands and then handles various conversions that are common to binary
  // operators (C99 6.3.1.8). If both operands aren't arithmetic, this
  // routine returns the first non-arithmetic type found. The client is
  // responsible for emitting appropriate error diagnostics.
  QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                      bool IsCompAssign = false);
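  // Editorial sketch, not upstream code: promoting a trailing argument of a
  // variadic call. 'S', 'Arg', and 'FDecl' are hypothetical locals of code
  // that performs call checking:
  //
  //   ExprResult Promoted = S.DefaultVariadicArgumentPromotion(
  //       Arg, Sema::VariadicFunction, FDecl);
  //   if (Promoted.isInvalid())
  //     return ExprError();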
  /// AssignConvertType - All of the 'assignment' semantic checks return this
  /// enum to indicate whether the assignment was allowed. These checks are
  /// done for simple assignments, as well as initialization, return from
  /// function, argument passing, etc. The query is phrased in terms of a
  /// source and destination type.
  enum AssignConvertType {
    /// Compatible - the types are compatible according to the standard.
    Compatible,

    /// PointerToInt - The assignment converts a pointer to an int, which we
    /// accept as an extension.
    PointerToInt,

    /// IntToPointer - The assignment converts an int to a pointer, which we
    /// accept as an extension.
    IntToPointer,

    /// FunctionVoidPointer - The assignment is between a function pointer and
    /// void*, which the standard doesn't allow, but we accept as an extension.
    FunctionVoidPointer,

    /// IncompatiblePointer - The assignment is between two pointer types that
    /// are not compatible, but we accept them as an extension.
    IncompatiblePointer,

    /// IncompatiblePointerSign - The assignment is between two pointer types
    /// which point to integers which have a different sign, but are otherwise
    /// identical. This is a subset of the above, but broken out because it's
    /// by far the most common case of incompatible pointers.
    IncompatiblePointerSign,

    /// CompatiblePointerDiscardsQualifiers - The assignment discards
    /// c/v/r qualifiers, which we accept as an extension.
    CompatiblePointerDiscardsQualifiers,

    /// IncompatiblePointerDiscardsQualifiers - The assignment
    /// discards qualifiers that we don't permit to be discarded,
    /// like address spaces.
    IncompatiblePointerDiscardsQualifiers,

    /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
    /// changes address spaces in nested pointer types which is not allowed.
    /// For instance, converting __private int ** to __generic int ** is
    /// illegal even though __private could be converted to __generic.
    IncompatibleNestedPointerAddressSpaceMismatch,

    /// IncompatibleNestedPointerQualifiers - The assignment is between two
    /// nested pointer types, and the qualifiers other than the first two
    /// levels differ e.g. char ** -> const char **, but we accept them as an
    /// extension.
    IncompatibleNestedPointerQualifiers,

    /// IncompatibleVectors - The assignment is between two vector types that
    /// have the same size, which we accept as an extension.
    IncompatibleVectors,

    /// IntToBlockPointer - The assignment converts an int to a block
    /// pointer. We disallow this.
    IntToBlockPointer,

    /// IncompatibleBlockPointer - The assignment is between two block
    /// pointer types that are not compatible.
    IncompatibleBlockPointer,

    /// IncompatibleObjCQualifiedId - The assignment is between a qualified
    /// id type and something else (that is incompatible with it). For
    /// example, "id <XXX>" = "Foo *", where "Foo *" doesn't implement the
    /// XXX protocol.
    IncompatibleObjCQualifiedId,

    /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
    /// object with __weak qualifier.
    IncompatibleObjCWeakRef,

    /// Incompatible - We reject this conversion outright, it is invalid to
    /// represent it in the AST.
    Incompatible
  };

  /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
  /// assignment conversion type specified by ConvTy. This returns true if
  /// the conversion was invalid or false if the conversion was accepted.
  bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc,
                                QualType DstType, QualType SrcType,
                                Expr *SrcExpr, AssignmentAction Action,
                                bool *Complained = nullptr);

  /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
  /// enum. If AllowMask is true, then we also allow the complement of a valid
  /// value, to be used as a mask.
  bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                         bool AllowMask) const;

  /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
  /// integer not in the range of enum values.
  void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                              Expr *SrcExpr);

  /// CheckAssignmentConstraints - Perform type checking for assignment,
  /// argument passing, variable initialization, and function return values.
  /// C99 6.5.16.
  AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                               QualType LHSType,
                                               QualType RHSType);

  /// Check assignment constraints and optionally prepare for a conversion of
  /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
  /// is true.
  AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                               ExprResult &RHS, CastKind &Kind,
                                               bool ConvertRHS = true);
  /// Check assignment constraints for an assignment of RHS to LHSType.
  ///
  /// \param LHSType The destination type for the assignment.
  /// \param RHS The source expression for the assignment.
  /// \param Diagnose If \c true, diagnostics may be produced when checking
  ///        for assignability. If a diagnostic is produced, \p RHS will be
  ///        set to ExprError(). Note that this function may still return
  ///        without producing a diagnostic, even for an invalid assignment.
  /// \param DiagnoseCFAudited If \c true, the target is a function parameter
  ///        in an audited Core Foundation API and does not need to be checked
  ///        for ARC retain issues.
  /// \param ConvertRHS If \c true, \p RHS will be updated to model the
  ///        conversions necessary to perform the assignment. If \c false,
  ///        \p Diagnose must also be \c false.
  AssignConvertType
  CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS,
                                   bool Diagnose = true,
                                   bool DiagnoseCFAudited = false,
                                   bool ConvertRHS = true);

  // If the lhs type is a transparent union, check whether we
  // can initialize the transparent union with the given expression.
  AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
                                                             ExprResult &RHS);

  bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);

  bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);

  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       AssignmentAction Action,
                                       bool AllowExplicit = false);
  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       AssignmentAction Action,
                                       bool AllowExplicit,
                                       ImplicitConversionSequence &ICS);
  ExprResult PerformImplicitConversion(
      Expr *From, QualType ToType, const ImplicitConversionSequence &ICS,
      AssignmentAction Action,
      CheckedConversionKind CCK = CCK_ImplicitConversion);
  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       const StandardConversionSequence &SCS,
                                       AssignmentAction Action,
                                       CheckedConversionKind CCK);

  ExprResult PerformQualificationConversion(
      Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
      CheckedConversionKind CCK = CCK_ImplicitConversion);

  /// The following "Check" methods will return a valid/converted QualType
  /// or a null QualType (indicating an error diagnostic was issued).

  /// type checking binary operators (subroutines of CreateBuiltinBinOp).
  QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
                           ExprResult &RHS);
  QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
                                        ExprResult &RHS);
  QualType CheckPointerToMemberOperands( // C++ 5.5
      ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
      SourceLocation OpLoc, bool isIndirect);
  QualType CheckMultiplyDivideOperands( // C99 6.5.5
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
      bool IsDivide);
  QualType CheckRemainderOperands( // C99 6.5.5
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      bool IsCompAssign = false);
  QualType CheckAdditionOperands( // C99 6.5.6
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc, QualType *CompLHSTy = nullptr);
  QualType CheckSubtractionOperands( // C99 6.5.6
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      QualType *CompLHSTy = nullptr);
  QualType CheckShiftOperands( // C99 6.5.7
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc, bool IsCompAssign = false);
  void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
  QualType CheckCompareOperands( // C99 6.5.8/9
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc);
  QualType CheckBitwiseOperands( // C99 6.5.[10...12]
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc);
  QualType CheckLogicalOperands( // C99 6.5.[13,14]
      ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
      BinaryOperatorKind Opc);
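  // Editorial sketch, not upstream code: the common check-then-diagnose
  // pattern for assignments. 'S', 'LHSType', 'RHS', and 'Loc' are
  // hypothetical locals:
  //
  //   Sema::AssignConvertType ConvTy =
  //       S.CheckSingleAssignmentConstraints(LHSType, RHS);
  //   if (RHS.isInvalid() ||
  //       S.DiagnoseAssignmentResult(ConvTy, Loc, LHSType,
  //                                  RHS.get()->getType(), RHS.get(),
  //                                  Sema::AA_Assigning))
  //     return ExprError();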
  // CheckAssignmentOperands is used for both simple and compound assignment.
  // For simple assignment, pass both expressions and a null converted type.
  // For compound assignment, pass both expressions and the converted type.
  QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
      Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc,
      QualType CompoundType);

  ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                     UnaryOperatorKind Opcode, Expr *Op);
  ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                         BinaryOperatorKind Opcode, Expr *LHS,
                                         Expr *RHS);
  ExprResult checkPseudoObjectRValue(Expr *E);
  Expr *recreateSyntacticForm(PseudoObjectExpr *E);

  QualType CheckConditionalOperands( // C99 6.5.15
      ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
      ExprObjectKind &OK, SourceLocation QuestionLoc);
  QualType CXXCheckConditionalOperands( // C++ 5.16
      ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK,
      ExprObjectKind &OK, SourceLocation questionLoc);

  QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                    bool ConvertArgs = true);
  QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1,
                                    ExprResult &E2, bool ConvertArgs = true) {
    Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
    QualType Composite =
        FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
    E1 = E1Tmp;
    E2 = E2Tmp;
    return Composite;
  }

  QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                        SourceLocation QuestionLoc);

  bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                  SourceLocation QuestionLoc);

  void DiagnoseAlwaysNonNullPointer(Expr *E,
                                    Expr::NullPointerConstantKind NullType,
                                    bool IsEqual, SourceRange Range);

  /// type checking for vector binary operators.
  QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                               SourceLocation Loc, bool IsCompAssign,
                               bool AllowBothBool, bool AllowBoolConversion);
  QualType GetSignedVectorType(QualType V);
  QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc,
                                      BinaryOperatorKind Opc);
  QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation Loc);

  bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
  bool isLaxVectorConversion(QualType srcType, QualType destType);

  /// type checking declaration initializers (C99 6.7.8)
  bool CheckForConstantInitializer(Expr *e, QualType t);

  // type checking C++ declaration initializers (C++ [dcl.init]).

  /// ReferenceCompareResult - Expresses the result of comparing two
  /// types (cv1 T1 and cv2 T2) to determine their compatibility for the
  /// purposes of initialization by reference (C++ [dcl.init.ref]p4).
  enum ReferenceCompareResult {
    /// Ref_Incompatible - The two types are incompatible, so direct
    /// reference binding is not possible.
    Ref_Incompatible = 0,
    /// Ref_Related - The two types are reference-related, which means
    /// that their unqualified forms (T1 and T2) are either the same
    /// or T1 is a base class of T2.
    Ref_Related,
    /// Ref_Compatible - The two types are reference-compatible.
    Ref_Compatible
  };

  ReferenceCompareResult
  CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
                               bool &DerivedToBase, bool &ObjCConversion,
                               bool &ObjCLifetimeConversion);

  ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                                 Expr *CastExpr, CastKind &CastKind,
                                 ExprValueKind &VK, CXXCastPath &Path);

  /// Force an expression with unknown-type to an expression of the
  /// given type.
  ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

  /// Type-check an expression that's being passed to an
  /// __unknown_anytype parameter.
  ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result,
                                QualType &paramType);
  // CheckVectorCast - check type constraints for vectors.
  // Since vectors are an extension, there is no C standard reference for
  // this. We allow casting between vectors and integer datatypes of the same
  // size.
  // returns true if the cast is invalid
  bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                       CastKind &Kind);

  /// Prepare `SplattedExpr` for a vector splat operation, adding
  /// implicit casts if necessary.
  ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);

  // CheckExtVectorCast - check type constraints for extended vectors.
  // Since vectors are an extension, there is no C standard reference for
  // this. We allow casting between vectors and integer datatypes of the same
  // size, or vectors and the element type of that vector.
  // returns the cast expr
  ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                                CastKind &Kind);

  ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
                                        SourceLocation LParenLoc,
                                        Expr *CastExpr,
                                        SourceLocation RParenLoc);

  enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };

  /// Checks for invalid conversions and casts between
  /// retainable pointers and other pointer kinds for ARC and Weak.
  ARCConversionResult CheckObjCConversion(SourceRange castRange,
                                          QualType castType, Expr *&op,
                                          CheckedConversionKind CCK,
                                          bool Diagnose = true,
                                          bool DiagnoseCFAudited = false,
                                          BinaryOperatorKind Opc = BO_PtrMemD);

  Expr *stripARCUnbridgedCast(Expr *e);
  void diagnoseARCUnbridgedCast(Expr *e);

  bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                             QualType ExprType);

  /// checkRetainCycles - Check whether an Objective-C message send
  /// might create an obvious retain cycle.
  void checkRetainCycles(ObjCMessageExpr *msg);
  void checkRetainCycles(Expr *receiver, Expr *argument);
  void checkRetainCycles(VarDecl *Var, Expr *Init);

  /// checkUnsafeAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained type.
  bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

  /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained expression.
  void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

  /// CheckMessageArgumentTypes - Check types in an Obj-C message send.
  /// \param Method - May be null.
  /// \param [out] ReturnType - The return type of the send.
  /// \return true iff there were any incompatible types.
  bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
                                 MultiExprArg Args, Selector Sel,
                                 ArrayRef<SourceLocation> SelectorLocs,
                                 ObjCMethodDecl *Method, bool isClassMessage,
                                 bool isSuperMessage, SourceLocation lbrac,
                                 SourceLocation rbrac, SourceRange RecRange,
                                 QualType &ReturnType, ExprValueKind &VK);

  /// Determine the result of a message send expression based on
  /// the type of the receiver, the method expected to receive the message,
  /// and the form of the message send.
  QualType getMessageSendResultType(const Expr *Receiver,
                                    QualType ReceiverType,
                                    ObjCMethodDecl *Method,
                                    bool isClassMessage, bool isSuperMessage);

  /// If the given expression involves a message send to a method
  /// with a related result type, emit a note describing what happened.
  void EmitRelatedResultTypeNote(const Expr *E);
  /// Given that we had incompatible pointer types in a return
  /// statement, check whether we're in a method with a related result
  /// type, and if so, emit a note describing what happened.
  void EmitRelatedResultTypeNoteForReturn(QualType destType);

  class ConditionResult {
    Decl *ConditionVar;
    FullExprArg Condition;
    bool Invalid;
    bool HasKnownValue;
    bool KnownValue;

    friend class Sema;
    ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                    bool IsConstexpr)
        : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
          HasKnownValue(IsConstexpr && Condition.get() &&
                        !Condition.get()->isValueDependent()),
          KnownValue(HasKnownValue &&
                     !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
    explicit ConditionResult(bool Invalid)
        : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
          HasKnownValue(false), KnownValue(false) {}

  public:
    ConditionResult() : ConditionResult(false) {}
    bool isInvalid() const { return Invalid; }
    std::pair<VarDecl *, Expr *> get() const {
      return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                            Condition.get());
    }
    llvm::Optional<bool> getKnownValue() const {
      if (!HasKnownValue)
        return None;
      return KnownValue;
    }
  };
  static ConditionResult ConditionError() { return ConditionResult(true); }

  enum class ConditionKind {
    Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
    ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
    Switch       ///< An integral condition for a 'switch' statement.
  };

  ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr,
                                 ConditionKind CK);

  ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                         SourceLocation StmtLoc,
                                         ConditionKind CK);

  DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

  ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                    SourceLocation StmtLoc, ConditionKind CK);
  ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

  /// CheckBooleanCondition - Diagnose problems involving the use of
  /// the given expression as a boolean condition (e.g. in an if
  /// statement). Also performs the standard function and array
  /// decays, possibly changing the input variable.
  ///
  /// \param Loc - A location associated with the condition, e.g. the
  /// 'if' keyword.
  /// \return true iff there were any errors
  ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                   bool IsConstexpr = false);

  /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an
  /// expression found in an explicit(bool) specifier.
  ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);

  /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
  /// Returns true if the explicit specifier is now resolved.
  bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);

  /// DiagnoseAssignmentAsCondition - Given that an expression is
  /// being used as a boolean condition, warn if it's an assignment.
  void DiagnoseAssignmentAsCondition(Expr *E);

  /// Redundant parentheses over an equality comparison can indicate
  /// that the user intended an assignment used as condition.
  void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

  /// CheckCXXBooleanCondition - Returns true if conversion to bool is
  /// invalid.
  ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);

  /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
  /// the specified width and sign. If an overflow occurs, detect it and emit
  /// the specified diagnostic.
  void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                          unsigned NewWidth, bool NewSign,
                                          SourceLocation Loc, unsigned DiagID);
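  // Editorial sketch, not upstream code: how a parser for an 'if' statement
  // might run its condition through ActOnCondition. 'Actions', 'IfLoc', and
  // 'CondExpr' are hypothetical parser locals:
  //
  //   Sema::ConditionResult Cond = Actions.ActOnCondition(
  //       getCurScope(), IfLoc, CondExpr, Sema::ConditionKind::Boolean);
  //   if (Cond.isInvalid())
  //     return StmtError();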
  /// Checks that the Objective-C declaration is declared in the global scope.
  /// Emits an error and marks the declaration as invalid if it's not declared
  /// in the global scope.
  bool CheckObjCDeclScope(Decl *D);

  /// Abstract base class used for diagnosing integer constant
  /// expression violations.
  class VerifyICEDiagnoser {
  public:
    bool Suppress;

    VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) {}

    virtual void diagnoseNotICE(Sema &S, SourceLocation Loc,
                                SourceRange SR) = 0;
    virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
    virtual ~VerifyICEDiagnoser() {}
  };

  /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
  /// and reports the appropriate diagnostics. Returns false on success.
  /// Can optionally return the value of the expression.
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             VerifyICEDiagnoser &Diagnoser,
                                             bool AllowFold = true);
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             unsigned DiagID,
                                             bool AllowFold = true);
  ExprResult VerifyIntegerConstantExpression(Expr *E,
                                             llvm::APSInt *Result = nullptr);

  /// VerifyBitField - verifies that a bit field expression is an ICE and has
  /// the correct width, and that the field type is valid.
  /// Returns false on success.
  /// Can optionally return whether the bit-field is of width 0
  ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                            QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                            bool *ZeroWidth = nullptr);
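  // Editorial sketch, not upstream code: verifying that an expression is an
  // integer constant expression and capturing its value. 'S' and 'E' are
  // hypothetical locals:
  //
  //   llvm::APSInt Value;
  //   ExprResult ICE = S.VerifyIntegerConstantExpression(E, &Value);
  //   if (ICE.isInvalid())
  //     return ExprError();
  //   // 'Value' now holds the evaluated constant.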
private:
  unsigned ForceCUDAHostDeviceDepth = 0;

public:
  /// Increments our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. So long as this count is greater
  /// than zero, all functions encountered will be __host__ __device__.
  void PushForceCUDAHostDevice();

  /// Decrements our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. Returns false if the count is 0
  /// before decrementing, so you can emit an error.
  bool PopForceCUDAHostDevice();

  /// Diagnostics that are emitted only if we discover that the given function
  /// must be codegen'ed. Because handling these correctly adds overhead to
  /// compilation, this is currently only enabled for CUDA compilations.
  llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
                 std::vector<PartialDiagnosticAt>>
      DeviceDeferredDiags;

  /// A pair of a canonical FunctionDecl and a SourceLocation. When used as
  /// the key in a hashtable, both the FD and location are hashed.
  struct FunctionDeclAndLoc {
    CanonicalDeclPtr<FunctionDecl> FD;
    SourceLocation Loc;
  };

  /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
  /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting
  /// the same deferred diag twice.
  llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

  /// An inverse call graph, mapping known-emitted functions to one of their
  /// known-emitted callers (plus the location of the call).
  ///
  /// Functions that we can tell a priori must be emitted aren't added to this
  /// map.
  llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Caller = */ FunctionDeclAndLoc>
      DeviceKnownEmittedFns;

  /// A partial call graph maintained during CUDA/OpenMP device code
  /// compilation to support deferred diagnostics.
  ///
  /// Functions are only added here if, at the time they're considered, they
  /// are not known-emitted. As soon as we discover that a function is
  /// known-emitted, we remove it and everything it transitively calls from
  /// this set and add those functions to DeviceKnownEmittedFns.
  llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
                                                 SourceLocation>>
      DeviceCallGraph;

  /// Diagnostic builder for CUDA/OpenMP devices errors which may or may not
  /// be deferred.
  ///
  /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
  /// which are not allowed to appear inside __device__ functions and are
  /// allowed to appear in __host__ __device__ functions only if the
  /// host+device function is never codegen'ed.
  ///
  /// To handle this, we use the notion of "deferred diagnostics", where we
  /// attach a diagnostic to a FunctionDecl that's emitted iff it's
  /// codegen'ed.
  ///
  /// This class lets you emit either a regular diagnostic, a deferred
  /// diagnostic, or no diagnostic at all, according to an argument you pass
  /// to its constructor, thus simplifying the process of creating these
  /// "maybe deferred" diagnostics.
  class DeviceDiagBuilder {
  public:
    enum Kind {
      /// Emit no diagnostics.
      K_Nop,
      /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
      K_Immediate,
      /// Emit the diagnostic immediately, and, if it's a warning or error,
      /// also emit a call stack showing how this function can be reached by
      /// an a priori known-emitted function.
      K_ImmediateWithCallStack,
      /// Create a deferred diagnostic, which is emitted only if the function
      /// it's attached to is codegen'ed. Also emit a call stack as with
      /// K_ImmediateWithCallStack.
      K_Deferred
    };

    DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                      FunctionDecl *Fn, Sema &S);
    DeviceDiagBuilder(DeviceDiagBuilder &&D);
    DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
    ~DeviceDiagBuilder();

    /// Convertible to bool: True if we immediately emitted an error, false if
    /// we didn't emit an error or we created a deferred error.
    ///
    /// Example usage:
    ///
    ///   if (DeviceDiagBuilder(...) << foo << bar)
    ///     return ExprError();
    ///
    /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you
    /// probably want to use these instead of creating a DeviceDiagBuilder
    /// yourself.
    operator bool() const { return ImmediateDiag.hasValue(); }

    template <typename T>
    friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                               const T &Value) {
      if (Diag.ImmediateDiag.hasValue())
        *Diag.ImmediateDiag << Value;
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
            << Value;
      return Diag;
    }

  private:
    Sema &S;
    SourceLocation Loc;
    unsigned DiagID;
    FunctionDecl *Fn;
    bool ShowCallStack;

    // Invariant: At most one of these Optionals has a value.
    // FIXME: Switch these to a Variant once that exists.
    llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
    llvm::Optional<unsigned> PartialDiagId;
  };

  /// Indicate that this function (and thus everything it transitively calls)
  /// will be codegen'ed, and emit any deferred diagnostics on this function
  /// and its (transitive) callees.
  void markKnownEmitted(
      Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
      SourceLocation OrigLoc,
      const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurContext is a __host__ function, does not emit any diagnostics.
  /// - If CurContext is a __device__ or __global__ function, emits the
  ///   diagnostics immediately.
  /// - If CurContext is a __host__ __device__ function and we are compiling
  ///   for the device, creates a diagnostic which is emitted if and when we
  ///   realize that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in CUDA device code.
  ///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as host code".
  ///
  /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
  DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);

  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurContext is a `declare target` function or it is known that the
  ///   function is emitted for the device, emits the diagnostics immediately.
  /// - If CurContext is a non-`declare target` function and we are compiling
  ///   for the device, creates a diagnostic which is emitted if and when we
  ///   realize that the function will be codegen'ed.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in NVPTX device code.
  ///  if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc,
                                           unsigned DiagID);

  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as host code".
  ///
  /// - If CurContext is a `declare target` function or it is known that the
  ///   function is emitted for the host, emits the diagnostics immediately.
  /// - If CurContext is a non-host function, just ignore it.
  ///
  /// Example usage:
  ///
  ///  // Variable-length arrays are not allowed in NVPTX device code.
  ///  if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
  ///    return ExprError();
  ///  // Otherwise, continue parsing as normal.
  DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);

  DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);

  enum CUDAFunctionTarget {
    CFT_Device,
    CFT_Global,
    CFT_Host,
    CFT_HostDevice,
    CFT_InvalidTarget
  };

  /// Determines whether the given function is a CUDA device/host/kernel/etc.
  /// function.
  ///
  /// Use this rather than examining the function's attributes yourself --
  /// you will get it wrong. Returns CFT_Host if D is null.
  CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
                                        bool IgnoreImplicitHDAttr = false);
  CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);

  /// Gets the CUDA target for the current context.
  CUDAFunctionTarget CurrentCUDATarget() {
    return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
  }

  // CUDA function call preference. Must be ordered numerically from
  // worst to best.
  enum CUDAFunctionPreference {
    CFP_Never,      // Invalid caller/callee combination.
    CFP_WrongSide,  // Calls from host-device to host or device
                    // function that do not match current compilation
                    // mode.
    CFP_HostDevice, // Any calls to host/device functions.
    CFP_SameSide,   // Calls from host-device to host or device
                    // function matching current compilation mode.
    CFP_Native,     // host-to-host or device-to-device calls.
  };
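  // Editorial sketch, not upstream code: classifying a function with
  // IdentifyCUDATarget. 'S' and 'FD' are hypothetical locals:
  //
  //   if (S.IdentifyCUDATarget(FD) == Sema::CFT_Global) {
  //     // FD is a __global__ kernel; handle kernel-specific restrictions
  //     // here.
  //   }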
  /// Identifies relative preference of a given Caller/Callee
  /// combination, based on their host/device attributes.
  /// \param Caller function which needs address of \p Callee.
  ///               nullptr in case of global context.
  /// \param Callee target function
  ///
  /// \returns preference value for particular Caller/Callee combination.
  CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                                const FunctionDecl *Callee);

  /// Determines whether Caller may invoke Callee, based on their CUDA
  /// host/device attributes. Returns false if the call is not allowed.
  ///
  /// Note: Will return true for CFP_WrongSide calls. These may appear in
  /// semantically correct CUDA programs, but only if they're never
  /// codegen'ed.
  bool IsAllowedCUDACall(const FunctionDecl *Caller,
                         const FunctionDecl *Callee) {
    return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
  }

  /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
  /// depending on FD and the current compilation settings.
  void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                   const LookupResult &Previous);

public:
  /// Check whether we're allowed to call Callee from the current context.
  ///
  /// - If the call is never allowed in a semantically-correct program
  ///   (CFP_Never), emits an error and returns false.
  ///
  /// - If the call is allowed in semantically-correct programs, but only if
  ///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic
  ///   to be emitted if and when the caller is codegen'ed, and returns true.
  ///
  ///   Will only create deferred diagnostics for a given SourceLocation once,
  ///   so you can safely call this multiple times without generating
  ///   duplicate deferred errors.
  ///
  /// - Otherwise, returns true without emitting any diagnostics.
  bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);

  /// Set __device__ or __host__ __device__ attributes on the given lambda
  /// operator() method.
  ///
  /// CUDA lambdas declared inside __device__ or __global__ functions inherit
  /// the __device__ attribute. Similarly, lambdas inside __host__ __device__
  /// functions become __host__ __device__ themselves.
  void CUDASetLambdaAttrs(CXXMethodDecl *Method);

  /// Finds a function in \p Matches with highest calling priority
  /// from \p Caller context and erases all functions with lower
  /// calling priority.
  void EraseUnwantedCUDAMatches(
      const FunctionDecl *Caller,
      SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);

  /// Given an implicit special member, infer its CUDA target from the
  /// calls it needs to make to underlying base/field special members.
  /// \param ClassDecl the class for which the member is being created.
  /// \param CSM the kind of special member.
  /// \param MemberDecl the special member itself.
  /// \param ConstRHS true if this is a copy operation with a const object
  ///        on its RHS.
  /// \param Diagnose true if this call should emit diagnostics.
  /// \return true if there was an error inferring.
  /// The result of this call is implicit CUDA target attribute(s) attached
  /// to the member declaration.
  bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                               CXXSpecialMember CSM,
                                               CXXMethodDecl *MemberDecl,
                                               bool ConstRHS, bool Diagnose);

  /// \return true if \p CD can be considered empty according to CUDA
  /// (E.2.3.1 in CUDA 7.5 Programming guide).
  bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
  bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
  // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
  // case of error emits appropriate diagnostic and invalidates \p Var.
  //
  // \details CUDA allows only empty constructors as initializers for global
  // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to
  // all __shared__ variables whether they are local or not (they all are
  // implicitly static in CUDA). One exception is that CUDA allows constant
  // initializers for __constant__ and __device__ variables.
  void checkAllowedCUDAInitializer(VarDecl *VD);

  /// Check whether NewFD is a valid overload for CUDA. Emits
  /// diagnostics and invalidates NewFD if not.
  void checkCUDATargetOverload(FunctionDecl *NewFD,
                               const LookupResult &Previous);
  /// Copies target attributes from the template TD to the function FD.
  void inheritCUDATargetAttrs(FunctionDecl *FD,
                              const FunctionTemplateDecl &TD);

  /// Returns the name of the launch configuration function. This is the name
  /// of the function that will be called to configure a kernel call, with
  /// the parameters specified via <<<>>>.
  std::string getCudaConfigureFuncName() const;

  /// \name Code completion
  //@{

  /// Describes the context in which code completion occurs.
  enum ParserCompletionContext {
    /// Code completion occurs at top-level or namespace context.
    PCC_Namespace,

    /// Code completion occurs within a class, struct, or union.
    PCC_Class,

    /// Code completion occurs within an Objective-C interface, protocol,
    /// or category.
    PCC_ObjCInterface,

    /// Code completion occurs within an Objective-C implementation or
    /// category implementation
    PCC_ObjCImplementation,

    /// Code completion occurs within the list of instance variables
    /// in an Objective-C interface, protocol, category, or implementation.
    PCC_ObjCInstanceVariableList,

    /// Code completion occurs following one or more template
    /// headers.
    PCC_Template,

    /// Code completion occurs following one or more template
    /// headers within a class.
    PCC_MemberTemplate,

    /// Code completion occurs within an expression.
    PCC_Expression,

    /// Code completion occurs within a statement, which may
    /// also be an expression or a declaration.
    PCC_Statement,

    /// Code completion occurs at the beginning of the
    /// initialization statement (or expression) in a for loop.
    PCC_ForInit,

    /// Code completion occurs within the condition of an if,
    /// while, switch, or for statement.
    PCC_Condition,

    /// Code completion occurs within the body of a function on a
    /// recovery path, where we do not have a specific handle on our position
    /// in the grammar.
    PCC_RecoveryInFunction,

    /// Code completion occurs where only a type is permitted.
    PCC_Type,

    /// Code completion occurs in a parenthesized expression, which
    /// might also be a type cast.
    PCC_ParenthesizedExpression,

    /// Code completion occurs within a sequence of declaration
    /// specifiers within a function, method, or block.
    PCC_LocalDeclarationSpecifiers
  };

  void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
  void CodeCompleteOrdinaryName(Scope *S,
                                ParserCompletionContext CompletionContext);
  void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers,
                            bool AllowNestedNameSpecifiers);

  struct CodeCompleteExpressionData;
  void CodeCompleteExpression(Scope *S,
                              const CodeCompleteExpressionData &Data);
  void CodeCompleteExpression(Scope *S, QualType PreferredType,
                              bool IsParenthesized = false);
  void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
                                       SourceLocation OpLoc, bool IsArrow,
                                       bool IsBaseExprStatement,
                                       QualType PreferredType);
  void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
                                     QualType PreferredType);
  void CodeCompleteTag(Scope *S, unsigned TagSpec);
  void CodeCompleteTypeQualifiers(DeclSpec &DS);
  void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
                                      const VirtSpecifiers *VS = nullptr);
  void CodeCompleteBracketDeclarator(Scope *S);
  void CodeCompleteCase(Scope *S);
  /// Reports signatures for a call to CodeCompleteConsumer and returns the
  /// preferred type for the current argument. Returned type can be null.
  QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
                                    SourceLocation OpenParLoc);
  QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
                                           SourceLocation Loc,
                                           ArrayRef<Expr *> Args,
                                           SourceLocation OpenParLoc);
  QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
                                              CXXScopeSpec SS,
                                              ParsedType TemplateTypeTy,
                                              ArrayRef<Expr *> ArgExprs,
                                              IdentifierInfo *II,
                                              SourceLocation OpenParLoc);
  void CodeCompleteInitializer(Scope *S, Decl *D);
  void CodeCompleteAfterIf(Scope *S);

  void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
                               bool EnteringContext, QualType BaseType,
                               QualType PreferredType);
  void CodeCompleteUsing(Scope *S);
  void CodeCompleteUsingDirective(Scope *S);
  void CodeCompleteNamespaceDecl(Scope *S);
  void CodeCompleteNamespaceAliasDecl(Scope *S);
  void CodeCompleteOperatorName(Scope *S);
  void CodeCompleteConstructorInitializer(
      Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers);

  void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
                                    bool AfterAmpersand);

  void CodeCompleteObjCAtDirective(Scope *S);
  void CodeCompleteObjCAtVisibility(Scope *S);
  void CodeCompleteObjCAtStatement(Scope *S);
  void CodeCompleteObjCAtExpression(Scope *S);
  void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
  void CodeCompleteObjCPropertyGetter(Scope *S);
  void CodeCompleteObjCPropertySetter(Scope *S);
  void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
                                   bool IsParameter);
  void CodeCompleteObjCMessageReceiver(Scope *S);
  void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
                                    ArrayRef<IdentifierInfo *> SelIdents,
                                    bool AtArgumentExpression);
  void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
                                    ArrayRef<IdentifierInfo *> SelIdents,
                                    bool AtArgumentExpression,
                                    bool IsSuper = false);
  void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
                                       ArrayRef<IdentifierInfo *> SelIdents,
                                       bool AtArgumentExpression,
                                       ObjCInterfaceDecl *Super = nullptr);
  void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar);
  void CodeCompleteObjCSelector(Scope *S,
                                ArrayRef<IdentifierInfo *> SelIdents);
  void CodeCompleteObjCProtocolReferences(
      ArrayRef<IdentifierLocPair> Protocols);
  void CodeCompleteObjCProtocolDecl(Scope *S);
  void CodeCompleteObjCInterfaceDecl(Scope *S);
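  // Editorial sketch, not upstream code: a parser typically invokes one of
  // these hooks at the exact point where completion was requested, e.g. at
  // statement scope:
  //
  //   Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Statement);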
  void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
                                  SourceLocation ClassNameLoc);
  void CodeCompleteObjCImplementationDecl(Scope *S);
  void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName,
                                         SourceLocation ClassNameLoc);
  void CodeCompleteObjCImplementationCategory(Scope *S,
                                              IdentifierInfo *ClassName,
                                              SourceLocation ClassNameLoc);
  void CodeCompleteObjCPropertyDefinition(Scope *S);
  void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
                                              IdentifierInfo *PropertyName);
  void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
                                  ParsedType ReturnType);
  void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod,
                                          bool AtParameterName,
                                          ParsedType ReturnType,
                                          ArrayRef<IdentifierInfo *> SelIdents);
  void CodeCompleteObjCClassPropertyRefExpr(Scope *S,
                                            IdentifierInfo &ClassName,
                                            SourceLocation ClassNameLoc,
                                            bool IsBaseExprStatement);
  void CodeCompletePreprocessorDirective(bool InConditional);
  void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
  void CodeCompletePreprocessorMacroName(bool IsDefinition);
  void CodeCompletePreprocessorExpression();
  void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro,
                                             MacroInfo *MacroInfo,
                                             unsigned Argument);
  void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
  void CodeCompleteNaturalLanguage();
  void CodeCompleteAvailabilityPlatformName();
  void GatherGlobalCodeCompletions(
      CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo,
      SmallVectorImpl<CodeCompletionResult> &Results);
  //@}

  //===--------------------------------------------------------------------===//
  // Extra semantic analysis beyond the C type system

public:
  SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                unsigned ByteNo) const;

private:
  void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
                        const ArraySubscriptExpr *ASE = nullptr,
                        bool AllowOnePastEnd = true,
                        bool IndexNegated = false);
  void CheckArrayAccess(const Expr *E);
  // Used to grab the relevant information from a FormatAttr and a
  // FunctionDeclaration.
  struct FormatStringInfo {
    unsigned FormatIdx;
    unsigned FirstDataArg;
    bool HasVAListArg;
  };

  static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                                  FormatStringInfo *FSI);
  bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                         const FunctionProtoType *Proto);
  bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
                           ArrayRef<const Expr *> Args);
  bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
                        const FunctionProtoType *Proto);
  bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
  void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args,
                            const FunctionProtoType *Proto,
                            SourceLocation Loc);

  void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
                 const Expr *ThisArg, ArrayRef<const Expr *> Args,
                 bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
                 VariadicCallType CallType);

  bool CheckObjCString(Expr *Arg);
  ExprResult CheckOSLogFormatStringArg(Expr *Arg);

  ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                                      CallExpr *TheCall);

  void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                           CallExpr *TheCall);

  bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                    unsigned MaxWidth);
  bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
                                         CallExpr *TheCall);
  bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
  bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);

  bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
  bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
  bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
  bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
  bool SemaBuiltinVSX(CallExpr *TheCall);
  bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
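  // Editorial sketch, not upstream code: the per-target builtin checks above
  // follow a "true means error" convention, so a dispatcher can be written
  // as below. 'S', 'BuiltinID', 'TheCall', and 'TheCallResult' are
  // hypothetical locals:
  //
  //   if (S.CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
  //     return ExprError();
  //   return TheCallResult;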
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. 
void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether the receiver is a mutable ObjC container which /// attempts to add itself into the container. void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. 
Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to /// a function exceed the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. 
void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the set of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. 
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
original.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ #pragma omp parallel { #pragma omp for private (i) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp for private (j) for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_M; j++) data[i][j] -= mean[j]; /* Calculate the m * m covariance matrix. */ #pragma omp for private (j2, i) for (j1 = 0; j1 < _PB_M; j1++) for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
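A reading aid for kernel_covariance above (not part of the benchmark): with the code's own normalization, which divides only the mean by float_n, the three loop nests compute

\[
\mathrm{mean}_j = \frac{1}{\mathtt{float\_n}} \sum_{i=0}^{N-1} \mathtt{data}_{ij},
\qquad
\mathrm{symmat}_{j_1 j_2} = \sum_{i=0}^{N-1} \bigl(\mathtt{data}_{ij_1} - \mathrm{mean}_{j_1}\bigr)\bigl(\mathtt{data}_{ij_2} - \mathrm{mean}_{j_2}\bigr),
\]

and because symmat is symmetric, only the upper triangle \(j_2 \ge j_1\) is evaluated and then mirrored into symmat[j2][j1].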
#include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array(int m, int n, DATA_TYPE * float_n, DATA_TYPE POLYBENCH_2D(data, M, N, m, n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i * j) / M; } /* * DCE code. Must scan the entire live-out data. Can be used also to check * the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat, M, M, m, m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* * Main computational kernel. The whole function will be timed, including the * call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data, M, N, m, n), DATA_TYPE POLYBENCH_2D(symmat, M, M, m, m), DATA_TYPE POLYBENCH_1D(mean, M, m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_M; j++) data[i][j] -= mean[j]; /* Calculate the m * m covariance matrix. */ for (j1 = 0; j1 < _PB_M; j1++) for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } #pragma endscop } int main(int argc, char **argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data, DATA_TYPE, M, N, m, n); POLYBENCH_2D_ARRAY_DECL(symmat, DATA_TYPE, M, M, m, m); POLYBENCH_1D_ARRAY_DECL(mean, DATA_TYPE, M, m); /* Initialize array(s). */ init_array(m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance(m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* * Prevent dead-code elimination. All live-out data must be printed by * the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
#include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array(int m, int n, DATA_TYPE * float_n, DATA_TYPE POLYBENCH_2D(data, M, N, m, n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i * j) / M; } /* * DCE code. Must scan the entire live-out data. Can be used also to check * the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat, M, M, m, m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* * Main computational kernel. The whole function will be timed, including the * call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data, M, N, m, n), DATA_TYPE POLYBENCH_2D(symmat, M, M, m, m), DATA_TYPE POLYBENCH_1D(mean, M, m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ #pragma omp parallel { #pragma omp for private (i) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp for private (j) for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_M; j++) data[i][j] -= mean[j]; /* Calculate the m * m covariance matrix. */ #pragma omp for private (j2, i) for (j1 = 0; j1 < _PB_M; j1++) for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } #pragma endscop } int main(int argc, char **argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data, DATA_TYPE, M, N, m, n); POLYBENCH_2D_ARRAY_DECL(symmat, DATA_TYPE, M, M, m, m); POLYBENCH_1D_ARRAY_DECL(mean, DATA_TYPE, M, m); /* Initialize array(s). */ init_array(m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance(m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* * Prevent dead-code elimination. All live-out data must be printed by * the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
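The OpenMP variant above places all three phases inside one '#pragma omp parallel' region and relies on the implicit barrier at the end of each '#pragma omp for' to order them: no thread may begin centering a column until every mean is final, and no thread may begin accumulating cross-products until centering is done. A minimal, self-contained sketch of that single-region, multi-phase pattern (the array, its size, and the reduction are hypothetical, not taken from the benchmark):

#include <stdio.h>

#define NDATA 8

int main(void)
{
    double a[NDATA];
    double s = 0.0;
    #pragma omp parallel
    {
        /* phase 1: iterations are split across the team */
        #pragma omp for
        for (int i = 0; i < NDATA; i++)
            a[i] = (double)i;
        /* implicit barrier here: phase 2 cannot start until every
         * a[i] above has been written */
        #pragma omp for reduction(+:s)
        for (int i = 0; i < NDATA; i++)
            s += a[i];
    }
    printf("sum = %g\n", s); /* 0+1+...+7 = 28 */
    return 0;
}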
GB_unaryop__identity_uint16_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint16_fp64 // op(A') function: GB_tran__identity_uint16_fp64 // C type: uint16_t // A type: double // cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ uint16_t z ; GB_CAST_UNSIGNED(z,aij,16) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint16_fp64 ( uint16_t *Cx, // Cx and Ax may be aliased double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint16_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
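The generated kernel above is assembled entirely from per-type macros: GB_GETA loads aij with the A type, GB_CASTING typecasts it to the C type, GB_OP applies the operator, and GB_CAST_OP glues the three together. A stripped-down sketch of the same composition outside GraphBLAS (every name below is a hypothetical stand-in, and the plain C cast does not reproduce GB_CAST_UNSIGNED's handling of out-of-range values):

#include <stdint.h>
#include <stdio.h>

#define GETA(aij,Ax,p)  double aij = Ax [p]            /* typed load */
#define CASTING(z,aij)  uint16_t z = (uint16_t) aij    /* typecast   */
#define OP(z,x)         z = x                          /* the unop   */
#define CX(p)           Cx [p]

#define CAST_OP(pC,pA)          \
{                               \
    GETA (aij, Ax, pA) ;        \
    CASTING (z, aij) ;          \
    OP (CX (pC), z) ;           \
}

int main(void)
{
    double   Ax [3] = { 1.5, 2.25, 300.0 } ;
    uint16_t Cx [3] ;
    for (int p = 0 ; p < 3 ; p++) CAST_OP (p, p) ;
    printf ("%d %d %d\n", Cx [0], Cx [1], Cx [2]) ;    /* 1 2 300 */
    return (0) ;
}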
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint16_fp64 // op(A') function: GB_tran__identity_uint16_fp64 // C type: uint16_t // A type: double // cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ uint16_t z ; GB_CAST_UNSIGNED(z,aij,16) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint16_fp64 ( uint16_t *Cx, // Cx and Ax may be aliased double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint16_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint16_fp64 // op(A') function: GB_tran__identity_uint16_fp64 // C type: uint16_t // A type: double // cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ uint16_t z ; GB_CAST_UNSIGNED(z,aij,16) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint16_fp64 ( uint16_t *Cx, // Cx and Ax may be aliased double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint16_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
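In the OpenMP build, a bare '#pragma omp parallel for num_threads(nthreads) schedule(static)' is enough for the apply loop because iteration p touches only Cx[p]: the caller fixes the team size through nthreads, and static scheduling hands each thread one contiguous chunk of the index space. A minimal sketch of the same flat-apply pattern (the negation operator and all names are hypothetical):

#include <stdio.h>

static void apply_neg (double *Cx, const double *Ax, long anz, int nthreads)
{
    long p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = -Ax [p] ;      /* each iteration owns its output slot */
    }
}

int main(void)
{
    double Ax [4] = { 1, 2, 3, 4 }, Cx [4] ;
    apply_neg (Cx, Ax, 4, 2) ;
    printf ("%g %g %g %g\n", Cx [0], Cx [1], Cx [2], Cx [3]) ;
    return (0) ;
}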
gemm.c
#include "gemm.h" #include "utils.h" #include "cuda.h" #include <stdlib.h> #include <stdio.h> #include <math.h> void gemm_bin(int M, int N, int K, float ALPHA, char *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ char A_PART = A[i*lda+k]; if(A_PART){ for(j = 0; j < N; ++j){ C[i*ldc+j] += B[k*ldb+j]; } } else { for(j = 0; j < N; ++j){ C[i*ldc+j] -= B[k*ldb+j]; } } } } } float *random_matrix(int rows, int cols) { int i; float *m = calloc(rows*cols, sizeof(float)); for(i = 0; i < rows*cols; ++i){ m[i] = (float)rand()/RAND_MAX; } return m; } void time_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); int i; clock_t start = clock(), end; for(i = 0; i<10; ++i){ gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC); free(a); free(b); free(c); } void gemm(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc); } void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; #pragma omp parallel for for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ register float A_PART = ALPHA*A[i*lda+k]; for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; } } } } void gemm_nt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; #pragma omp parallel for for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ register float sum = 0; for(k = 0; k < K; ++k){ sum += ALPHA*A[i*lda+k]*B[j*ldb + k]; } C[i*ldc+j] += sum; } } } void gemm_tn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; #pragma omp parallel for for(i = 0; i < M; ++i){ for(k = 0; k < K; ++k){ register float A_PART = ALPHA*A[k*lda+i]; for(j = 0; j < N; ++j){ C[i*ldc+j] += A_PART*B[k*ldb+j]; } } } } void gemm_tt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i,j,k; #pragma omp parallel for for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ register float sum = 0; for(k = 0; k < K; ++k){ sum += ALPHA*A[i+k*lda]*B[k+j*ldb]; } C[i*ldc+j] += sum; } } } void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc); int i, j; for(i = 0; i < M; ++i){ for(j = 0; j < N; ++j){ C[i*ldc + j] *= BETA; } } if(!TA && !TB) gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); else if(TA && !TB) gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); else if(!TA && TB) gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); else gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc); } #ifdef GPU #include <math.h> void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A_gpu, int lda, float *B_gpu, int ldb, float BETA, float *C_gpu, int ldc) { cublasHandle_t handle = blas_handle(); cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N), (TA ? 
CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc); check_error(status); } #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> void time_gpu_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); int i; clock_t start = clock(), end; for(i = 0; i<32; ++i){ gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC); free(a); free(b); free(c); } void time_gpu(int TA, int TB, int m, int k, int n) { int iter = 10; float *a = random_matrix(m,k); float *b = random_matrix(k,n); int lda = (!TA)?k:m; int ldb = (!TB)?n:k; float *c = random_matrix(m,n); float *a_cl = cuda_make_array(a, m*k); float *b_cl = cuda_make_array(b, k*n); float *c_cl = cuda_make_array(c, m*n); int i; clock_t start = clock(), end; for(i = 0; i<iter; ++i){ gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n); cudaDeviceSynchronize(); } double flop = ((double)m)*n*(2.*k + 2.)*iter; double gflop = flop/pow(10., 9); end = clock(); double seconds = sec(end-start); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds); cuda_free(a_cl); cuda_free(b_cl); cuda_free(c_cl); free(a); free(b); free(c); } void test_gpu_accuracy(int TA, int TB, int m, int k, int n) { srand(0); float *a; if(!TA) a = random_matrix(m,k); else a = random_matrix(k,m); int lda = (!TA)?k:m; float *b; if(!TB) b = random_matrix(k,n); else b = random_matrix(n,k); int ldb = (!TB)?n:k; float *c = random_matrix(m,n); float *c_gpu = random_matrix(m,n); memset(c, 0, m*n*sizeof(float)); memset(c_gpu, 0, m*n*sizeof(float)); int i; //pm(m,k,b); gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n); //printf("GPU\n"); //pm(m, n, c_gpu); gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n); //printf("\n\nCPU\n"); //pm(m, n, c); double sse = 0; for(i = 0; i < m*n; ++i) { //printf("%f %f\n", c[i], c_gpu[i]); sse += pow(c[i]-c_gpu[i], 2); } printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n)); free(a); free(b); free(c); free(c_gpu); } int test_gpu_blas() { /* test_gpu_accuracy(0,0,10,576,75); test_gpu_accuracy(0,0,17,10,10); test_gpu_accuracy(1,0,17,10,10); test_gpu_accuracy(0,1,17,10,10); test_gpu_accuracy(1,1,17,10,10); test_gpu_accuracy(0,0,1000,10,100); test_gpu_accuracy(1,0,1000,10,100); test_gpu_accuracy(0,1,1000,10,100); test_gpu_accuracy(1,1,1000,10,100); test_gpu_accuracy(0,0,10,10,10); time_gpu(0,0,64,2916,363); time_gpu(0,0,64,2916,363); time_gpu(0,0,64,2916,363); time_gpu(0,0,192,729,1600); time_gpu(0,0,384,196,1728); time_gpu(0,0,256,196,3456); time_gpu(0,0,256,196,2304); time_gpu(0,0,128,4096,12544); time_gpu(0,0,128,4096,4096); */ time_gpu(0,0,64,75,12544); time_gpu(0,0,64,75,12544); time_gpu(0,0,64,75,12544); time_gpu(0,0,64,576,12544); time_gpu(0,0,256,2304,784); time_gpu(1,1,2304,256,784); time_gpu(0,0,512,4608,196); time_gpu(1,1,4608,512,196); return 0; } #endif
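A note on the cublasSgemm call in gemm_gpu above: cuBLAS assumes column-major storage, while every CPU path in this file indexes row-major (C[i*ldc + j]). Reinterpreting a row-major matrix as column-major yields its transpose, and (AB)^T = B^T A^T, so passing the operands as (B, A) with dimensions (N, M, K) makes the column-major routine leave the row-major product in C with no explicit transposition. A tiny self-contained check of that identity in plain C (no CUDA required; the 2x2 values are hypothetical):

#include <stdio.h>

int main(void)
{
    /* row-major A and B, both 2x2 */
    double A[4] = { 1, 2, 3, 4 };   /* [[1,2],[3,4]] */
    double B[4] = { 5, 6, 7, 8 };   /* [[5,6],[7,8]] */
    double C[4] = { 0 };
    /* column-major GEMM of (B, A): element (r,c) lives at [c*2 + r] */
    for (int c = 0; c < 2; c++)
        for (int r = 0; r < 2; r++)
            for (int k = 0; k < 2; k++)
                C[c*2 + r] += B[k*2 + r] * A[c*2 + k];
    /* the buffer now holds row-major A*B */
    printf("%g %g\n%g %g\n", C[0], C[1], C[2], C[3]); /* 19 22 / 43 50 */
    return 0;
}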
#include "gemm.h" #include "utils.h" #include "cuda.h" #include <stdlib.h> #include <stdio.h> #include <math.h> void gemm_bin(int M, int N, int K, float ALPHA, char *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { char A_PART = A[i * lda + k]; if (A_PART) { for (j = 0; j < N; ++j) { C[i * ldc + j] += B[k * ldb + j]; } } else { for (j = 0; j < N; ++j) { C[i * ldc + j] -= B[k * ldb + j]; } } } } } float * random_matrix(int rows, int cols) { int i; float *m = calloc(rows * cols, sizeof(float)); for (i = 0; i < rows * cols; ++i) { m[i] = (float)rand() / RAND_MAX; } return m; } void time_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if (!TA) a = random_matrix(m, k); else a = random_matrix(k, m); int lda = (!TA) ? k : m; float *b; if (!TB) b = random_matrix(k, n); else b = random_matrix(n, k); int ldb = (!TB) ? n : k; float *c = random_matrix(m, n); int i; clock_t start = clock(), end; for (i = 0; i < 10; ++i) { gemm_cpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n", m, k, k, n, TA, TB, (float)(end - start) / CLOCKS_PER_SEC); free(a); free(b); free(c); } void gemm(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { gemm_cpu(TA, TB, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc); } void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { register float A_PART = ALPHA * A[i * lda + k]; for (j = 0; j < N; ++j) { C[i * ldc + j] += A_PART * B[k * ldb + j]; } } } } void gemm_nt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; for (i = 0; i < M; ++i) { for (j = 0; j < N; ++j) { register float sum = 0; for (k = 0; k < K; ++k) { sum += ALPHA * A[i * lda + k] * B[j * ldb + k]; } C[i * ldc + j] += sum; } } } void gemm_tn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { register float A_PART = ALPHA * A[k * lda + i]; for (j = 0; j < N; ++j) { C[i * ldc + j] += A_PART * B[k * ldb + j]; } } } } void gemm_tt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; for (i = 0; i < M; ++i) { for (j = 0; j < N; ++j) { register float sum = 0; for (k = 0; k < K; ++k) { sum += ALPHA * A[i + k * lda] * B[k + j * ldb]; } C[i * ldc + j] += sum; } } } void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n", TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc); int i, j; for (i = 0; i < M; ++i) { for (j = 0; j < N; ++j) { C[i * ldc + j] *= BETA; } } if (!TA && !TB) gemm_nn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc); else if (TA && !TB) gemm_tn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc); else if (!TA && TB) gemm_nt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc); else gemm_tt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc); } #ifdef GPU #include <math.h> void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A_gpu, int lda, float *B_gpu, int ldb, float BETA, float *C_gpu, int ldc) { cublasHandle_t handle = blas_handle(); cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N), (TA ? 
CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc); check_error(status); } #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> void time_gpu_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if (!TA) a = random_matrix(m, k); else a = random_matrix(k, m); int lda = (!TA) ? k : m; float *b; if (!TB) b = random_matrix(k, n); else b = random_matrix(n, k); int ldb = (!TB) ? n : k; float *c = random_matrix(m, n); int i; clock_t start = clock(), end; for (i = 0; i < 32; ++i) { gemm_gpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n", m, k, k, n, TA, TB, (float)(end - start) / CLOCKS_PER_SEC); free(a); free(b); free(c); } void time_gpu(int TA, int TB, int m, int k, int n) { int iter = 10; float *a = random_matrix(m, k); float *b = random_matrix(k, n); int lda = (!TA) ? k : m; int ldb = (!TB) ? n : k; float *c = random_matrix(m, n); float *a_cl = cuda_make_array(a, m * k); float *b_cl = cuda_make_array(b, k * n); float *c_cl = cuda_make_array(c, m * n); int i; clock_t start = clock(), end; for (i = 0; i < iter; ++i) { gemm_gpu(TA, TB, m, n, k, 1, a_cl, lda, b_cl, ldb, 1, c_cl, n); cudaDeviceSynchronize(); } double flop = ((double)m) * n * (2. * k + 2.) * iter; double gflop = flop / pow(10., 9); end = clock(); double seconds = sec(end - start); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n", m, k, k, n, TA, TB, seconds, gflop / seconds); cuda_free(a_cl); cuda_free(b_cl); cuda_free(c_cl); free(a); free(b); free(c); } void test_gpu_accuracy(int TA, int TB, int m, int k, int n) { srand(0); float *a; if (!TA) a = random_matrix(m, k); else a = random_matrix(k, m); int lda = (!TA) ? k : m; float *b; if (!TB) b = random_matrix(k, n); else b = random_matrix(n, k); int ldb = (!TB) ? n : k; float *c = random_matrix(m, n); float *c_gpu = random_matrix(m, n); memset(c, 0, m * n * sizeof(float)); memset(c_gpu, 0, m * n * sizeof(float)); int i; //pm(m, k, b); gemm_gpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c_gpu, n); //printf("GPU\n"); //pm(m, n, c_gpu); gemm_cpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n); //printf("\n\nCPU\n"); //pm(m, n, c); double sse = 0; for (i = 0; i < m * n; ++i) { //printf("%f %f\n", c[i], c_gpu[i]); sse += pow(c[i] - c_gpu[i], 2); } printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n", m, k, k, n, TA, TB, sse / (m * n)); free(a); free(b); free(c); free(c_gpu); } int test_gpu_blas() { /* * test_gpu_accuracy(0,0,10,576,75); * * test_gpu_accuracy(0,0,17,10,10); test_gpu_accuracy(1,0,17,10,10); * test_gpu_accuracy(0,1,17,10,10); test_gpu_accuracy(1,1,17,10,10); * * test_gpu_accuracy(0,0,1000,10,100); test_gpu_accuracy(1,0,1000,10,100); * test_gpu_accuracy(0,1,1000,10,100); * test_gpu_accuracy(1,1,1000,10,100); * * test_gpu_accuracy(0,0,10,10,10); * * time_gpu(0,0,64,2916,363); time_gpu(0,0,64,2916,363); * time_gpu(0,0,64,2916,363); time_gpu(0,0,192,729,1600); * time_gpu(0,0,384,196,1728); time_gpu(0,0,256,196,3456); * time_gpu(0,0,256,196,2304); time_gpu(0,0,128,4096,12544); * time_gpu(0,0,128,4096,4096); */ time_gpu(0, 0, 64, 75, 12544); time_gpu(0, 0, 64, 75, 12544); time_gpu(0, 0, 64, 75, 12544); time_gpu(0, 0, 64, 576, 12544); time_gpu(0, 0, 256, 2304, 784); time_gpu(1, 1, 2304, 256, 784); time_gpu(0, 0, 512, 4608, 196); time_gpu(1, 1, 4608, 512, 196); return 0; } #endif
#include "gemm.h" #include "utils.h" #include "cuda.h" #include <stdlib.h> #include <stdio.h> #include <math.h> void gemm_bin(int M, int N, int K, float ALPHA, char *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { char A_PART = A[i * lda + k]; if (A_PART) { for (j = 0; j < N; ++j) { C[i * ldc + j] += B[k * ldb + j]; } } else { for (j = 0; j < N; ++j) { C[i * ldc + j] -= B[k * ldb + j]; } } } } } float * random_matrix(int rows, int cols) { int i; float *m = calloc(rows * cols, sizeof(float)); for (i = 0; i < rows * cols; ++i) { m[i] = (float)rand() / RAND_MAX; } return m; } void time_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if (!TA) a = random_matrix(m, k); else a = random_matrix(k, m); int lda = (!TA) ? k : m; float *b; if (!TB) b = random_matrix(k, n); else b = random_matrix(n, k); int ldb = (!TB) ? n : k; float *c = random_matrix(m, n); int i; clock_t start = clock(), end; for (i = 0; i < 10; ++i) { gemm_cpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n", m, k, k, n, TA, TB, (float)(end - start) / CLOCKS_PER_SEC); free(a); free(b); free(c); } void gemm(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { gemm_cpu(TA, TB, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc); } void gemm_nn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; #pragma omp parallel for for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { register float A_PART = ALPHA * A[i * lda + k]; for (j = 0; j < N; ++j) { C[i * ldc + j] += A_PART * B[k * ldb + j]; } } } } void gemm_nt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; #pragma omp parallel for for (i = 0; i < M; ++i) { for (j = 0; j < N; ++j) { register float sum = 0; for (k = 0; k < K; ++k) { sum += ALPHA * A[i * lda + k] * B[j * ldb + k]; } C[i * ldc + j] += sum; } } } void gemm_tn(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; #pragma omp parallel for for (i = 0; i < M; ++i) { for (k = 0; k < K; ++k) { register float A_PART = ALPHA * A[k * lda + i]; for (j = 0; j < N; ++j) { C[i * ldc + j] += A_PART * B[k * ldb + j]; } } } } void gemm_tt(int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float *C, int ldc) { int i, j, k; #pragma omp parallel for for (i = 0; i < M; ++i) { for (j = 0; j < N; ++j) { register float sum = 0; for (k = 0; k < K; ++k) { sum += ALPHA * A[i + k * lda] * B[k + j * ldb]; } C[i * ldc + j] += sum; } } } void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A, int lda, float *B, int ldb, float BETA, float *C, int ldc) { //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n", TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc); int i, j; for (i = 0; i < M; ++i) { for (j = 0; j < N; ++j) { C[i * ldc + j] *= BETA; } } if (!TA && !TB) gemm_nn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc); else if (TA && !TB) gemm_tn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc); else if (!TA && TB) gemm_nt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc); else gemm_tt(M, N, K, ALPHA, A, lda, B, ldb, C, ldc); } #ifdef GPU #include <math.h> void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA, float *A_gpu, int lda, float *B_gpu, int ldb, float BETA, float *C_gpu, int ldc) { cublasHandle_t handle = blas_handle(); cudaError_t status = 
cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N), (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc); check_error(status); } #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> void time_gpu_random_matrix(int TA, int TB, int m, int k, int n) { float *a; if (!TA) a = random_matrix(m, k); else a = random_matrix(k, m); int lda = (!TA) ? k : m; float *b; if (!TB) b = random_matrix(k, n); else b = random_matrix(n, k); int ldb = (!TB) ? n : k; float *c = random_matrix(m, n); int i; clock_t start = clock(), end; for (i = 0; i < 32; ++i) { gemm_gpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n); } end = clock(); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n", m, k, k, n, TA, TB, (float)(end - start) / CLOCKS_PER_SEC); free(a); free(b); free(c); } void time_gpu(int TA, int TB, int m, int k, int n) { int iter = 10; float *a = random_matrix(m, k); float *b = random_matrix(k, n); int lda = (!TA) ? k : m; int ldb = (!TB) ? n : k; float *c = random_matrix(m, n); float *a_cl = cuda_make_array(a, m * k); float *b_cl = cuda_make_array(b, k * n); float *c_cl = cuda_make_array(c, m * n); int i; clock_t start = clock(), end; for (i = 0; i < iter; ++i) { gemm_gpu(TA, TB, m, n, k, 1, a_cl, lda, b_cl, ldb, 1, c_cl, n); cudaDeviceSynchronize(); } double flop = ((double)m) * n * (2. * k + 2.) * iter; double gflop = flop / pow(10., 9); end = clock(); double seconds = sec(end - start); printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n", m, k, k, n, TA, TB, seconds, gflop / seconds); cuda_free(a_cl); cuda_free(b_cl); cuda_free(c_cl); free(a); free(b); free(c); } void test_gpu_accuracy(int TA, int TB, int m, int k, int n) { srand(0); float *a; if (!TA) a = random_matrix(m, k); else a = random_matrix(k, m); int lda = (!TA) ? k : m; float *b; if (!TB) b = random_matrix(k, n); else b = random_matrix(n, k); int ldb = (!TB) ? n : k; float *c = random_matrix(m, n); float *c_gpu = random_matrix(m, n); memset(c, 0, m * n * sizeof(float)); memset(c_gpu, 0, m * n * sizeof(float)); int i; //pm(m, k, b); gemm_gpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c_gpu, n); //printf("GPU\n"); //pm(m, n, c_gpu); gemm_cpu(TA, TB, m, n, k, 1, a, lda, b, ldb, 1, c, n); //printf("\n\nCPU\n"); //pm(m, n, c); double sse = 0; for (i = 0; i < m * n; ++i) { //printf("%f %f\n", c[i], c_gpu[i]); sse += pow(c[i] - c_gpu[i], 2); } printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n", m, k, k, n, TA, TB, sse / (m * n)); free(a); free(b); free(c); free(c_gpu); } int test_gpu_blas() { /* * test_gpu_accuracy(0,0,10,576,75); * * test_gpu_accuracy(0,0,17,10,10); test_gpu_accuracy(1,0,17,10,10); * test_gpu_accuracy(0,1,17,10,10); test_gpu_accuracy(1,1,17,10,10); * * test_gpu_accuracy(0,0,1000,10,100); test_gpu_accuracy(1,0,1000,10,100); * test_gpu_accuracy(0,1,1000,10,100); * test_gpu_accuracy(1,1,1000,10,100); * * test_gpu_accuracy(0,0,10,10,10); * * time_gpu(0,0,64,2916,363); time_gpu(0,0,64,2916,363); * time_gpu(0,0,64,2916,363); time_gpu(0,0,192,729,1600); * time_gpu(0,0,384,196,1728); time_gpu(0,0,256,196,3456); * time_gpu(0,0,256,196,2304); time_gpu(0,0,128,4096,12544); * time_gpu(0,0,128,4096,4096); */ time_gpu(0, 0, 64, 75, 12544); time_gpu(0, 0, 64, 75, 12544); time_gpu(0, 0, 64, 75, 12544); time_gpu(0, 0, 64, 576, 12544); time_gpu(0, 0, 256, 2304, 784); time_gpu(1, 1, 2304, 256, 784); time_gpu(0, 0, 512, 4608, 196); time_gpu(1, 1, 4608, 512, 196); return 0; } #endif
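Two properties of gemm_nn above are worth spelling out. First, the i-k-j loop order keeps the innermost loop unit-stride through both B and C, with ALPHA*A[i*lda+k] hoisted out of it. Second, parallelizing only the outer i loop is race-free because each thread updates a disjoint set of C rows, provided the inner indices are private (hence the private(j, k) clauses, since j and k are declared outside the construct). A minimal sketch with the indices declared inside the loops, which makes them private by construction (sizes and values are hypothetical):

#include <stdio.h>

enum { M = 4, N = 4, K = 4 };

static void mm_nn (float alpha, const float *A, const float *B, float *C)
{
    #pragma omp parallel for
    for (int i = 0; i < M; ++i) {             /* each thread owns whole rows of C */
        for (int k = 0; k < K; ++k) {
            float a = alpha * A[i*K + k];     /* hoisted; reused N times */
            for (int j = 0; j < N; ++j)
                C[i*N + j] += a * B[k*N + j]; /* unit stride in B and C */
        }
    }
}

int main(void)
{
    float A[M*K], B[K*N], C[M*N] = { 0 };
    for (int i = 0; i < M*K; ++i) A[i] = 1.0f;
    for (int i = 0; i < K*N; ++i) B[i] = 2.0f;
    mm_nn (1.0f, A, B, C);
    printf ("C[0] = %g\n", C[0]);             /* K * 1 * 2 = 8 */
    return 0;
}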
nstream-target.c
/// /// Copyright (c) 2019, Intel Corporation /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of Intel Corporation nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. ////////////////////////////////////////////////////////////////////// /// /// NAME: nstream /// /// PURPOSE: To compute memory bandwidth when adding a vector of a given /// number of double precision values to the scalar multiple of /// another vector of the same length, and storing the result in /// a third vector. /// /// USAGE: The program takes as input the number /// of iterations to loop over the triad vectors and /// the length of the vectors. /// /// <progname> <# iterations> <vector length> /// /// The output consists of diagnostics to make sure the /// algorithm worked, and of timing statistics. /// /// NOTES: Bandwidth is determined as the number of words read, plus the /// number of words written, times the size of the words, divided /// by the execution time. For a vector length of N, the total /// number of words read and written is 4*N*sizeof(double). /// /// /// HISTORY: This code is loosely based on the Stream benchmark by John /// McCalpin, but does not follow all the Stream rules. Hence, /// reported results should not be associated with Stream in /// external publications /// /// Converted to C++11 by Jeff Hammond, November 2017. /// Converted to C11 by Jeff Hammond, February 2019. 
/// ////////////////////////////////////////////////////////////////////// #include "prk_util.h" #include "prk_openmp.h" int main(int argc, char * argv[]) { printf("Parallel Research Kernels version %d\n", PRKVERSION ); printf("C11/OpenMP TARGET STREAM triad: A = B + scalar * C\n"); ////////////////////////////////////////////////////////////////////// /// Read and test input parameters ////////////////////////////////////////////////////////////////////// if (argc < 3) { printf("Usage: <# iterations> <vector length>\n"); return 1; } int iterations = atoi(argv[1]); if (iterations < 1) { printf("ERROR: iterations must be >= 1\n"); return 1; } // length of the vector size_t length = atol(argv[2]); if (length <= 0) { printf("ERROR: Vector length must be greater than 0\n"); return 1; } int device = (argc > 3) ? atol(argv[3]) : omp_get_default_device(); if ( (device < 0 || omp_get_num_devices() <= device ) && (device != omp_get_default_device()) ) { printf("ERROR: device number %d is not valid.\n", device); return 1; } printf("Number of iterations = %d\n", iterations); printf("Vector length = %zu\n", length); printf("OpenMP Device = %d\n", device); ////////////////////////////////////////////////////////////////////// // Allocate space and perform the computation ////////////////////////////////////////////////////////////////////// double nstream_time = 0.0; size_t bytes = length*sizeof(double); double * restrict A = prk_malloc(bytes); double * restrict B = prk_malloc(bytes); double * restrict C = prk_malloc(bytes); double scalar = 3.0; #pragma omp parallel for simd for (size_t i=0; i<length; i++) { A[i] = 0.0; B[i] = 2.0; C[i] = 2.0; } #pragma omp target data map(tofrom: A[0:length]) map(to: B[0:length], C[0:length]) { for (int iter = 0; iter<=iterations; iter++) { if (iter==1) nstream_time = omp_get_wtime(); #pragma omp target teams distribute parallel for simd for (size_t i=0; i<length; i++) { A[i] += B[i] + scalar * C[i]; } } nstream_time = omp_get_wtime() - nstream_time; } prk_free(C); prk_free(B); ////////////////////////////////////////////////////////////////////// /// Analyze and output results ////////////////////////////////////////////////////////////////////// double ar = 0.0; double br = 2.0; double cr = 2.0; for (int i=0; i<=iterations; i++) { ar += br + scalar * cr; } ar *= length; double asum = 0.0; #pragma omp parallel for reduction(+:asum) for (size_t i=0; i<length; i++) { asum += fabs(A[i]); } prk_free(A); double epsilon=1.e-8; if (fabs(ar-asum)/asum > epsilon) { printf("Failed Validation on output array\n" " Expected checksum: %lf\n" " Observed checksum: %lf\n" "ERROR: solution did not validate\n", ar, asum); return 1; } else { printf("Solution validates\n"); double avgtime = nstream_time/iterations; double nbytes = 4.0 * length * sizeof(double); printf("Rate (MB/s): %lf Avg time (s): %lf\n", 1.e-6*nbytes/avgtime, avgtime); } return 0; }
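The target version above keeps data movement out of the timed loop: '#pragma omp target data' maps A, B, and C to the device once around all iterations, so each iteration launches only a 'target teams distribute parallel for simd' kernel, and A comes back to the host when the data region ends. A minimal self-contained sketch of that pattern (names, sizes, and the iteration count are hypothetical; without offload support the regions simply execute on the host):

#include <stdio.h>

#define LEN 1000

int main(void)
{
    static double x[LEN], y[LEN];
    double s = 3.0;
    for (int i = 0; i < LEN; i++) { x[i] = 0.0; y[i] = 2.0; }
    /* one host<->device transfer for the whole loop, not one per iteration */
    #pragma omp target data map(tofrom: x[0:LEN]) map(to: y[0:LEN])
    {
        for (int iter = 0; iter < 10; iter++) {
            #pragma omp target teams distribute parallel for simd
            for (int i = 0; i < LEN; i++)
                x[i] += y[i] + s * y[i];
        }
    }   /* x is copied back to the host here */
    printf("x[0] = %g\n", x[0]); /* 10 * (2 + 3*2) = 80 */
    return 0;
}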
/// /// Copyright (c) 2019, Intel Corporation /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of Intel Corporation nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. ////////////////////////////////////////////////////////////////////// /// /// NAME: nstream /// /// PURPOSE: To compute memory bandwidth when adding a vector of a given /// number of double precision values to the scalar multiple of /// another vector of the same length, and storing the result in /// a third vector. /// /// USAGE: The program takes as input the number /// of iterations to loop over the triad vectors and /// the length of the vectors. /// /// <progname> <# iterations> <vector length> /// /// The output consists of diagnostics to make sure the /// algorithm worked, and of timing statistics. /// /// NOTES: Bandwidth is determined as the number of words read, plus the /// number of words written, times the size of the words, divided /// by the execution time. For a vector length of N, the total /// number of words read and written is 4*N*sizeof(double). /// /// /// HISTORY: This code is loosely based on the Stream benchmark by John /// McCalpin, but does not follow all the Stream rules. Hence, /// reported results should not be associated with Stream in /// external publications /// /// Converted to C++11 by Jeff Hammond, November 2017. /// Converted to C11 by Jeff Hammond, February 2019. 
///
//////////////////////////////////////////////////////////////////////

#include "prk_util.h"
#include "prk_openmp.h"

int main(int argc, char * argv[])
{
  printf("Parallel Research Kernels version %d\n", PRKVERSION );
  printf("C11/OpenMP TARGET STREAM triad: A = B + scalar * C\n");

  //////////////////////////////////////////////////////////////////////
  /// Read and test input parameters
  //////////////////////////////////////////////////////////////////////

  if (argc < 3) {
    printf("Usage: <# iterations> <vector length>\n");
    return 1;
  }

  int iterations = atoi(argv[1]);
  if (iterations < 1) {
    printf("ERROR: iterations must be >= 1\n");
    return 1;
  }

  // length of the vector
  size_t length = atol(argv[2]);
  if (length <= 0) { // note: length is unsigned, so this only catches 0
    printf("ERROR: Vector length must be greater than 0\n");
    return 1;
  }

  int device = (argc > 3) ? atol(argv[3]) : omp_get_default_device();
  if ((device < 0 || omp_get_num_devices() <= device) && (device != omp_get_default_device())) {
    printf("ERROR: device number %d is not valid.\n", device);
    return 1;
  }

  printf("Number of iterations = %d\n", iterations);
  printf("Vector length = %zu\n", length);
  printf("OpenMP Device = %d\n", device);

  //////////////////////////////////////////////////////////////////////
  // Allocate space and perform the computation
  //////////////////////////////////////////////////////////////////////

  double nstream_time = 0.0;

  size_t bytes = length*sizeof(double);
  double * restrict A = prk_malloc(bytes);
  double * restrict B = prk_malloc(bytes);
  double * restrict C = prk_malloc(bytes);

  double scalar = 3.0;

  for (size_t i=0; i<length; i++) {
    A[i] = 0.0;
    B[i] = 2.0;
    C[i] = 2.0;
  }

  for (int iter = 0; iter<=iterations; iter++) {
    if (iter==1) nstream_time = omp_get_wtime();
    for (size_t i=0; i<length; i++) {
      A[i] += B[i] + scalar * C[i];
    }
  }
  nstream_time = omp_get_wtime() - nstream_time;

  prk_free(C);
  prk_free(B);

  //////////////////////////////////////////////////////////////////////
  /// Analyze and output results
  //////////////////////////////////////////////////////////////////////

  double ar = 0.0;
  double br = 2.0;
  double cr = 2.0;
  for (int i=0; i<=iterations; i++) {
    ar += br + scalar * cr;
  }
  ar *= length;

  double asum = 0.0;
  for (size_t i=0; i<length; i++) {
    asum += fabs(A[i]);
  }

  prk_free(A);

  double epsilon=1.e-8;
  if (fabs(ar-asum)/asum > epsilon) {
    printf("Failed Validation on output array\n"
           " Expected checksum: %lf\n"
           " Observed checksum: %lf\n"
           "ERROR: solution did not validate\n", ar, asum);
    return 1;
  } else {
    printf("Solution validates\n");
    double avgtime = nstream_time/iterations;
    double nbytes = 4.0 * length * sizeof(double);
    printf("Rate (MB/s): %lf Avg time (s): %lf\n", 1.e-6*nbytes/avgtime, avgtime);
  }

  return 0;
}
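Between the plain serial build above and the device-offload build earlier, the same kernel can also run host-parallel. A minimal sketch of just the timed loop (same variables as in the file above; the pragmas are those of the offload version with the target constructs dropped):

  for (int iter = 0; iter<=iterations; iter++) {
    if (iter==1) nstream_time = omp_get_wtime();
    #pragma omp parallel for simd
    for (size_t i=0; i<length; i++) {
      A[i] += B[i] + scalar * C[i];
    }
  }
  nstream_time = omp_get_wtime() - nstream_time;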
conv_dw_kernel_x86.c
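conv_dw_kernel_x86.c below implements 3x3 depthwise convolution (stride 1 and stride 2) by packing 8 channels per AVX register (4 per SSE register) and register-blocking the output columns. As a reference for what the vectorized kernels compute, here is a minimal scalar sketch of a stride-1 3x3 depthwise convolution; the function name and the assumption that padding was already applied (outh = inh - 2, outw = inw - 2) are illustrative, not taken from the file:

#include <stddef.h>

/* Illustrative reference, not part of the original file: each channel c of
   the input is convolved with its own 3x3 filter (no cross-channel sums). */
static void convdw3x3s1_ref(float* output, const float* input, const float* kernel,
                            const float* bias, int inc, int inh, int inw)
{
    int outh = inh - 2;
    int outw = inw - 2;
    for (int c = 0; c < inc; c++)
    {
        const float* in = input + (size_t)c * inh * inw;
        const float* k = kernel + (size_t)c * 9;
        float* out = output + (size_t)c * outh * outw;
        for (int i = 0; i < outh; i++)
            for (int j = 0; j < outw; j++)
            {
                float sum = bias ? bias[c] : 0.f;
                for (int ki = 0; ki < 3; ki++)
                    for (int kj = 0; kj < 3; kj++)
                        sum += in[(i + ki) * inw + (j + kj)] * k[ki * 3 + kj];
                out[i * outw + j] = sum;
            }
    }
}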
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h> /* memcpy() */
#include <math.h>

#include "conv_dw_kernel_x86.h"

#if __SSE2__
#include <emmintrin.h>
#endif
#if __AVX__
#include <immintrin.h>
#endif

#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))

void relu(float* data, int size, int activation)
{
    for (int i = 0; i < size; i++) {
        data[i] = max(data[i], (float)0);
        if (activation > 0) {
            data[i] = min(data[i], (float)activation);
        }
    }
}

void pad(float* input, float* output, int in_h, int in_w, int out_h, int out_w, int top, int left, float v)
{
    float* ptr = input;
    float* outptr = output;

    int y = 0;
    // fill top rows with the pad value v
    for (; y < top; y++) {
        for (int x = 0; x < out_w; x++)
            outptr[x] = v;
        outptr += out_w;
    }
    // fill center rows: left pad, payload, right pad
    for (; y < (top + in_h); y++) {
        int x = 0;
        for (; x < left; x++)
            outptr[x] = v;
        if (in_w < 12) {
            for (; x < (left + in_w); x++)
                outptr[x] = ptr[x - left];
        } else {
            memcpy(outptr + left, ptr, in_w * sizeof(float));
            x += in_w;
        }
        for (; x < out_w; x++)
            outptr[x] = v;
        ptr += in_w;
        outptr += out_w;
    }
    // fill bottom rows
    for (; y < out_h; y++) {
        for (int x = 0; x < out_w; x++)
            outptr[x] = v;
        outptr += out_w;
    }
}

#if __AVX__
static void convdw3x3s1(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh,
                        int inw, int outh, int outw, int num_thread)
{
    int inwh = inw * inh;
    int outwh = outw * outh;
    int channel_count = inc >> 3;
    int channel_remain = inc - (channel_count << 3);

    // generate the image tmp: interleave 8 channels per pixel so that each
    // __m256 load below reads one input column of 8 packed channels
    float* img_tmp = (float*)sys_malloc(8 * inwh * (channel_count + 1) * sizeof(float));
    float* kernel_tmp = (float*)sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float));
    float* bias_tmp = (float*)sys_malloc(8 * (channel_count + 1) * sizeof(float));
    {
        for (int i = 0; i < channel_count; i++) {
            int ii = i * 8;
            const float* k0 = img_data + (ii + 0) * inwh;
            const float* k1 = img_data + (ii + 1) * inwh;
            const float* k2 = img_data + (ii + 2) * inwh;
            const float* k3 = img_data + (ii + 3) * inwh;
            const float* k4 = img_data + (ii + 4) * inwh;
            const float* k5 = img_data + (ii + 5) * inwh;
            const float* k6 = img_data + (ii + 6) * inwh;
            const float* k7 = img_data + (ii + 7) * inwh;
            const float* f0 = kernel_data + (ii + 0) * 9;
            const float* f1 = kernel_data + (ii + 1) * 9;
            const float* f2 = kernel_data + (ii + 2) * 9;
            const float* f3 = kernel_data + (ii + 3) * 9;
            const float* f4 = kernel_data + (ii + 4) * 9;
            const float* f5 = kernel_data + (ii + 5) * 9;
            const float* f6 = kernel_data + (ii + 6) * 9;
            const float* f7 = kernel_data + (ii + 7) * 9;
            const float* b0 = bias_data + (ii + 0);
            const float* b1 = bias_data + (ii + 1);
            const float* b2 = bias_data + (ii + 2);
            const float* b3 = bias_data + (ii + 3);
            const
float* b4 = bias_data + (ii + 4); const float* b5 = bias_data + (ii + 5); const float* b6 = bias_data + (ii + 6); const float* b7 = bias_data + (ii + 7); float* tmp0 = img_tmp + ii * inwh; float* tmp1 = kernel_tmp + ii * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0[4] = k4[0]; tmp0[5] = k5[0]; tmp0[6] = k6[0]; tmp0[7] = k7[0]; tmp0 += 8; k0++; k1++; k2++; k3++; k4++; k5++; k6++; k7++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1[4] = f4[0]; tmp1[5] = f5[0]; tmp1[6] = f6[0]; tmp1[7] = f7[0]; tmp1 += 8; f0++; f1++; f2++; f3++; f4++; f5++; f6++; f7++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; tmp2[4] = b4[0]; tmp2[5] = b5[0]; tmp2[6] = b6[0]; tmp2[7] = b7[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; tmp2[4] = 0; tmp2[5] = 0; tmp2[6] = 0; tmp2[7] = 0; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float* k0 = img_data + (ii + 0) * inwh; float* k1 = img_data + (ii + 1) * inwh; float* k2 = img_data + (ii + 2) * inwh; float* k3 = img_data + (ii + 3) * inwh; float* f0 = kernel_data + (ii + 0) * 9; float* f1 = kernel_data + (ii + 1) * 9; float* f2 = kernel_data + (ii + 2) * 9; float* f3 = kernel_data + (ii + 3) * 9; float* b0 = bias_data + (ii + 0); float* b1 = bias_data + (ii + 1); float* b2 = bias_data + (ii + 2); float* b3 = bias_data + (ii + 3); float* tmp0 = img_tmp + channel_count * 8 * inwh; float* tmp1 = kernel_tmp + channel_count * 8 * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 8; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 8; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float* k0 = img_data + ii * inwh; float* f0 = kernel_data + ii * 9; float* b0 = bias_data + ii; float* tmp0 = img_tmp + channel_count * 8 * inwh; float* tmp1 = kernel_tmp + channel_count * 8 * 9; float* tmp2 = bias_tmp + channel_count * 8; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 8; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 8; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float* output_tmp = ( float* )sys_malloc(outwh * (channel_count + 1) * 8 * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float* ktmp = kernel_tmp + c * 8 * 9; float* btmp = bias_tmp + c * 8; for (int i = 0; i < outh; i++) { int j = 0; float* itmp0 = img_tmp + c * 8 * inwh + 8 * i * inw; float* itmp1 = img_tmp + c * 8 * inwh + 8 * (i + 1) * inw; float* itmp2 = img_tmp + c * 8 * inwh + 8 * (i + 2) * inw; float* otmp = output_tmp + c * 8 * outwh + 8 * i * outw; for (; j + 7 < outw; j += 8) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _sum4 = _mm256_loadu_ps(btmp); __m256 _sum5 = _mm256_loadu_ps(btmp); __m256 _sum6 = _mm256_loadu_ps(btmp); __m256 _sum7 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); 
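/* eight output columns per iteration: _va0.._va9 are the ten packed input
   columns (j .. j+9) that the 3-tap row filter needs for outputs j .. j+7 */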
__m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _va6 = _mm256_loadu_ps(itmp0 + 48); __m256 _va7 = _mm256_loadu_ps(itmp0 + 56); __m256 _va8 = _mm256_loadu_ps(itmp0 + 64); __m256 _va9 = _mm256_loadu_ps(itmp0 + 72); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _va6 = _mm256_loadu_ps(itmp1 + 48); _va7 = _mm256_loadu_ps(itmp1 + 56); _va8 = _mm256_loadu_ps(itmp1 + 64); _va9 = _mm256_loadu_ps(itmp1 + 72); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _va6 = _mm256_loadu_ps(itmp2 + 48); _va7 = _mm256_loadu_ps(itmp2 + 56); _va8 = _mm256_loadu_ps(itmp2 + 64); _va9 = _mm256_loadu_ps(itmp2 + 72); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = 
_mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); _mm256_storeu_ps(otmp + 32, _sum4); _mm256_storeu_ps(otmp + 40, _sum5); _mm256_storeu_ps(otmp + 48, _sum6); _mm256_storeu_ps(otmp + 56, _sum7); itmp0 += 64; itmp1 += 64; itmp2 += 64; otmp += 64; } for (; j + 3 < outw; j += 4) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = 
_mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 16; } for (; j < outw; j++) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = 
_mm256_fmadd_ps(_va2, _vb2, _sum0); _mm256_storeu_ps(otmp, _sum0); itmp0 += 8; itmp1 += 8; itmp2 += 8; otmp += 8; } } } // load_data { for (int i = 0; i < channel_count; i++) { float* otmp = output_tmp + i * 8 * outwh; float* tmp0 = output + i * 8 * outwh; float* tmp1 = output + i * 8 * outwh + 1 * outwh; float* tmp2 = output + i * 8 * outwh + 2 * outwh; float* tmp3 = output + i * 8 * outwh + 3 * outwh; float* tmp4 = output + i * 8 * outwh + 4 * outwh; float* tmp5 = output + i * 8 * outwh + 5 * outwh; float* tmp6 = output + i * 8 * outwh + 6 * outwh; float* tmp7 = output + i * 8 * outwh + 7 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; tmp4[0] = otmp[4]; tmp5[0] = otmp[5]; tmp6[0] = otmp[6]; tmp7[0] = otmp[7]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; tmp4++; tmp5++; tmp6++; tmp7++; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float* otmp = output_tmp + ii * outwh; float* tmp0 = output + ii * outwh; float* tmp1 = output + ii * outwh + 1 * outwh; float* tmp2 = output + ii * outwh + 2 * outwh; float* tmp3 = output + ii * outwh + 3 * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float* otmp = output_tmp + channel_count * 8 * outwh; float* tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 8; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } static void convdw3x3s2(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 3; int channel_remain = inc - (channel_count << 3); // generate the image tmp float* img_tmp = ( float* )sys_malloc(8 * inwh * (channel_count + 1) * sizeof(float)); float* kernel_tmp = ( float* )sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float)); float* bias_tmp = ( float* )sys_malloc(8 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 8; const float* k0 = img_data + (ii + 0) * inwh; const float* k1 = img_data + (ii + 1) * inwh; const float* k2 = img_data + (ii + 2) * inwh; const float* k3 = img_data + (ii + 3) * inwh; const float* k4 = img_data + (ii + 4) * inwh; const float* k5 = img_data + (ii + 5) * inwh; const float* k6 = img_data + (ii + 6) * inwh; const float* k7 = img_data + (ii + 7) * inwh; const float* f0 = kernel_data + (ii + 0) * 9; const float* f1 = kernel_data + (ii + 1) * 9; const float* f2 = kernel_data + (ii + 2) * 9; const float* f3 = kernel_data + (ii + 3) * 9; const float* f4 = kernel_data + (ii + 4) * 9; const float* f5 = kernel_data + (ii + 5) * 9; const float* f6 = kernel_data + (ii + 6) * 9; const float* f7 = kernel_data + (ii + 7) * 9; const float* b0 = bias_data + (ii + 0); const float* b1 = bias_data + (ii + 1); const float* b2 = bias_data + (ii + 2); const float* b3 = bias_data + (ii + 3); const float* b4 = bias_data + (ii + 4); const float* b5 = bias_data + (ii + 5); const float* b6 = bias_data + (ii + 6); const float* b7 = bias_data + (ii + 7); float* tmp0 = img_tmp + ii * inwh; float* tmp1 = kernel_tmp + ii * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0[4] = k4[0]; tmp0[5] = 
k5[0]; tmp0[6] = k6[0]; tmp0[7] = k7[0]; tmp0 += 8; k0++; k1++; k2++; k3++; k4++; k5++; k6++; k7++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1[4] = f4[0]; tmp1[5] = f5[0]; tmp1[6] = f6[0]; tmp1[7] = f7[0]; tmp1 += 8; f0++; f1++; f2++; f3++; f4++; f5++; f6++; f7++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; tmp2[4] = b4[0]; tmp2[5] = b5[0]; tmp2[6] = b6[0]; tmp2[7] = b7[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; tmp2[4] = 0; tmp2[5] = 0; tmp2[6] = 0; tmp2[7] = 0; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float* k0 = img_data + (ii + 0) * inwh; float* k1 = img_data + (ii + 1) * inwh; float* k2 = img_data + (ii + 2) * inwh; float* k3 = img_data + (ii + 3) * inwh; float* f0 = kernel_data + (ii + 0) * 9; float* f1 = kernel_data + (ii + 1) * 9; float* f2 = kernel_data + (ii + 2) * 9; float* f3 = kernel_data + (ii + 3) * 9; float* b0 = bias_data + (ii + 0); float* b1 = bias_data + (ii + 1); float* b2 = bias_data + (ii + 2); float* b3 = bias_data + (ii + 3); float* tmp0 = img_tmp + channel_count * 8 * inwh; float* tmp1 = kernel_tmp + channel_count * 8 * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 8; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 8; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float* k0 = img_data + ii * inwh; float* f0 = kernel_data + ii * 9; float* b0 = bias_data + ii; float* tmp0 = img_tmp + channel_count * 8 * inwh; float* tmp1 = kernel_tmp + channel_count * 8 * 9; float* tmp2 = bias_tmp + channel_count * 8; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 8; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 8; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float* output_tmp = ( float* )sys_malloc(outwh * (channel_count + 1) * 8 * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float* ktmp = kernel_tmp + c * 8 * 9; float* btmp = bias_tmp + c * 8; for (int i = 0; i < outh; i++) { int j = 0; float* itmp0 = img_tmp + c * 8 * inwh + 8 * i * 2 * inw; float* itmp1 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 1) * inw; float* itmp2 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 2) * inw; float* otmp = output_tmp + c * 8 * outwh + 8 * i * outw; for (; j + 3 < outw; j += 4) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _va6 = _mm256_loadu_ps(itmp0 + 48); __m256 _va7 = _mm256_loadu_ps(itmp0 + 56); __m256 _va8 = _mm256_loadu_ps(itmp0 + 64); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = 
_mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _va6 = _mm256_loadu_ps(itmp1 + 48); _va7 = _mm256_loadu_ps(itmp1 + 56); _va8 = _mm256_loadu_ps(itmp1 + 64); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _va6 = _mm256_loadu_ps(itmp2 + 48); _va7 = _mm256_loadu_ps(itmp2 + 56); _va8 = _mm256_loadu_ps(itmp2 + 64); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); itmp0 += 64; itmp1 += 64; itmp2 += 64; otmp += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 
= _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 16; } for (; j < outw; j++) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _mm256_storeu_ps(otmp, _sum0); itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 8; } } } // load_data { for (int i = 0; i < channel_count; i++) { float* otmp = output_tmp + i * 8 * outwh; float* tmp0 = output + i * 8 * outwh; float* tmp1 = output + i * 8 * outwh + 1 * outwh; float* tmp2 = output + i * 8 * outwh + 2 * outwh; float* tmp3 = output + i * 8 * outwh + 3 * outwh; float* tmp4 = output + i * 8 * outwh + 4 * outwh; float* tmp5 = output + i * 8 * outwh + 5 * outwh; float* tmp6 = output + i * 8 * outwh + 6 * outwh; float* tmp7 = output + i * 8 * outwh + 7 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; tmp4[0] = otmp[4]; tmp5[0] = otmp[5]; tmp6[0] = otmp[6]; tmp7[0] = otmp[7]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; tmp4++; tmp5++; tmp6++; tmp7++; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float* otmp = output_tmp + ii * outwh; float* tmp0 = output + ii * outwh; float* tmp1 = output + ii * outwh + 1 * outwh; float* tmp2 = output + ii * outwh + 2 * outwh; float* tmp3 = output + ii * outwh + 3 * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float* otmp = output_tmp + channel_count * 8 * outwh; float* tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 8; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } #elif __SSE2__ static void 
convdw3x3s1(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 2; int channel_remain = inc - (channel_count << 2); // generate the image tmp float* img_tmp = ( float* )sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float)); float* kernel_tmp = ( float* )sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float)); float* bias_tmp = ( float* )sys_malloc(4 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 4; float* k0 = img_data + (ii + 0) * inwh; float* k1 = img_data + (ii + 1) * inwh; float* k2 = img_data + (ii + 2) * inwh; float* k3 = img_data + (ii + 3) * inwh; float* f0 = kernel_data + (ii + 0) * 9; float* f1 = kernel_data + (ii + 1) * 9; float* f2 = kernel_data + (ii + 2) * 9; float* f3 = kernel_data + (ii + 3) * 9; float* b0 = bias_data + (ii + 0); float* b1 = bias_data + (ii + 1); float* b2 = bias_data + (ii + 2); float* b3 = bias_data + (ii + 3); float* tmp0 = img_tmp + ii * inwh; float* tmp1 = kernel_tmp + ii * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 4; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 4; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float* k0 = img_data + ii * inwh; float* f0 = kernel_data + ii * 9; float* b0 = bias_data + ii; float* tmp0 = img_tmp + channel_count * 4 * inwh; float* tmp1 = kernel_tmp + channel_count * 4 * 9; float* tmp2 = bias_tmp + channel_count * 4; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 4; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 4; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float* output_tmp = ( float* )sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float* ktmp = kernel_tmp + c * 4 * 9; float* btmp = bias_tmp + c * 4; for (int i = 0; i < outh; i++) { int j = 0; float* itmp0 = img_tmp + c * 4 * inwh + 4 * i * inw; float* itmp1 = img_tmp + c * 4 * inwh + 4 * (i + 1) * inw; float* itmp2 = img_tmp + c * 4 * inwh + 4 * (i + 2) * inw; float* otmp = output_tmp + c * 4 * outwh + 4 * i * outw; for (; j + 7 < outw; j += 8) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _sum1 = _mm_loadu_ps(btmp); __m128 _sum2 = _mm_loadu_ps(btmp); __m128 _sum3 = _mm_loadu_ps(btmp); __m128 _sum4 = _mm_loadu_ps(btmp); __m128 _sum5 = _mm_loadu_ps(btmp); __m128 _sum6 = _mm_loadu_ps(btmp); __m128 _sum7 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _va3 = _mm_loadu_ps(itmp0 + 12); __m128 _va4 = _mm_loadu_ps(itmp0 + 16); __m128 _va5 = _mm_loadu_ps(itmp0 + 20); __m128 _va6 = _mm_loadu_ps(itmp0 + 24); __m128 _va7 = _mm_loadu_ps(itmp0 + 28); __m128 _va8 = _mm_loadu_ps(itmp0 + 32); __m128 _va9 = _mm_loadu_ps(itmp0 + 36); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), 
_sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5); _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7); _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _va3 = _mm_loadu_ps(itmp1 + 12); _va4 = _mm_loadu_ps(itmp1 + 16); _va5 = _mm_loadu_ps(itmp1 + 20); _va6 = _mm_loadu_ps(itmp1 + 24); _va7 = _mm_loadu_ps(itmp1 + 28); _va8 = _mm_loadu_ps(itmp1 + 32); _va9 = _mm_loadu_ps(itmp1 + 36); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5); _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7); _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _va3 = _mm_loadu_ps(itmp2 + 12); _va4 = _mm_loadu_ps(itmp2 + 16); _va5 = _mm_loadu_ps(itmp2 + 20); _va6 = _mm_loadu_ps(itmp2 + 24); _va7 = _mm_loadu_ps(itmp2 + 28); _va8 = _mm_loadu_ps(itmp2 + 32); _va9 = _mm_loadu_ps(itmp2 + 36); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 
= _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5); _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7); _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7); _mm_storeu_ps(otmp, _sum0); _mm_storeu_ps(otmp + 4, _sum1); _mm_storeu_ps(otmp + 8, _sum2); _mm_storeu_ps(otmp + 12, _sum3); _mm_storeu_ps(otmp + 16, _sum4); _mm_storeu_ps(otmp + 20, _sum5); _mm_storeu_ps(otmp + 24, _sum6); _mm_storeu_ps(otmp + 28, _sum7); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum4[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum5[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum6[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum7[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; sum1[k] += itmp0[k + 4] * ktmp[k]; sum1[k] += itmp1[k + 4] * ktmp[k + 12]; sum1[k] += itmp2[k + 4] * ktmp[k + 24]; sum1[k] += itmp0[k + 8] * ktmp[k + 4]; sum1[k] += itmp1[k + 8] * ktmp[k + 16]; sum1[k] += itmp2[k + 8] * ktmp[k + 28]; sum1[k] += itmp0[k + 12] * ktmp[k + 8]; sum1[k] += itmp1[k + 12] * ktmp[k + 20]; sum1[k] += itmp2[k + 12] * ktmp[k + 32]; sum2[k] += itmp0[k + 8] * ktmp[k]; sum2[k] += itmp1[k + 8] * ktmp[k + 12]; sum2[k] += itmp2[k + 8] * ktmp[k + 24]; sum2[k] += itmp0[k + 12] * ktmp[k + 4]; sum2[k] += itmp1[k + 12] * ktmp[k + 16]; sum2[k] += itmp2[k + 12] * ktmp[k + 28]; sum2[k] += itmp0[k + 16] * ktmp[k + 8]; sum2[k] += itmp1[k + 16] * ktmp[k + 20]; sum2[k] += itmp2[k + 16] * ktmp[k + 32]; sum3[k] += itmp0[k + 12] * ktmp[k]; sum3[k] += itmp1[k + 12] * ktmp[k + 12]; sum3[k] += itmp2[k + 12] * ktmp[k + 24]; sum3[k] += itmp0[k + 16] * ktmp[k + 4]; sum3[k] += itmp1[k + 16] * ktmp[k + 16]; sum3[k] += itmp2[k + 16] * ktmp[k + 28]; sum3[k] += itmp0[k + 20] * ktmp[k + 8]; sum3[k] += itmp1[k + 20] * ktmp[k + 20]; sum3[k] += itmp2[k + 20] * ktmp[k + 32]; sum4[k] += itmp0[k + 16] * ktmp[k]; sum4[k] += itmp1[k + 16] * ktmp[k + 12]; sum4[k] += itmp2[k + 16] * ktmp[k + 24]; sum4[k] += itmp0[k + 20] * ktmp[k + 4]; sum4[k] += itmp1[k + 20] * ktmp[k + 16]; sum4[k] += itmp2[k + 20] * ktmp[k + 28]; sum4[k] += itmp0[k + 24] * ktmp[k + 8]; sum4[k] += itmp1[k + 24] * ktmp[k + 20]; sum4[k] += itmp2[k + 24] * ktmp[k + 32]; sum5[k] += itmp0[k + 20] * ktmp[k]; sum5[k] += itmp1[k + 20] * ktmp[k + 12]; sum5[k] += itmp2[k + 20] * ktmp[k + 24]; sum5[k] += itmp0[k + 24] * ktmp[k + 4]; sum5[k] += itmp1[k + 24] * ktmp[k + 16]; sum5[k] += itmp2[k + 24] * ktmp[k + 28]; 
sum5[k] += itmp0[k + 28] * ktmp[k + 8]; sum5[k] += itmp1[k + 28] * ktmp[k + 20]; sum5[k] += itmp2[k + 28] * ktmp[k + 32]; sum6[k] += itmp0[k + 24] * ktmp[k]; sum6[k] += itmp1[k + 24] * ktmp[k + 12]; sum6[k] += itmp2[k + 24] * ktmp[k + 24]; sum6[k] += itmp0[k + 28] * ktmp[k + 4]; sum6[k] += itmp1[k + 28] * ktmp[k + 16]; sum6[k] += itmp2[k + 28] * ktmp[k + 28]; sum6[k] += itmp0[k + 32] * ktmp[k + 8]; sum6[k] += itmp1[k + 32] * ktmp[k + 20]; sum6[k] += itmp2[k + 32] * ktmp[k + 32]; sum7[k] += itmp0[k + 28] * ktmp[k]; sum7[k] += itmp1[k + 28] * ktmp[k + 12]; sum7[k] += itmp2[k + 28] * ktmp[k + 24]; sum7[k] += itmp0[k + 32] * ktmp[k + 4]; sum7[k] += itmp1[k + 32] * ktmp[k + 16]; sum7[k] += itmp2[k + 32] * ktmp[k + 28]; sum7[k] += itmp0[k + 36] * ktmp[k + 8]; sum7[k] += itmp1[k + 36] * ktmp[k + 20]; sum7[k] += itmp2[k + 36] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k]; otmp[k + 16] = sum4[k]; otmp[k + 20] = sum5[k]; otmp[k + 24] = sum6[k]; otmp[k + 28] = sum7[k]; } #endif itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 32; } for (; j + 3 < outw; j += 4) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _sum1 = _mm_loadu_ps(btmp); __m128 _sum2 = _mm_loadu_ps(btmp); __m128 _sum3 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _va3 = _mm_loadu_ps(itmp0 + 12); __m128 _va4 = _mm_loadu_ps(itmp0 + 16); __m128 _va5 = _mm_loadu_ps(itmp0 + 20); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _va3 = _mm_loadu_ps(itmp1 + 12); _va4 = _mm_loadu_ps(itmp1 + 16); _va5 = _mm_loadu_ps(itmp1 + 20); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _va3 = _mm_loadu_ps(itmp2 + 12); _va4 = _mm_loadu_ps(itmp2 + 16); _va5 = _mm_loadu_ps(itmp2 + 20); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); 
_sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _mm_storeu_ps(otmp, _sum0); _mm_storeu_ps(otmp + 4, _sum1); _mm_storeu_ps(otmp + 8, _sum2); _mm_storeu_ps(otmp + 12, _sum3); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; sum1[k] += itmp0[k + 4] * ktmp[k]; sum1[k] += itmp1[k + 4] * ktmp[k + 12]; sum1[k] += itmp2[k + 4] * ktmp[k + 24]; sum1[k] += itmp0[k + 8] * ktmp[k + 4]; sum1[k] += itmp1[k + 8] * ktmp[k + 16]; sum1[k] += itmp2[k + 8] * ktmp[k + 28]; sum1[k] += itmp0[k + 12] * ktmp[k + 8]; sum1[k] += itmp1[k + 12] * ktmp[k + 20]; sum1[k] += itmp2[k + 12] * ktmp[k + 32]; sum2[k] += itmp0[k + 8] * ktmp[k]; sum2[k] += itmp1[k + 8] * ktmp[k + 12]; sum2[k] += itmp2[k + 8] * ktmp[k + 24]; sum2[k] += itmp0[k + 12] * ktmp[k + 4]; sum2[k] += itmp1[k + 12] * ktmp[k + 16]; sum2[k] += itmp2[k + 12] * ktmp[k + 28]; sum2[k] += itmp0[k + 16] * ktmp[k + 8]; sum2[k] += itmp1[k + 16] * ktmp[k + 20]; sum2[k] += itmp2[k + 16] * ktmp[k + 32]; sum3[k] += itmp0[k + 12] * ktmp[k]; sum3[k] += itmp1[k + 12] * ktmp[k + 12]; sum3[k] += itmp2[k + 12] * ktmp[k + 24]; sum3[k] += itmp0[k + 16] * ktmp[k + 4]; sum3[k] += itmp1[k + 16] * ktmp[k + 16]; sum3[k] += itmp2[k + 16] * ktmp[k + 28]; sum3[k] += itmp0[k + 20] * ktmp[k + 8]; sum3[k] += itmp1[k + 20] * ktmp[k + 20]; sum3[k] += itmp2[k + 20] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k]; } #endif itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 16; } for (; j < outw; j++) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, 
_vb1), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _mm_storeu_ps(otmp, _sum0); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; } #endif itmp0 += 4; itmp1 += 4; itmp2 += 4; otmp += 4; } } } { for (int i = 0; i < channel_count; i++) { float* otmp = output_tmp + i * 4 * outwh; float* tmp0 = output + i * 4 * outwh; float* tmp1 = output + i * 4 * outwh + 1 * outwh; float* tmp2 = output + i * 4 * outwh + 2 * outwh; float* tmp3 = output + i * 4 * outwh + 3 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 4; tmp0++; tmp1++; tmp2++; tmp3++; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float* otmp = output_tmp + channel_count * 4 * outwh; float* tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 4; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } static void convdw3x3s2(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 2; int channel_remain = inc - (channel_count << 2); // generate the image tmp float* img_tmp = ( float* )sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float)); float* kernel_tmp = ( float* )sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float)); float* bias_tmp = ( float* )sys_malloc(4 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 4; float* k0 = img_data + (ii + 0) * inwh; float* k1 = img_data + (ii + 1) * inwh; float* k2 = img_data + (ii + 2) * inwh; float* k3 = img_data + (ii + 3) * inwh; float* f0 = kernel_data + (ii + 0) * 9; float* f1 = kernel_data + (ii + 1) * 9; float* f2 = kernel_data + (ii + 2) * 9; float* f3 = kernel_data + (ii + 3) * 9; float* b0 = bias_data + (ii + 0); float* b1 = bias_data + (ii + 1); float* b2 = bias_data + (ii + 2); float* b3 = bias_data + (ii + 3); float* tmp0 = img_tmp + ii * inwh; float* tmp1 = kernel_tmp + ii * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 4; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 4; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float* k0 = img_data + ii * inwh; float* f0 = kernel_data + ii * 9; float* b0 = bias_data + ii; float* tmp0 = img_tmp + channel_count * 4 * inwh; float* tmp1 = kernel_tmp + channel_count * 4 * 9; float* tmp2 = bias_tmp + channel_count * 4; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 4; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 4; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float* output_tmp = ( float* 
)sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float* ktmp = kernel_tmp + c * 4 * 9; float* btmp = bias_tmp + c * 4; for (int i = 0; i < outh; i++) { int j = 0; float* itmp0 = img_tmp + c * 4 * inwh + 4 * i * 2 * inw; float* itmp1 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 1) * inw; float* itmp2 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 2) * inw; float* otmp = output_tmp + c * 4 * outwh + 4 * i * outw; for (; j + 3 < outw; j += 4) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _sum1 = _mm_loadu_ps(btmp); __m128 _sum2 = _mm_loadu_ps(btmp); __m128 _sum3 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _va3 = _mm_loadu_ps(itmp0 + 12); __m128 _va4 = _mm_loadu_ps(itmp0 + 16); __m128 _va5 = _mm_loadu_ps(itmp0 + 20); __m128 _va6 = _mm_loadu_ps(itmp0 + 24); __m128 _va7 = _mm_loadu_ps(itmp0 + 28); __m128 _va8 = _mm_loadu_ps(itmp0 + 32); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _va3 = _mm_loadu_ps(itmp1 + 12); _va4 = _mm_loadu_ps(itmp1 + 16); _va5 = _mm_loadu_ps(itmp1 + 20); _va6 = _mm_loadu_ps(itmp1 + 24); _va7 = _mm_loadu_ps(itmp1 + 28); _va8 = _mm_loadu_ps(itmp1 + 32); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _va3 = _mm_loadu_ps(itmp2 + 12); _va4 = _mm_loadu_ps(itmp2 + 16); _va5 = _mm_loadu_ps(itmp2 + 20); _va6 = _mm_loadu_ps(itmp2 + 24); _va7 = _mm_loadu_ps(itmp2 + 28); _va8 = _mm_loadu_ps(itmp2 + 32); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, 
_vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _mm_storeu_ps(otmp, _sum0); _mm_storeu_ps(otmp + 4, _sum1); _mm_storeu_ps(otmp + 8, _sum2); _mm_storeu_ps(otmp + 12, _sum3); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; sum1[k] += itmp0[k + 8] * ktmp[k]; sum1[k] += itmp1[k + 8] * ktmp[k + 12]; sum1[k] += itmp2[k + 8] * ktmp[k + 24]; sum1[k] += itmp0[k + 12] * ktmp[k + 4]; sum1[k] += itmp1[k + 12] * ktmp[k + 16]; sum1[k] += itmp2[k + 12] * ktmp[k + 28]; sum1[k] += itmp0[k + 16] * ktmp[k + 8]; sum1[k] += itmp1[k + 16] * ktmp[k + 20]; sum1[k] += itmp2[k + 16] * ktmp[k + 32]; sum2[k] += itmp0[k + 16] * ktmp[k]; sum2[k] += itmp1[k + 16] * ktmp[k + 12]; sum2[k] += itmp2[k + 16] * ktmp[k + 24]; sum2[k] += itmp0[k + 20] * ktmp[k + 4]; sum2[k] += itmp1[k + 20] * ktmp[k + 16]; sum2[k] += itmp2[k + 20] * ktmp[k + 28]; sum2[k] += itmp0[k + 24] * ktmp[k + 8]; sum2[k] += itmp1[k + 24] * ktmp[k + 20]; sum2[k] += itmp2[k + 24] * ktmp[k + 32]; sum3[k] += itmp0[k + 24] * ktmp[k]; sum3[k] += itmp1[k + 24] * ktmp[k + 12]; sum3[k] += itmp2[k + 24] * ktmp[k + 24]; sum3[k] += itmp0[k + 28] * ktmp[k + 4]; sum3[k] += itmp1[k + 28] * ktmp[k + 16]; sum3[k] += itmp2[k + 28] * ktmp[k + 28]; sum3[k] += itmp0[k + 32] * ktmp[k + 8]; sum3[k] += itmp1[k + 32] * ktmp[k + 20]; sum3[k] += itmp2[k + 32] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k]; } #endif itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 16; } for (; j < outw; j++) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _mm_storeu_ps(otmp, _sum0); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 
4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; } #endif itmp0 += 8; itmp1 += 8; itmp2 += 8; otmp += 4; } } } { for (int i = 0; i < channel_count; i++) { float* otmp = output_tmp + i * 4 * outwh; float* tmp0 = output + i * 4 * outwh; float* tmp1 = output + i * 4 * outwh + 1 * outwh; float* tmp2 = output + i * 4 * outwh + 2 * outwh; float* tmp3 = output + i * 4 * outwh + 3 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 4; tmp0++; tmp1++; tmp2++; tmp3++; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float* otmp = output_tmp + channel_count * 4 * outwh; float* tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 4; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } #else static void convdw3x3s1(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w, int out_h, int out_w, int num_thread) { int w = in_w; int h = in_h; int c_step_in = w * h; int outw = out_w; int outh = out_h; int c_step_out = outw * outh; const int group = channel; const float* kernel = _kernel; #pragma omp parallel for num_threads(num_thread) for (int g = 0; g < group; g++) { float* out = output + g * c_step_out; float* outptr = out; float* outptr2 = outptr + outw; const float bias0 = _bias ? _bias[g] : 0.f; const float* kernel0 = kernel + g * 9; const float* img0 = input + g * c_step_in; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; int i = 0; for (; i + 1 < outh; i += 2) { int remain = outw; for (; remain > 0; remain--) { float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; float sum2 = bias0; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; *outptr = sum; *outptr2 = sum2; r0++; r1++; r2++; r3++; outptr++; outptr2++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr = sum; r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w, int out_h, int out_w, int num_thread) { int w = in_w; int h = in_h; int c_step_in = w * h; int outw = out_w; int outh = out_h; int c_step_out = outw * outh; const int group = channel; const int tailstep = w - 2 * outw + w; const float* kernel = _kernel; #pragma omp parallel for num_threads(num_thread) for (int g = 0; g < group; g++) { float* out = output + g * c_step_out; float* outptr = out; const float* kernel0 = kernel + g * 9; const float bias0 = _bias ? 
_bias[g] : 0.f;
        const float* img0 = input + g * c_step_in;

        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w * 2;

        const float* k0 = kernel0;
        const float* k1 = kernel0 + 3;
        const float* k2 = kernel0 + 6;

        int i = 0;
        for (; i < outh; i++)
        {
            int remain = outw;
            for (; remain > 0; remain--)
            {
                /* one output pixel: nine multiply-accumulates over the 3x3 window */
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr = sum;

                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }

            /* stride 2: skip the odd input row plus any right-edge columns */
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
#endif

int conv_dw_run(struct ir_tensor* input_tensor, struct ir_tensor* weight_tensor, struct ir_tensor* bias_tensor,
                struct ir_tensor* output_tensor, struct conv_param* param, int num_thread, int cpu_affinity)
{
    float* input = (float*)input_tensor->data;
    float* output = (float*)output_tensor->data;
    float* kernel = (float*)weight_tensor->data;
    float* biases = NULL;
    if (bias_tensor)
        biases = (float*)bias_tensor->data;

    int batch_number = input_tensor->dims[0];
    int inc = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int in_chw = inc * inh * inw;

    int outc = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_chw = out_hw * outc;

    int ksize_h = param->kernel_h;
    int ksize_w = param->kernel_w;
    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;
    int stride_w = param->stride_w;
    int stride_h = param->stride_h;
    int dilation_w = param->dilation_w;
    int dilation_h = param->dilation_h;
    int group = param->group;
    int activation = param->activation;

    /* padding */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;
    float* input_tmp = NULL;
    if (inh_tmp == inh && inw_tmp == inw)
        input_tmp = input;
    else
    {
        input_tmp = (float*)sys_malloc(inh_tmp * inw_tmp * group * sizeof(float));
        for (int g = 0; g < group; g++)
        {
            float* pad_in = input + g * inh * inw;
            float* pad_out = input_tmp + g * inh_tmp * inw_tmp;
            pad(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0.f);
        }
    }

    /* process; note the padded buffer and output pointer are not advanced per
       batch, so this loop effectively assumes batch_number == 1 */
    for (int i = 0; i < batch_number; i++)
    {
        if (stride_h == 1)
            convdw3x3s1(output, input_tmp, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
        else
            convdw3x3s2(output, input_tmp, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
    }

    /* relu */
    if (activation >= 0)
        relu(output, batch_number * out_chw, activation);

    if (!(inh_tmp == inh && inw_tmp == inw))
        sys_free(input_tmp);

    return 0;
}
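/*
 * For reference, a minimal scalar sketch of the per-group computation that
 * convdw3x3s1 / convdw3x3s2 vectorize above: a 3x3 depthwise window slid at
 * the given stride over one already-padded input channel. The function name
 * and signature are illustrative only, not part of this kernel's API.
 */
static void convdw3x3_ref(float* out, const float* in, const float* k, float bias,
                          int inw, int outh, int outw, int stride)
{
    for (int y = 0; y < outh; y++)
    {
        for (int x = 0; x < outw; x++)
        {
            float sum = bias; /* start from the channel bias */
            for (int ky = 0; ky < 3; ky++)
                for (int kx = 0; kx < 3; kx++)
                    sum += in[(y * stride + ky) * inw + (x * stride + kx)] * k[ky * 3 + kx];
            out[y * outw + x] = sum;
        }
    }
}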
/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h> /* memcpy(), used by pad() below */
#include <math.h>

#include "conv_dw_kernel_x86.h"

#if __SSE2__
#include <emmintrin.h>
#endif
#if __AVX__
#include <immintrin.h>
#endif

#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))

/* clamp in place: ReLU, plus an upper bound when activation > 0
   (e.g. activation == 6 gives ReLU6) */
void relu(float *data, int size, int activation)
{
    for (int i = 0; i < size; i++)
    {
        data[i] = max(data[i], (float)0);

        if (activation > 0)
        {
            data[i] = min(data[i], (float)activation);
        }
    }
}

/* copy an in_h x in_w image into an out_h x out_w buffer, surrounding it with
   the constant v; top/left position the source inside the destination */
void pad(float *input, float *output, int in_h, int in_w, int out_h, int out_w, int top, int left, float v)
{
    float *ptr = input;
    float *outptr = output;

    int y = 0;
    //fill top
    for (; y < top; y++)
    {
        int x = 0;
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        outptr += out_w;
    }
    //fill center
    for (; y < (top + in_h); y++)
    {
        int x = 0;
        for (; x < left; x++)
        {
            outptr[x] = v;
        }
        if (in_w < 12)
        {
            /* short rows: a plain loop avoids the memcpy call overhead */
            for (; x < (left + in_w); x++)
            {
                outptr[x] = ptr[x - left];
            }
        }
        else
        {
            memcpy(outptr + left, ptr, in_w * sizeof(float));
            x += in_w;
        }
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        ptr += in_w;
        outptr += out_w;
    }
    //fill bottom
    for (; y < out_h; y++)
    {
        int x = 0;
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        outptr += out_w;
    }
}

#if __AVX__
static void convdw3x3s1(float *output, float *img_data, float *kernel_data, float *bias_data, int inc, int inh,
                        int inw, int outh, int outw, int num_thread)
{
    int inwh = inw * inh;
    int outwh = outw * outh;
    int channel_count = inc >> 3;
    int channel_remain = inc - (channel_count << 3);

    //generate the image tmp: interleave eight channels per pixel so each
    //256-bit vector holds the same tap for eight channels
    float *img_tmp = (float *)sys_malloc(8 * inwh * (channel_count + 1) * sizeof(float));
    float *kernel_tmp = (float *)sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float));
    float *bias_tmp = (float *)sys_malloc(8 * (channel_count + 1) * sizeof(float));

    {
        for (int i = 0; i < channel_count; i++)
        {
            int ii = i * 8;
            const float *k0 = img_data + (ii + 0) * inwh;
            const float *k1 = img_data + (ii + 1) * inwh;
            const float *k2 = img_data + (ii + 2) * inwh;
            const float *k3 = img_data + (ii + 3) * inwh;
            const float *k4 = img_data + (ii + 4) * inwh;
            const float *k5 = img_data + (ii + 5) * inwh;
            const float *k6 = img_data + (ii + 6) * inwh;
            const float *k7 = img_data + (ii + 7) * inwh;
            const float *f0 = kernel_data + (ii + 0) * 9;
            const float *f1 = kernel_data + (ii + 1) * 9;
            const float *f2 = kernel_data + (ii + 2) * 9;
            const float *f3 = kernel_data + (ii + 3) * 9;
            const float *f4 = kernel_data + (ii + 4) * 9;
            const float *f5 = kernel_data + (ii + 5) * 9;
            const float *f6 = kernel_data + (ii + 6) * 9;
            const float *f7 = kernel_data + (ii + 7) * 9;
            const float *b0 = bias_data + (ii + 0);
            const float *b1 = bias_data + (ii + 1);
            const float *b2 = bias_data + (ii + 2);
            const float *b3 = bias_data + (ii + 3);
            const float *b4 = bias_data + (ii + 4);
            const float *b5 = bias_data + (ii + 5);
            const float *b6 = bias_data + (ii + 6);
            const float *b7 = bias_data + (ii + 7);
            float *tmp0 = img_tmp + ii * inwh;
            float *tmp1 = kernel_tmp + ii * 9;
            float *tmp2 = bias_tmp + ii;
            for (int j = 0; j < inwh; j++)
            {
                tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0];
                tmp0[4] = k4[0]; tmp0[5] = k5[0]; tmp0[6] = k6[0]; tmp0[7] = k7[0];
                tmp0 += 8;
                k0++; k1++; k2++; k3++; k4++; k5++; k6++; k7++;
            }
            for (int j = 0; j < 9; j++)
            {
                tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0];
                tmp1[4] = f4[0]; tmp1[5] = f5[0]; tmp1[6] = f6[0]; tmp1[7] = f7[0];
                tmp1 += 8;
                f0++; f1++; f2++; f3++; f4++; f5++; f6++; f7++;
            }
            if (bias_data)
            {
                tmp2[0] = b0[0];
                tmp2[1] = b1[0];
                tmp2[2] = b2[0];
                tmp2[3] = b3[0];
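                /* lanes 4..7 of the packed 8-channel bias vector follow */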
tmp2[4] = b4[0]; tmp2[5] = b5[0]; tmp2[6] = b6[0]; tmp2[7] = b7[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; tmp2[4] = 0; tmp2[5] = 0; tmp2[6] = 0; tmp2[7] = 0; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float *k0 = img_data + (ii + 0) * inwh; float *k1 = img_data + (ii + 1) * inwh; float *k2 = img_data + (ii + 2) * inwh; float *k3 = img_data + (ii + 3) * inwh; float *f0 = kernel_data + (ii + 0) * 9; float *f1 = kernel_data + (ii + 1) * 9; float *f2 = kernel_data + (ii + 2) * 9; float *f3 = kernel_data + (ii + 3) * 9; float *b0 = bias_data + (ii + 0); float *b1 = bias_data + (ii + 1); float *b2 = bias_data + (ii + 2); float *b3 = bias_data + (ii + 3); float *tmp0 = img_tmp + channel_count * 8 * inwh; float *tmp1 = kernel_tmp + channel_count * 8 * 9; float *tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 8; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 8; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float *k0 = img_data + ii * inwh; float *f0 = kernel_data + ii * 9; float *b0 = bias_data + ii; float *tmp0 = img_tmp + channel_count * 8 * inwh; float *tmp1 = kernel_tmp + channel_count * 8 * 9; float *tmp2 = bias_tmp + channel_count * 8; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 8; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 8; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float *output_tmp = (float *)sys_malloc(outwh * (channel_count + 1) * 8 * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float *ktmp = kernel_tmp + c * 8 * 9; float *btmp = bias_tmp + c * 8; for (int i = 0; i < outh; i++) { int j = 0; float *itmp0 = img_tmp + c * 8 * inwh + 8 * i * inw; float *itmp1 = img_tmp + c * 8 * inwh + 8 * (i + 1) * inw; float *itmp2 = img_tmp + c * 8 * inwh + 8 * (i + 2) * inw; float *otmp = output_tmp + c * 8 * outwh + 8 * i * outw; for (; j + 7 < outw; j += 8) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _sum4 = _mm256_loadu_ps(btmp); __m256 _sum5 = _mm256_loadu_ps(btmp); __m256 _sum6 = _mm256_loadu_ps(btmp); __m256 _sum7 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _va6 = _mm256_loadu_ps(itmp0 + 48); __m256 _va7 = _mm256_loadu_ps(itmp0 + 56); __m256 _va8 = _mm256_loadu_ps(itmp0 + 64); __m256 _va9 = _mm256_loadu_ps(itmp0 + 72); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = 
_mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _va6 = _mm256_loadu_ps(itmp1 + 48); _va7 = _mm256_loadu_ps(itmp1 + 56); _va8 = _mm256_loadu_ps(itmp1 + 64); _va9 = _mm256_loadu_ps(itmp1 + 72); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _va6 = _mm256_loadu_ps(itmp2 + 48); _va7 = _mm256_loadu_ps(itmp2 + 56); _va8 = _mm256_loadu_ps(itmp2 + 64); _va9 = _mm256_loadu_ps(itmp2 + 72); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); 
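/* last taps of kernel row 3 for pixels 6..7; the eight packed AVX results are stored right after */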
_sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); _mm256_storeu_ps(otmp + 32, _sum4); _mm256_storeu_ps(otmp + 40, _sum5); _mm256_storeu_ps(otmp + 48, _sum6); _mm256_storeu_ps(otmp + 56, _sum7); itmp0 += 64; itmp1 += 64; itmp2 += 64; otmp += 64; } for (; j + 3 < outw; j += 4) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _va0 = 
_mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 16; } for (; j < outw; j++) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _mm256_storeu_ps(otmp, _sum0); itmp0 += 8; itmp1 += 8; itmp2 += 8; otmp += 8; } } } //load_data { for (int i = 0; i < channel_count; i++) { float *otmp = output_tmp + i * 8 * outwh; float *tmp0 = output + i * 8 * outwh; float *tmp1 = output + i * 8 * outwh + 1 * outwh; float *tmp2 = output + i * 8 * outwh + 2 * outwh; float *tmp3 = output + i * 8 * outwh + 3 * outwh; float *tmp4 = output + i * 8 * outwh + 4 * outwh; float *tmp5 = output + i * 8 * outwh + 5 * outwh; float *tmp6 = output + i * 8 * outwh + 6 * outwh; float *tmp7 = output + i * 8 * outwh + 7 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; tmp4[0] = otmp[4]; tmp5[0] = otmp[5]; tmp6[0] = otmp[6]; tmp7[0] = otmp[7]; otmp += 8; 
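/* unpack stride-8 interleaved output back to planar NCHW: the eight planar pointers advance one pixel per packed group (note the inner counter shadows the outer channel index i) */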
tmp0++; tmp1++; tmp2++; tmp3++; tmp4++; tmp5++; tmp6++; tmp7++; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float *otmp = output_tmp + ii * outwh; float *tmp0 = output + ii * outwh; float *tmp1 = output + ii * outwh + 1 * outwh; float *tmp2 = output + ii * outwh + 2 * outwh; float *tmp3 = output + ii * outwh + 3 * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float *otmp = output_tmp + channel_count * 8 * outwh; float *tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 8; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } static void convdw3x3s2(float *output, float *img_data, float *kernel_data, float *bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 3; int channel_remain = inc - (channel_count << 3); //generate the image tmp float *img_tmp = (float *)sys_malloc(8 * inwh * (channel_count + 1) * sizeof(float)); float *kernel_tmp = (float *)sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float)); float *bias_tmp = (float *)sys_malloc(8 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 8; const float *k0 = img_data + (ii + 0) * inwh; const float *k1 = img_data + (ii + 1) * inwh; const float *k2 = img_data + (ii + 2) * inwh; const float *k3 = img_data + (ii + 3) * inwh; const float *k4 = img_data + (ii + 4) * inwh; const float *k5 = img_data + (ii + 5) * inwh; const float *k6 = img_data + (ii + 6) * inwh; const float *k7 = img_data + (ii + 7) * inwh; const float *f0 = kernel_data + (ii + 0) * 9; const float *f1 = kernel_data + (ii + 1) * 9; const float *f2 = kernel_data + (ii + 2) * 9; const float *f3 = kernel_data + (ii + 3) * 9; const float *f4 = kernel_data + (ii + 4) * 9; const float *f5 = kernel_data + (ii + 5) * 9; const float *f6 = kernel_data + (ii + 6) * 9; const float *f7 = kernel_data + (ii + 7) * 9; const float *b0 = bias_data + (ii + 0); const float *b1 = bias_data + (ii + 1); const float *b2 = bias_data + (ii + 2); const float *b3 = bias_data + (ii + 3); const float *b4 = bias_data + (ii + 4); const float *b5 = bias_data + (ii + 5); const float *b6 = bias_data + (ii + 6); const float *b7 = bias_data + (ii + 7); float *tmp0 = img_tmp + ii * inwh; float *tmp1 = kernel_tmp + ii * 9; float *tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0[4] = k4[0]; tmp0[5] = k5[0]; tmp0[6] = k6[0]; tmp0[7] = k7[0]; tmp0 += 8; k0++; k1++; k2++; k3++; k4++; k5++; k6++; k7++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1[4] = f4[0]; tmp1[5] = f5[0]; tmp1[6] = f6[0]; tmp1[7] = f7[0]; tmp1 += 8; f0++; f1++; f2++; f3++; f4++; f5++; f6++; f7++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; tmp2[4] = b4[0]; tmp2[5] = b5[0]; tmp2[6] = b6[0]; tmp2[7] = b7[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; tmp2[4] = 0; tmp2[5] = 0; tmp2[6] = 0; tmp2[7] = 0; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float *k0 = img_data + (ii + 0) * inwh; float *k1 = img_data + (ii + 1) * inwh; float *k2 = img_data + (ii + 2) * inwh; 
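/* remainder path: leftover channels are packed four per iteration into the stride-8 buffer; unused lanes hold garbage but are never unpacked */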
float *k3 = img_data + (ii + 3) * inwh; float *f0 = kernel_data + (ii + 0) * 9; float *f1 = kernel_data + (ii + 1) * 9; float *f2 = kernel_data + (ii + 2) * 9; float *f3 = kernel_data + (ii + 3) * 9; float *b0 = bias_data + (ii + 0); float *b1 = bias_data + (ii + 1); float *b2 = bias_data + (ii + 2); float *b3 = bias_data + (ii + 3); float *tmp0 = img_tmp + channel_count * 8 * inwh; float *tmp1 = kernel_tmp + channel_count * 8 * 9; float *tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 8; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 8; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float *k0 = img_data + ii * inwh; float *f0 = kernel_data + ii * 9; float *b0 = bias_data + ii; float *tmp0 = img_tmp + channel_count * 8 * inwh; float *tmp1 = kernel_tmp + channel_count * 8 * 9; float *tmp2 = bias_tmp + channel_count * 8; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 8; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 8; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float *output_tmp = (float *)sys_malloc(outwh * (channel_count + 1) * 8 * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float *ktmp = kernel_tmp + c * 8 * 9; float *btmp = bias_tmp + c * 8; for (int i = 0; i < outh; i++) { int j = 0; float *itmp0 = img_tmp + c * 8 * inwh + 8 * i * 2 * inw; float *itmp1 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 1) * inw; float *itmp2 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 2) * inw; float *otmp = output_tmp + c * 8 * outwh + 8 * i * outw; for (; j + 3 < outw; j += 4) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _va6 = _mm256_loadu_ps(itmp0 + 48); __m256 _va7 = _mm256_loadu_ps(itmp0 + 56); __m256 _va8 = _mm256_loadu_ps(itmp0 + 64); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _va6 = _mm256_loadu_ps(itmp1 + 48); _va7 = _mm256_loadu_ps(itmp1 + 56); _va8 = _mm256_loadu_ps(itmp1 + 64); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, 
_sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _va6 = _mm256_loadu_ps(itmp2 + 48); _va7 = _mm256_loadu_ps(itmp2 + 56); _va8 = _mm256_loadu_ps(itmp2 + 64); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); itmp0 += 64; itmp1 += 64; itmp2 += 64; otmp += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 16; } for (; j < 
outw; j++) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _mm256_storeu_ps(otmp, _sum0); itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 8; } } } //load_data { for (int i = 0; i < channel_count; i++) { float *otmp = output_tmp + i * 8 * outwh; float *tmp0 = output + i * 8 * outwh; float *tmp1 = output + i * 8 * outwh + 1 * outwh; float *tmp2 = output + i * 8 * outwh + 2 * outwh; float *tmp3 = output + i * 8 * outwh + 3 * outwh; float *tmp4 = output + i * 8 * outwh + 4 * outwh; float *tmp5 = output + i * 8 * outwh + 5 * outwh; float *tmp6 = output + i * 8 * outwh + 6 * outwh; float *tmp7 = output + i * 8 * outwh + 7 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; tmp4[0] = otmp[4]; tmp5[0] = otmp[5]; tmp6[0] = otmp[6]; tmp7[0] = otmp[7]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; tmp4++; tmp5++; tmp6++; tmp7++; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float *otmp = output_tmp + ii * outwh; float *tmp0 = output + ii * outwh; float *tmp1 = output + ii * outwh + 1 * outwh; float *tmp2 = output + ii * outwh + 2 * outwh; float *tmp3 = output + ii * outwh + 3 * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float *otmp = output_tmp + channel_count * 8 * outwh; float *tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 8; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } #elif __SSE2__ static void convdw3x3s1(float *output, float *img_data, float *kernel_data, float *bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 2; int channel_remain = inc - (channel_count << 2); //generate the image tmp float *img_tmp = (float *)sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float)); float *kernel_tmp = (float *)sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float)); float *bias_tmp = (float *)sys_malloc(4 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 4; float *k0 = img_data + (ii + 0) * inwh; float *k1 = img_data + (ii + 1) * inwh; float *k2 = img_data + (ii + 2) * inwh; float *k3 = img_data + (ii + 3) * inwh; float *f0 = kernel_data + (ii + 0) * 9; 
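/* each channel's 3x3 kernel is 9 contiguous taps; four channels are interleaved to stride 4 below */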
float *f1 = kernel_data + (ii + 1) * 9; float *f2 = kernel_data + (ii + 2) * 9; float *f3 = kernel_data + (ii + 3) * 9; float *b0 = bias_data + (ii + 0); float *b1 = bias_data + (ii + 1); float *b2 = bias_data + (ii + 2); float *b3 = bias_data + (ii + 3); float *tmp0 = img_tmp + ii * inwh; float *tmp1 = kernel_tmp + ii * 9; float *tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 4; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 4; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float *k0 = img_data + ii * inwh; float *f0 = kernel_data + ii * 9; float *b0 = bias_data + ii; float *tmp0 = img_tmp + channel_count * 4 * inwh; float *tmp1 = kernel_tmp + channel_count * 4 * 9; float *tmp2 = bias_tmp + channel_count * 4; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 4; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 4; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float *output_tmp = (float *)sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float *ktmp = kernel_tmp + c * 4 * 9; float *btmp = bias_tmp + c * 4; for (int i = 0; i < outh; i++) { int j = 0; float *itmp0 = img_tmp + c * 4 * inwh + 4 * i * inw; float *itmp1 = img_tmp + c * 4 * inwh + 4 * (i + 1) * inw; float *itmp2 = img_tmp + c * 4 * inwh + 4 * (i + 2) * inw; float *otmp = output_tmp + c * 4 * outwh + 4 * i * outw; for (; j + 7 < outw; j += 8) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _sum1 = _mm_loadu_ps(btmp); __m128 _sum2 = _mm_loadu_ps(btmp); __m128 _sum3 = _mm_loadu_ps(btmp); __m128 _sum4 = _mm_loadu_ps(btmp); __m128 _sum5 = _mm_loadu_ps(btmp); __m128 _sum6 = _mm_loadu_ps(btmp); __m128 _sum7 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _va3 = _mm_loadu_ps(itmp0 + 12); __m128 _va4 = _mm_loadu_ps(itmp0 + 16); __m128 _va5 = _mm_loadu_ps(itmp0 + 20); __m128 _va6 = _mm_loadu_ps(itmp0 + 24); __m128 _va7 = _mm_loadu_ps(itmp0 + 28); __m128 _va8 = _mm_loadu_ps(itmp0 + 32); __m128 _va9 = _mm_loadu_ps(itmp0 + 36); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), 
_sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5); _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7); _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _va3 = _mm_loadu_ps(itmp1 + 12); _va4 = _mm_loadu_ps(itmp1 + 16); _va5 = _mm_loadu_ps(itmp1 + 20); _va6 = _mm_loadu_ps(itmp1 + 24); _va7 = _mm_loadu_ps(itmp1 + 28); _va8 = _mm_loadu_ps(itmp1 + 32); _va9 = _mm_loadu_ps(itmp1 + 36); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5); _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7); _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _va3 = _mm_loadu_ps(itmp2 + 12); _va4 = _mm_loadu_ps(itmp2 + 16); _va5 = _mm_loadu_ps(itmp2 + 20); _va6 = _mm_loadu_ps(itmp2 + 24); _va7 = _mm_loadu_ps(itmp2 + 28); _va8 = _mm_loadu_ps(itmp2 + 32); _va9 = _mm_loadu_ps(itmp2 + 36); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5); _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7); _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7); 
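/* all nine taps accumulated for the eight packed pixels; write the results back */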
_mm_storeu_ps(otmp, _sum0); _mm_storeu_ps(otmp + 4, _sum1); _mm_storeu_ps(otmp + 8, _sum2); _mm_storeu_ps(otmp + 12, _sum3); _mm_storeu_ps(otmp + 16, _sum4); _mm_storeu_ps(otmp + 20, _sum5); _mm_storeu_ps(otmp + 24, _sum6); _mm_storeu_ps(otmp + 28, _sum7); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum4[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum5[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum6[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum7[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; sum1[k] += itmp0[k + 4] * ktmp[k]; sum1[k] += itmp1[k + 4] * ktmp[k + 12]; sum1[k] += itmp2[k + 4] * ktmp[k + 24]; sum1[k] += itmp0[k + 8] * ktmp[k + 4]; sum1[k] += itmp1[k + 8] * ktmp[k + 16]; sum1[k] += itmp2[k + 8] * ktmp[k + 28]; sum1[k] += itmp0[k + 12] * ktmp[k + 8]; sum1[k] += itmp1[k + 12] * ktmp[k + 20]; sum1[k] += itmp2[k + 12] * ktmp[k + 32]; sum2[k] += itmp0[k + 8] * ktmp[k]; sum2[k] += itmp1[k + 8] * ktmp[k + 12]; sum2[k] += itmp2[k + 8] * ktmp[k + 24]; sum2[k] += itmp0[k + 12] * ktmp[k + 4]; sum2[k] += itmp1[k + 12] * ktmp[k + 16]; sum2[k] += itmp2[k + 12] * ktmp[k + 28]; sum2[k] += itmp0[k + 16] * ktmp[k + 8]; sum2[k] += itmp1[k + 16] * ktmp[k + 20]; sum2[k] += itmp2[k + 16] * ktmp[k + 32]; sum3[k] += itmp0[k + 12] * ktmp[k]; sum3[k] += itmp1[k + 12] * ktmp[k + 12]; sum3[k] += itmp2[k + 12] * ktmp[k + 24]; sum3[k] += itmp0[k + 16] * ktmp[k + 4]; sum3[k] += itmp1[k + 16] * ktmp[k + 16]; sum3[k] += itmp2[k + 16] * ktmp[k + 28]; sum3[k] += itmp0[k + 20] * ktmp[k + 8]; sum3[k] += itmp1[k + 20] * ktmp[k + 20]; sum3[k] += itmp2[k + 20] * ktmp[k + 32]; sum4[k] += itmp0[k + 16] * ktmp[k]; sum4[k] += itmp1[k + 16] * ktmp[k + 12]; sum4[k] += itmp2[k + 16] * ktmp[k + 24]; sum4[k] += itmp0[k + 20] * ktmp[k + 4]; sum4[k] += itmp1[k + 20] * ktmp[k + 16]; sum4[k] += itmp2[k + 20] * ktmp[k + 28]; sum4[k] += itmp0[k + 24] * ktmp[k + 8]; sum4[k] += itmp1[k + 24] * ktmp[k + 20]; sum4[k] += itmp2[k + 24] * ktmp[k + 32]; sum5[k] += itmp0[k + 20] * ktmp[k]; sum5[k] += itmp1[k + 20] * ktmp[k + 12]; sum5[k] += itmp2[k + 20] * ktmp[k + 24]; sum5[k] += itmp0[k + 24] * ktmp[k + 4]; sum5[k] += itmp1[k + 24] * ktmp[k + 16]; sum5[k] += itmp2[k + 24] * ktmp[k + 28]; sum5[k] += itmp0[k + 28] * ktmp[k + 8]; sum5[k] += itmp1[k + 28] * ktmp[k + 20]; sum5[k] += itmp2[k + 28] * ktmp[k + 32]; sum6[k] += itmp0[k + 24] * ktmp[k]; sum6[k] += itmp1[k + 24] * ktmp[k + 12]; sum6[k] += itmp2[k + 24] * ktmp[k + 24]; sum6[k] += itmp0[k + 28] * ktmp[k + 4]; sum6[k] += itmp1[k + 28] * ktmp[k + 16]; sum6[k] += itmp2[k + 28] * ktmp[k + 28]; sum6[k] += itmp0[k + 32] * ktmp[k + 8]; sum6[k] += itmp1[k + 32] * ktmp[k + 20]; sum6[k] += itmp2[k + 32] * ktmp[k + 32]; sum7[k] += itmp0[k + 28] * ktmp[k]; sum7[k] += itmp1[k + 28] * ktmp[k + 12]; sum7[k] += itmp2[k + 28] * ktmp[k + 24]; sum7[k] += itmp0[k + 32] * ktmp[k + 4]; sum7[k] += itmp1[k + 32] * ktmp[k + 16]; sum7[k] += itmp2[k + 32] * ktmp[k + 28]; sum7[k] += itmp0[k + 36] * ktmp[k + 8]; sum7[k] += itmp1[k + 36] * ktmp[k + 20]; 
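/* the last tap (kernel row 2, column 2) of pixel 7's window follows on the next statement */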
sum7[k] += itmp2[k + 36] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k]; otmp[k + 16] = sum4[k]; otmp[k + 20] = sum5[k]; otmp[k + 24] = sum6[k]; otmp[k + 28] = sum7[k]; } #endif itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 32; } for (; j + 3 < outw; j += 4) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _sum1 = _mm_loadu_ps(btmp); __m128 _sum2 = _mm_loadu_ps(btmp); __m128 _sum3 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _va3 = _mm_loadu_ps(itmp0 + 12); __m128 _va4 = _mm_loadu_ps(itmp0 + 16); __m128 _va5 = _mm_loadu_ps(itmp0 + 20); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _va3 = _mm_loadu_ps(itmp1 + 12); _va4 = _mm_loadu_ps(itmp1 + 16); _va5 = _mm_loadu_ps(itmp1 + 20); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _va3 = _mm_loadu_ps(itmp2 + 12); _va4 = _mm_loadu_ps(itmp2 + 16); _va5 = _mm_loadu_ps(itmp2 + 20); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _mm_storeu_ps(otmp, _sum0); _mm_storeu_ps(otmp + 4, _sum1); _mm_storeu_ps(otmp + 8, _sum2); _mm_storeu_ps(otmp + 12, _sum3); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float 
sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; sum1[k] += itmp0[k + 4] * ktmp[k]; sum1[k] += itmp1[k + 4] * ktmp[k + 12]; sum1[k] += itmp2[k + 4] * ktmp[k + 24]; sum1[k] += itmp0[k + 8] * ktmp[k + 4]; sum1[k] += itmp1[k + 8] * ktmp[k + 16]; sum1[k] += itmp2[k + 8] * ktmp[k + 28]; sum1[k] += itmp0[k + 12] * ktmp[k + 8]; sum1[k] += itmp1[k + 12] * ktmp[k + 20]; sum1[k] += itmp2[k + 12] * ktmp[k + 32]; sum2[k] += itmp0[k + 8] * ktmp[k]; sum2[k] += itmp1[k + 8] * ktmp[k + 12]; sum2[k] += itmp2[k + 8] * ktmp[k + 24]; sum2[k] += itmp0[k + 12] * ktmp[k + 4]; sum2[k] += itmp1[k + 12] * ktmp[k + 16]; sum2[k] += itmp2[k + 12] * ktmp[k + 28]; sum2[k] += itmp0[k + 16] * ktmp[k + 8]; sum2[k] += itmp1[k + 16] * ktmp[k + 20]; sum2[k] += itmp2[k + 16] * ktmp[k + 32]; sum3[k] += itmp0[k + 12] * ktmp[k]; sum3[k] += itmp1[k + 12] * ktmp[k + 12]; sum3[k] += itmp2[k + 12] * ktmp[k + 24]; sum3[k] += itmp0[k + 16] * ktmp[k + 4]; sum3[k] += itmp1[k + 16] * ktmp[k + 16]; sum3[k] += itmp2[k + 16] * ktmp[k + 28]; sum3[k] += itmp0[k + 20] * ktmp[k + 8]; sum3[k] += itmp1[k + 20] * ktmp[k + 20]; sum3[k] += itmp2[k + 20] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k]; } #endif itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 16; } for (; j < outw; j++) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _mm_storeu_ps(otmp, _sum0); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; } #endif itmp0 += 4; itmp1 += 4; itmp2 += 4; otmp += 4; } } } { for (int i = 0; i < channel_count; i++) { float *otmp = output_tmp + i * 4 * outwh; float *tmp0 = output + i * 4 * outwh; float *tmp1 = output + i * 4 * outwh + 1 * outwh; 
float *tmp2 = output + i * 4 * outwh + 2 * outwh; float *tmp3 = output + i * 4 * outwh + 3 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 4; tmp0++; tmp1++; tmp2++; tmp3++; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float *otmp = output_tmp + channel_count * 4 * outwh; float *tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 4; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } static void convdw3x3s2(float *output, float *img_data, float *kernel_data, float *bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 2; int channel_remain = inc - (channel_count << 2); //generate the image tmp float *img_tmp = (float *)sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float)); float *kernel_tmp = (float *)sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float)); float *bias_tmp = (float *)sys_malloc(4 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 4; float *k0 = img_data + (ii + 0) * inwh; float *k1 = img_data + (ii + 1) * inwh; float *k2 = img_data + (ii + 2) * inwh; float *k3 = img_data + (ii + 3) * inwh; float *f0 = kernel_data + (ii + 0) * 9; float *f1 = kernel_data + (ii + 1) * 9; float *f2 = kernel_data + (ii + 2) * 9; float *f3 = kernel_data + (ii + 3) * 9; float *b0 = bias_data + (ii + 0); float *b1 = bias_data + (ii + 1); float *b2 = bias_data + (ii + 2); float *b3 = bias_data + (ii + 3); float *tmp0 = img_tmp + ii * inwh; float *tmp1 = kernel_tmp + ii * 9; float *tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 4; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 4; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float *k0 = img_data + ii * inwh; float *f0 = kernel_data + ii * 9; float *b0 = bias_data + ii; float *tmp0 = img_tmp + channel_count * 4 * inwh; float *tmp1 = kernel_tmp + channel_count * 4 * 9; float *tmp2 = bias_tmp + channel_count * 4; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 4; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 4; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float *output_tmp = (float *)sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float *ktmp = kernel_tmp + c * 4 * 9; float *btmp = bias_tmp + c * 4; for (int i = 0; i < outh; i++) { int j = 0; float *itmp0 = img_tmp + c * 4 * inwh + 4 * i * 2 * inw; float *itmp1 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 1) * inw; float *itmp2 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 2) * inw; float *otmp = output_tmp + c * 4 * outwh + 4 * i * outw; for (; j + 3 < outw; j += 4) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _sum1 = _mm_loadu_ps(btmp); __m128 _sum2 = _mm_loadu_ps(btmp); __m128 _sum3 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _va3 = _mm_loadu_ps(itmp0 + 12); __m128 _va4 = _mm_loadu_ps(itmp0 
+ 16); __m128 _va5 = _mm_loadu_ps(itmp0 + 20); __m128 _va6 = _mm_loadu_ps(itmp0 + 24); __m128 _va7 = _mm_loadu_ps(itmp0 + 28); __m128 _va8 = _mm_loadu_ps(itmp0 + 32); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _va3 = _mm_loadu_ps(itmp1 + 12); _va4 = _mm_loadu_ps(itmp1 + 16); _va5 = _mm_loadu_ps(itmp1 + 20); _va6 = _mm_loadu_ps(itmp1 + 24); _va7 = _mm_loadu_ps(itmp1 + 28); _va8 = _mm_loadu_ps(itmp1 + 32); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _va3 = _mm_loadu_ps(itmp2 + 12); _va4 = _mm_loadu_ps(itmp2 + 16); _va5 = _mm_loadu_ps(itmp2 + 20); _va6 = _mm_loadu_ps(itmp2 + 24); _va7 = _mm_loadu_ps(itmp2 + 28); _va8 = _mm_loadu_ps(itmp2 + 32); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _mm_storeu_ps(otmp, _sum0); _mm_storeu_ps(otmp + 4, _sum1); _mm_storeu_ps(otmp + 8, _sum2); _mm_storeu_ps(otmp + 12, _sum3); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * 
ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; sum1[k] += itmp0[k + 8] * ktmp[k]; sum1[k] += itmp1[k + 8] * ktmp[k + 12]; sum1[k] += itmp2[k + 8] * ktmp[k + 24]; sum1[k] += itmp0[k + 12] * ktmp[k + 4]; sum1[k] += itmp1[k + 12] * ktmp[k + 16]; sum1[k] += itmp2[k + 12] * ktmp[k + 28]; sum1[k] += itmp0[k + 16] * ktmp[k + 8]; sum1[k] += itmp1[k + 16] * ktmp[k + 20]; sum1[k] += itmp2[k + 16] * ktmp[k + 32]; sum2[k] += itmp0[k + 16] * ktmp[k]; sum2[k] += itmp1[k + 16] * ktmp[k + 12]; sum2[k] += itmp2[k + 16] * ktmp[k + 24]; sum2[k] += itmp0[k + 20] * ktmp[k + 4]; sum2[k] += itmp1[k + 20] * ktmp[k + 16]; sum2[k] += itmp2[k + 20] * ktmp[k + 28]; sum2[k] += itmp0[k + 24] * ktmp[k + 8]; sum2[k] += itmp1[k + 24] * ktmp[k + 20]; sum2[k] += itmp2[k + 24] * ktmp[k + 32]; sum3[k] += itmp0[k + 24] * ktmp[k]; sum3[k] += itmp1[k + 24] * ktmp[k + 12]; sum3[k] += itmp2[k + 24] * ktmp[k + 24]; sum3[k] += itmp0[k + 28] * ktmp[k + 4]; sum3[k] += itmp1[k + 28] * ktmp[k + 16]; sum3[k] += itmp2[k + 28] * ktmp[k + 28]; sum3[k] += itmp0[k + 32] * ktmp[k + 8]; sum3[k] += itmp1[k + 32] * ktmp[k + 20]; sum3[k] += itmp2[k + 32] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k]; } #endif itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 16; } for (; j < outw; j++) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _mm_storeu_ps(otmp, _sum0); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; } #endif itmp0 += 8; itmp1 += 8; itmp2 += 8; otmp += 4; } } } { for (int i = 0; i < channel_count; i++) { float *otmp = output_tmp + i * 4 * outwh; float *tmp0 = output + i * 4 * outwh; float *tmp1 = output + i * 4 * outwh + 1 * outwh; float *tmp2 = output + i * 4 * outwh + 2 * outwh; float *tmp3 = output + i * 4 * outwh + 3 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 4; tmp0++; tmp1++; tmp2++; tmp3++; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float *otmp = output_tmp + 
channel_count * 4 * outwh;
            float *tmp0 = output + ii * outwh;
            for (int j = 0; j < outwh; j++)
            {
                tmp0[0] = otmp[i];
                otmp += 4;
                tmp0++;
            }
        }
    }

    sys_free(output_tmp);
    sys_free(img_tmp);
    sys_free(kernel_tmp);
    sys_free(bias_tmp);
}
#else
static void convdw3x3s1(float *output, float *input, float *_kernel, float *_bias, int channel, int in_h, int in_w,
                        int out_h, int out_w, int num_thread)
{
    int w = in_w;
    int h = in_h;
    int c_step_in = w * h;

    int outw = out_w;
    int outh = out_h;
    int c_step_out = outw * outh;

    const int group = channel;
    const float *kernel = _kernel;

    for (int g = 0; g < group; g++)
    {
        float *out = output + g * c_step_out;
        float *outptr = out;
        float *outptr2 = outptr + outw;

        const float bias0 = _bias ? _bias[g] : 0.f;
        const float *kernel0 = kernel + g * 9;

        const float *img0 = input + g * c_step_in;
        const float *r0 = img0;
        const float *r1 = img0 + w;
        const float *r2 = img0 + w * 2;
        const float *r3 = img0 + w * 3;

        const float *k0 = kernel0;
        const float *k1 = kernel0 + 3;
        const float *k2 = kernel0 + 6;

        int i = 0;
        /* compute two output rows per pass so the middle input rows are reused */
        for (; i + 1 < outh; i += 2)
        {
            int remain = outw;
            for (; remain > 0; remain--)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                float sum2 = bias0;
                sum2 += r1[0] * k0[0];
                sum2 += r1[1] * k0[1];
                sum2 += r1[2] * k0[2];
                sum2 += r2[0] * k1[0];
                sum2 += r2[1] * k1[1];
                sum2 += r2[2] * k1[2];
                sum2 += r3[0] * k2[0];
                sum2 += r3[1] * k2[1];
                sum2 += r3[2] * k2[2];

                *outptr = sum;
                *outptr2 = sum2;

                r0++;
                r1++;
                r2++;
                r3++;
                outptr++;
                outptr2++;
            }

            r0 += 2 + w;
            r1 += 2 + w;
            r2 += 2 + w;
            r3 += 2 + w;

            outptr += outw;
            outptr2 += outw;
        }

        for (; i < outh; i++)
        {
            int remain = outw;
            for (; remain > 0; remain--)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr = sum;

                r0++;
                r1++;
                r2++;
                outptr++;
            }

            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}

static void convdw3x3s2(float *output, float *input, float *_kernel, float *_bias, int channel, int in_h, int in_w,
                        int out_h, int out_w, int num_thread)
{
    int w = in_w;
    int h = in_h;
    int c_step_in = w * h;

    int outw = out_w;
    int outh = out_h;
    int c_step_out = outw * outh;

    const int group = channel;
    const int tailstep = w - 2 * outw + w;
    const float *kernel = _kernel;

    for (int g = 0; g < group; g++)
    {
        float *out = output + g * c_step_out;
        float *outptr = out;

        const float *kernel0 = kernel + g * 9;
        const float bias0 = _bias ? _bias[g] : 0.f;

        const float *img0 = input + g * c_step_in;
        const float *r0 = img0;
        const float *r1 = img0 + w;
        const float *r2 = img0 + w * 2;

        const float *k0 = kernel0;
        const float *k1 = kernel0 + 3;
        const float *k2 = kernel0 + 6;

        int i = 0;
        for (; i < outh; i++)
        {
            int remain = outw;
            for (; remain > 0; remain--)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];

                *outptr = sum;

                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
#endif

int conv_dw_run(struct ir_tensor *input_tensor, struct ir_tensor *weight_tensor, struct ir_tensor *bias_tensor,
                struct ir_tensor *output_tensor, struct conv_param *param, int num_thread, int cpu_affinity)
{
    float *input = (float *)input_tensor->data;
    float *output = (float *)output_tensor->data;
    float *kernel = (float *)weight_tensor->data;
    float *biases = NULL;
    if (bias_tensor)
        biases = (float *)bias_tensor->data;

    int batch_number = input_tensor->dims[0];
    int inc = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int in_chw = inc * inh * inw;

    int outc = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_chw = out_hw * outc;

    int ksize_h = param->kernel_h;
    int ksize_w = param->kernel_w;
    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;

    int stride_w = param->stride_w;
    int stride_h = param->stride_h;
    int dilation_w = param->dilation_w;
    int dilation_h = param->dilation_h;
    int group = param->group;
    int activation = param->activation;

    /* padding */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;
    float *input_tmp = NULL;
    if (inh_tmp == inh && inw_tmp == inw)
        input_tmp = input;
    else
    {
        input_tmp = (float *)sys_malloc(inh_tmp * inw_tmp * group * sizeof(float));
        for (int g = 0; g < group; g++)
        {
            float *pad_in = input + g * inh * inw;
            float *pad_out = input_tmp + g * inh_tmp * inw_tmp;
            pad(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0.f);
        }
    }

    /* process; note the input/output pointers are not advanced per iteration,
       so this loop effectively assumes batch_number == 1 */
    for (int i = 0; i < batch_number; i++)
    {
        if (stride_h == 1)
            convdw3x3s1(output, input_tmp, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
        else
            convdw3x3s2(output, input_tmp, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
    }

    /* relu */
    if (activation >= 0)
        relu(output, batch_number * out_chw, activation);

    if (!(inh_tmp == inh && inw_tmp == inw))
        sys_free(input_tmp);

    return 0;
}
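The scalar fallback above and the vectorized kernels are meant to compute the same result, but the packed layouts make that hard to verify by eye. Below is a minimal stand-alone reference for the operation, a sketch for validation only and not part of the source; the name dw3x3s1_ref and its signature are assumptions. It performs a depthwise 3x3, stride-1 convolution on an already padded planar (CHW) input, each channel convolved with its own 3x3 filter plus an optional per-channel bias, which is the per-element result convdw3x3s1 produces.

#include <stddef.h>

/* Hypothetical reference kernel: depthwise 3x3, stride 1, input pre-padded
 * (conv_dw_run pads before dispatching, so out_h = in_h - 2, out_w = in_w - 2). */
static void dw3x3s1_ref(float *output, const float *input, const float *kernel,
                        const float *bias, int channels, int in_h, int in_w)
{
    int out_h = in_h - 2;
    int out_w = in_w - 2;
    for (int g = 0; g < channels; g++)
    {
        const float *img = input + (size_t)g * in_h * in_w; /* channel g plane */
        const float *k = kernel + g * 9;                    /* 3x3 filter for channel g */
        float *out = output + (size_t)g * out_h * out_w;
        for (int y = 0; y < out_h; y++)
        {
            for (int x = 0; x < out_w; x++)
            {
                float sum = bias ? bias[g] : 0.f;
                for (int ky = 0; ky < 3; ky++)
                    for (int kx = 0; kx < 3; kx++)
                        sum += img[(y + ky) * in_w + (x + kx)] * k[ky * 3 + kx];
                out[y * out_w + x] = sum;
            }
        }
    }
}

Comparing this element-wise against the conv_dw_run output (with param->activation < 0, so relu is skipped) exercises the channel packing, the SIMD inner loops, and the planar writeback in one pass.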
/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h> /* for memcpy() used in pad(); missing from the original include list */
#include <math.h>

#include "conv_dw_kernel_x86.h"

#if __SSE2__
#include <emmintrin.h>
#endif
#if __AVX__
#include <immintrin.h>
#endif

#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))

void relu(float *data, int size, int activation)
{
    for (int i = 0; i < size; i++)
    {
        data[i] = max(data[i], (float)0);
        if (activation > 0)
        {
            data[i] = min(data[i], (float)activation);
        }
    }
}

void pad(float *input, float *output, int in_h, int in_w, int out_h, int out_w, int top, int left, float v)
{
    float *ptr = input;
    float *outptr = output;

    int y = 0;
    //fill top
    for (; y < top; y++)
    {
        int x = 0;
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        outptr += out_w;
    }
    //fill center
    for (; y < (top + in_h); y++)
    {
        int x = 0;
        for (; x < left; x++)
        {
            outptr[x] = v;
        }
        if (in_w < 12)
        {
            for (; x < (left + in_w); x++)
            {
                outptr[x] = ptr[x - left];
            }
        }
        else
        {
            memcpy(outptr + left, ptr, in_w * sizeof(float));
            x += in_w;
        }
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        ptr += in_w;
        outptr += out_w;
    }
    //fill bottom
    for (; y < out_h; y++)
    {
        int x = 0;
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        outptr += out_w;
    }
}

#if __AVX__
static void convdw3x3s1(float *output, float *img_data, float *kernel_data, float *bias_data, int inc, int inh,
                        int inw, int outh, int outw, int num_thread)
{
    int inwh = inw * inh;
    int outwh = outw * outh;
    int channel_count = inc >> 3;
    int channel_remain = inc - (channel_count << 3);

    //generate the image tmp: pack groups of 8 channels interleaved so each AVX lane carries one channel
    float *img_tmp = (float *)sys_malloc(8 * inwh * (channel_count + 1) * sizeof(float));
    float *kernel_tmp = (float *)sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float));
    float *bias_tmp = (float *)sys_malloc(8 * (channel_count + 1) * sizeof(float));
    {
        for (int i = 0; i < channel_count; i++)
        {
            int ii = i * 8;
            const float *k0 = img_data + (ii + 0) * inwh;
            const float *k1 = img_data + (ii + 1) * inwh;
            const float *k2 = img_data + (ii + 2) * inwh;
            const float *k3 = img_data + (ii + 3) * inwh;
            const float *k4 = img_data + (ii + 4) * inwh;
            const float *k5 = img_data + (ii + 5) * inwh;
            const float *k6 = img_data + (ii + 6) * inwh;
            const float *k7 = img_data + (ii + 7) * inwh;

            const float *f0 = kernel_data + (ii + 0) * 9;
            const float *f1 = kernel_data + (ii + 1) * 9;
            const float *f2 = kernel_data + (ii + 2) * 9;
            const float *f3 = kernel_data + (ii + 3) * 9;
            const float *f4 = kernel_data + (ii + 4) * 9;
            const float *f5 = kernel_data + (ii + 5) * 9;
            const float *f6 = kernel_data + (ii + 6) * 9;
            const float *f7 = kernel_data + (ii + 7) * 9;

            const float *b0 = bias_data + (ii + 0);
            const float *b1 = bias_data + (ii + 1);
            const float *b2 = bias_data + (ii + 2);
            const float *b3 = bias_data + (ii + 3);
            const float *b4 = bias_data + (ii + 4);
            const float *b5 = bias_data + (ii + 5);
            const float *b6 = bias_data + (ii + 6);
            const float *b7 = bias_data + (ii + 7);

            float *tmp0 = img_tmp + ii * inwh;
            float *tmp1 = kernel_tmp + ii * 9;
            float *tmp2 = bias_tmp + ii;
            for (int j = 0; j < inwh; j++)
            {
                tmp0[0] = k0[0];
                tmp0[1] = k1[0];
                tmp0[2] = k2[0];
                tmp0[3] = k3[0];
                tmp0[4] = k4[0];
                tmp0[5] = k5[0];
                tmp0[6] = k6[0];
                tmp0[7] = k7[0];
                tmp0 += 8;
                k0++; k1++; k2++; k3++; k4++; k5++; k6++; k7++;
            }
            for (int j = 0; j < 9; j++)
            {
                tmp1[0] = f0[0];
                tmp1[1] = f1[0];
                tmp1[2] = f2[0];
                tmp1[3] = f3[0];
                tmp1[4] = f4[0];
                tmp1[5] = f5[0];
                tmp1[6] = f6[0];
                tmp1[7] = f7[0];
                tmp1 += 8;
                f0++; f1++; f2++; f3++; f4++; f5++; f6++; f7++;
            }
            if (bias_data)
            {
                tmp2[0] = b0[0];
                tmp2[1] = b1[0];
                tmp2[2] = b2[0];
                tmp2[3] = b3[0];
tmp2[4] = b4[0]; tmp2[5] = b5[0]; tmp2[6] = b6[0]; tmp2[7] = b7[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; tmp2[4] = 0; tmp2[5] = 0; tmp2[6] = 0; tmp2[7] = 0; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float *k0 = img_data + (ii + 0) * inwh; float *k1 = img_data + (ii + 1) * inwh; float *k2 = img_data + (ii + 2) * inwh; float *k3 = img_data + (ii + 3) * inwh; float *f0 = kernel_data + (ii + 0) * 9; float *f1 = kernel_data + (ii + 1) * 9; float *f2 = kernel_data + (ii + 2) * 9; float *f3 = kernel_data + (ii + 3) * 9; float *b0 = bias_data + (ii + 0); float *b1 = bias_data + (ii + 1); float *b2 = bias_data + (ii + 2); float *b3 = bias_data + (ii + 3); float *tmp0 = img_tmp + channel_count * 8 * inwh; float *tmp1 = kernel_tmp + channel_count * 8 * 9; float *tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 8; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 8; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float *k0 = img_data + ii * inwh; float *f0 = kernel_data + ii * 9; float *b0 = bias_data + ii; float *tmp0 = img_tmp + channel_count * 8 * inwh; float *tmp1 = kernel_tmp + channel_count * 8 * 9; float *tmp2 = bias_tmp + channel_count * 8; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 8; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 8; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float *output_tmp = (float *)sys_malloc(outwh * (channel_count + 1) * 8 * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float *ktmp = kernel_tmp + c * 8 * 9; float *btmp = bias_tmp + c * 8; for (int i = 0; i < outh; i++) { int j = 0; float *itmp0 = img_tmp + c * 8 * inwh + 8 * i * inw; float *itmp1 = img_tmp + c * 8 * inwh + 8 * (i + 1) * inw; float *itmp2 = img_tmp + c * 8 * inwh + 8 * (i + 2) * inw; float *otmp = output_tmp + c * 8 * outwh + 8 * i * outw; for (; j + 7 < outw; j += 8) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _sum4 = _mm256_loadu_ps(btmp); __m256 _sum5 = _mm256_loadu_ps(btmp); __m256 _sum6 = _mm256_loadu_ps(btmp); __m256 _sum7 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _va6 = _mm256_loadu_ps(itmp0 + 48); __m256 _va7 = _mm256_loadu_ps(itmp0 + 56); __m256 _va8 = _mm256_loadu_ps(itmp0 + 64); __m256 _va9 = _mm256_loadu_ps(itmp0 + 72); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = 
_mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _va6 = _mm256_loadu_ps(itmp1 + 48); _va7 = _mm256_loadu_ps(itmp1 + 56); _va8 = _mm256_loadu_ps(itmp1 + 64); _va9 = _mm256_loadu_ps(itmp1 + 72); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _va6 = _mm256_loadu_ps(itmp2 + 48); _va7 = _mm256_loadu_ps(itmp2 + 56); _va8 = _mm256_loadu_ps(itmp2 + 64); _va9 = _mm256_loadu_ps(itmp2 + 72); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); 
_sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); _mm256_storeu_ps(otmp + 32, _sum4); _mm256_storeu_ps(otmp + 40, _sum5); _mm256_storeu_ps(otmp + 48, _sum6); _mm256_storeu_ps(otmp + 56, _sum7); itmp0 += 64; itmp1 += 64; itmp2 += 64; otmp += 64; } for (; j + 3 < outw; j += 4) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _va0 = 
_mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 16; } for (; j < outw; j++) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _mm256_storeu_ps(otmp, _sum0); itmp0 += 8; itmp1 += 8; itmp2 += 8; otmp += 8; } } } //load_data { for (int i = 0; i < channel_count; i++) { float *otmp = output_tmp + i * 8 * outwh; float *tmp0 = output + i * 8 * outwh; float *tmp1 = output + i * 8 * outwh + 1 * outwh; float *tmp2 = output + i * 8 * outwh + 2 * outwh; float *tmp3 = output + i * 8 * outwh + 3 * outwh; float *tmp4 = output + i * 8 * outwh + 4 * outwh; float *tmp5 = output + i * 8 * outwh + 5 * outwh; float *tmp6 = output + i * 8 * outwh + 6 * outwh; float *tmp7 = output + i * 8 * outwh + 7 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; tmp4[0] = otmp[4]; tmp5[0] = otmp[5]; tmp6[0] = otmp[6]; tmp7[0] = otmp[7]; otmp += 8; 
tmp0++; tmp1++; tmp2++; tmp3++; tmp4++; tmp5++; tmp6++; tmp7++; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float *otmp = output_tmp + ii * outwh; float *tmp0 = output + ii * outwh; float *tmp1 = output + ii * outwh + 1 * outwh; float *tmp2 = output + ii * outwh + 2 * outwh; float *tmp3 = output + ii * outwh + 3 * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float *otmp = output_tmp + channel_count * 8 * outwh; float *tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 8; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } static void convdw3x3s2(float *output, float *img_data, float *kernel_data, float *bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 3; int channel_remain = inc - (channel_count << 3); //generate the image tmp float *img_tmp = (float *)sys_malloc(8 * inwh * (channel_count + 1) * sizeof(float)); float *kernel_tmp = (float *)sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float)); float *bias_tmp = (float *)sys_malloc(8 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 8; const float *k0 = img_data + (ii + 0) * inwh; const float *k1 = img_data + (ii + 1) * inwh; const float *k2 = img_data + (ii + 2) * inwh; const float *k3 = img_data + (ii + 3) * inwh; const float *k4 = img_data + (ii + 4) * inwh; const float *k5 = img_data + (ii + 5) * inwh; const float *k6 = img_data + (ii + 6) * inwh; const float *k7 = img_data + (ii + 7) * inwh; const float *f0 = kernel_data + (ii + 0) * 9; const float *f1 = kernel_data + (ii + 1) * 9; const float *f2 = kernel_data + (ii + 2) * 9; const float *f3 = kernel_data + (ii + 3) * 9; const float *f4 = kernel_data + (ii + 4) * 9; const float *f5 = kernel_data + (ii + 5) * 9; const float *f6 = kernel_data + (ii + 6) * 9; const float *f7 = kernel_data + (ii + 7) * 9; const float *b0 = bias_data + (ii + 0); const float *b1 = bias_data + (ii + 1); const float *b2 = bias_data + (ii + 2); const float *b3 = bias_data + (ii + 3); const float *b4 = bias_data + (ii + 4); const float *b5 = bias_data + (ii + 5); const float *b6 = bias_data + (ii + 6); const float *b7 = bias_data + (ii + 7); float *tmp0 = img_tmp + ii * inwh; float *tmp1 = kernel_tmp + ii * 9; float *tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0[4] = k4[0]; tmp0[5] = k5[0]; tmp0[6] = k6[0]; tmp0[7] = k7[0]; tmp0 += 8; k0++; k1++; k2++; k3++; k4++; k5++; k6++; k7++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1[4] = f4[0]; tmp1[5] = f5[0]; tmp1[6] = f6[0]; tmp1[7] = f7[0]; tmp1 += 8; f0++; f1++; f2++; f3++; f4++; f5++; f6++; f7++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; tmp2[4] = b4[0]; tmp2[5] = b5[0]; tmp2[6] = b6[0]; tmp2[7] = b7[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; tmp2[4] = 0; tmp2[5] = 0; tmp2[6] = 0; tmp2[7] = 0; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float *k0 = img_data + (ii + 0) * inwh; float *k1 = img_data + (ii + 1) * inwh; float *k2 = img_data + (ii + 2) * inwh; 
float *k3 = img_data + (ii + 3) * inwh; float *f0 = kernel_data + (ii + 0) * 9; float *f1 = kernel_data + (ii + 1) * 9; float *f2 = kernel_data + (ii + 2) * 9; float *f3 = kernel_data + (ii + 3) * 9; float *b0 = bias_data + (ii + 0); float *b1 = bias_data + (ii + 1); float *b2 = bias_data + (ii + 2); float *b3 = bias_data + (ii + 3); float *tmp0 = img_tmp + channel_count * 8 * inwh; float *tmp1 = kernel_tmp + channel_count * 8 * 9; float *tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 8; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 8; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float *k0 = img_data + ii * inwh; float *f0 = kernel_data + ii * 9; float *b0 = bias_data + ii; float *tmp0 = img_tmp + channel_count * 8 * inwh; float *tmp1 = kernel_tmp + channel_count * 8 * 9; float *tmp2 = bias_tmp + channel_count * 8; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 8; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 8; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float *output_tmp = (float *)sys_malloc(outwh * (channel_count + 1) * 8 * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float *ktmp = kernel_tmp + c * 8 * 9; float *btmp = bias_tmp + c * 8; for (int i = 0; i < outh; i++) { int j = 0; float *itmp0 = img_tmp + c * 8 * inwh + 8 * i * 2 * inw; float *itmp1 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 1) * inw; float *itmp2 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 2) * inw; float *otmp = output_tmp + c * 8 * outwh + 8 * i * outw; for (; j + 3 < outw; j += 4) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _va6 = _mm256_loadu_ps(itmp0 + 48); __m256 _va7 = _mm256_loadu_ps(itmp0 + 56); __m256 _va8 = _mm256_loadu_ps(itmp0 + 64); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _va6 = _mm256_loadu_ps(itmp1 + 48); _va7 = _mm256_loadu_ps(itmp1 + 56); _va8 = _mm256_loadu_ps(itmp1 + 64); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, 
_sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _va6 = _mm256_loadu_ps(itmp2 + 48); _va7 = _mm256_loadu_ps(itmp2 + 56); _va8 = _mm256_loadu_ps(itmp2 + 64); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); itmp0 += 64; itmp1 += 64; itmp2 += 64; otmp += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 16; } for (; j < 
outw; j++) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _mm256_storeu_ps(otmp, _sum0); itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 8; } } } //load_data { for (int i = 0; i < channel_count; i++) { float *otmp = output_tmp + i * 8 * outwh; float *tmp0 = output + i * 8 * outwh; float *tmp1 = output + i * 8 * outwh + 1 * outwh; float *tmp2 = output + i * 8 * outwh + 2 * outwh; float *tmp3 = output + i * 8 * outwh + 3 * outwh; float *tmp4 = output + i * 8 * outwh + 4 * outwh; float *tmp5 = output + i * 8 * outwh + 5 * outwh; float *tmp6 = output + i * 8 * outwh + 6 * outwh; float *tmp7 = output + i * 8 * outwh + 7 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; tmp4[0] = otmp[4]; tmp5[0] = otmp[5]; tmp6[0] = otmp[6]; tmp7[0] = otmp[7]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; tmp4++; tmp5++; tmp6++; tmp7++; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float *otmp = output_tmp + ii * outwh; float *tmp0 = output + ii * outwh; float *tmp1 = output + ii * outwh + 1 * outwh; float *tmp2 = output + ii * outwh + 2 * outwh; float *tmp3 = output + ii * outwh + 3 * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float *otmp = output_tmp + channel_count * 8 * outwh; float *tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 8; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } #elif __SSE2__ static void convdw3x3s1(float *output, float *img_data, float *kernel_data, float *bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 2; int channel_remain = inc - (channel_count << 2); //generate the image tmp float *img_tmp = (float *)sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float)); float *kernel_tmp = (float *)sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float)); float *bias_tmp = (float *)sys_malloc(4 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 4; float *k0 = img_data + (ii + 0) * inwh; float *k1 = img_data + (ii + 1) * inwh; float *k2 = img_data + (ii + 2) * inwh; float *k3 = img_data + (ii + 3) * inwh; float *f0 = kernel_data + (ii + 0) * 9; 
float *f1 = kernel_data + (ii + 1) * 9; float *f2 = kernel_data + (ii + 2) * 9; float *f3 = kernel_data + (ii + 3) * 9; float *b0 = bias_data + (ii + 0); float *b1 = bias_data + (ii + 1); float *b2 = bias_data + (ii + 2); float *b3 = bias_data + (ii + 3); float *tmp0 = img_tmp + ii * inwh; float *tmp1 = kernel_tmp + ii * 9; float *tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 4; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 4; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float *k0 = img_data + ii * inwh; float *f0 = kernel_data + ii * 9; float *b0 = bias_data + ii; float *tmp0 = img_tmp + channel_count * 4 * inwh; float *tmp1 = kernel_tmp + channel_count * 4 * 9; float *tmp2 = bias_tmp + channel_count * 4; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 4; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 4; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float *output_tmp = (float *)sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float *ktmp = kernel_tmp + c * 4 * 9; float *btmp = bias_tmp + c * 4; for (int i = 0; i < outh; i++) { int j = 0; float *itmp0 = img_tmp + c * 4 * inwh + 4 * i * inw; float *itmp1 = img_tmp + c * 4 * inwh + 4 * (i + 1) * inw; float *itmp2 = img_tmp + c * 4 * inwh + 4 * (i + 2) * inw; float *otmp = output_tmp + c * 4 * outwh + 4 * i * outw; for (; j + 7 < outw; j += 8) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _sum1 = _mm_loadu_ps(btmp); __m128 _sum2 = _mm_loadu_ps(btmp); __m128 _sum3 = _mm_loadu_ps(btmp); __m128 _sum4 = _mm_loadu_ps(btmp); __m128 _sum5 = _mm_loadu_ps(btmp); __m128 _sum6 = _mm_loadu_ps(btmp); __m128 _sum7 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _va3 = _mm_loadu_ps(itmp0 + 12); __m128 _va4 = _mm_loadu_ps(itmp0 + 16); __m128 _va5 = _mm_loadu_ps(itmp0 + 20); __m128 _va6 = _mm_loadu_ps(itmp0 + 24); __m128 _va7 = _mm_loadu_ps(itmp0 + 28); __m128 _va8 = _mm_loadu_ps(itmp0 + 32); __m128 _va9 = _mm_loadu_ps(itmp0 + 36); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), 
_sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5); _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7); _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _va3 = _mm_loadu_ps(itmp1 + 12); _va4 = _mm_loadu_ps(itmp1 + 16); _va5 = _mm_loadu_ps(itmp1 + 20); _va6 = _mm_loadu_ps(itmp1 + 24); _va7 = _mm_loadu_ps(itmp1 + 28); _va8 = _mm_loadu_ps(itmp1 + 32); _va9 = _mm_loadu_ps(itmp1 + 36); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5); _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7); _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _va3 = _mm_loadu_ps(itmp2 + 12); _va4 = _mm_loadu_ps(itmp2 + 16); _va5 = _mm_loadu_ps(itmp2 + 20); _va6 = _mm_loadu_ps(itmp2 + 24); _va7 = _mm_loadu_ps(itmp2 + 28); _va8 = _mm_loadu_ps(itmp2 + 32); _va9 = _mm_loadu_ps(itmp2 + 36); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5); _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7); _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7); 
_mm_storeu_ps(otmp, _sum0); _mm_storeu_ps(otmp + 4, _sum1); _mm_storeu_ps(otmp + 8, _sum2); _mm_storeu_ps(otmp + 12, _sum3); _mm_storeu_ps(otmp + 16, _sum4); _mm_storeu_ps(otmp + 20, _sum5); _mm_storeu_ps(otmp + 24, _sum6); _mm_storeu_ps(otmp + 28, _sum7); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum4[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum5[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum6[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum7[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; sum1[k] += itmp0[k + 4] * ktmp[k]; sum1[k] += itmp1[k + 4] * ktmp[k + 12]; sum1[k] += itmp2[k + 4] * ktmp[k + 24]; sum1[k] += itmp0[k + 8] * ktmp[k + 4]; sum1[k] += itmp1[k + 8] * ktmp[k + 16]; sum1[k] += itmp2[k + 8] * ktmp[k + 28]; sum1[k] += itmp0[k + 12] * ktmp[k + 8]; sum1[k] += itmp1[k + 12] * ktmp[k + 20]; sum1[k] += itmp2[k + 12] * ktmp[k + 32]; sum2[k] += itmp0[k + 8] * ktmp[k]; sum2[k] += itmp1[k + 8] * ktmp[k + 12]; sum2[k] += itmp2[k + 8] * ktmp[k + 24]; sum2[k] += itmp0[k + 12] * ktmp[k + 4]; sum2[k] += itmp1[k + 12] * ktmp[k + 16]; sum2[k] += itmp2[k + 12] * ktmp[k + 28]; sum2[k] += itmp0[k + 16] * ktmp[k + 8]; sum2[k] += itmp1[k + 16] * ktmp[k + 20]; sum2[k] += itmp2[k + 16] * ktmp[k + 32]; sum3[k] += itmp0[k + 12] * ktmp[k]; sum3[k] += itmp1[k + 12] * ktmp[k + 12]; sum3[k] += itmp2[k + 12] * ktmp[k + 24]; sum3[k] += itmp0[k + 16] * ktmp[k + 4]; sum3[k] += itmp1[k + 16] * ktmp[k + 16]; sum3[k] += itmp2[k + 16] * ktmp[k + 28]; sum3[k] += itmp0[k + 20] * ktmp[k + 8]; sum3[k] += itmp1[k + 20] * ktmp[k + 20]; sum3[k] += itmp2[k + 20] * ktmp[k + 32]; sum4[k] += itmp0[k + 16] * ktmp[k]; sum4[k] += itmp1[k + 16] * ktmp[k + 12]; sum4[k] += itmp2[k + 16] * ktmp[k + 24]; sum4[k] += itmp0[k + 20] * ktmp[k + 4]; sum4[k] += itmp1[k + 20] * ktmp[k + 16]; sum4[k] += itmp2[k + 20] * ktmp[k + 28]; sum4[k] += itmp0[k + 24] * ktmp[k + 8]; sum4[k] += itmp1[k + 24] * ktmp[k + 20]; sum4[k] += itmp2[k + 24] * ktmp[k + 32]; sum5[k] += itmp0[k + 20] * ktmp[k]; sum5[k] += itmp1[k + 20] * ktmp[k + 12]; sum5[k] += itmp2[k + 20] * ktmp[k + 24]; sum5[k] += itmp0[k + 24] * ktmp[k + 4]; sum5[k] += itmp1[k + 24] * ktmp[k + 16]; sum5[k] += itmp2[k + 24] * ktmp[k + 28]; sum5[k] += itmp0[k + 28] * ktmp[k + 8]; sum5[k] += itmp1[k + 28] * ktmp[k + 20]; sum5[k] += itmp2[k + 28] * ktmp[k + 32]; sum6[k] += itmp0[k + 24] * ktmp[k]; sum6[k] += itmp1[k + 24] * ktmp[k + 12]; sum6[k] += itmp2[k + 24] * ktmp[k + 24]; sum6[k] += itmp0[k + 28] * ktmp[k + 4]; sum6[k] += itmp1[k + 28] * ktmp[k + 16]; sum6[k] += itmp2[k + 28] * ktmp[k + 28]; sum6[k] += itmp0[k + 32] * ktmp[k + 8]; sum6[k] += itmp1[k + 32] * ktmp[k + 20]; sum6[k] += itmp2[k + 32] * ktmp[k + 32]; sum7[k] += itmp0[k + 28] * ktmp[k]; sum7[k] += itmp1[k + 28] * ktmp[k + 12]; sum7[k] += itmp2[k + 28] * ktmp[k + 24]; sum7[k] += itmp0[k + 32] * ktmp[k + 4]; sum7[k] += itmp1[k + 32] * ktmp[k + 16]; sum7[k] += itmp2[k + 32] * ktmp[k + 28]; sum7[k] += itmp0[k + 36] * ktmp[k + 8]; sum7[k] += itmp1[k + 36] * ktmp[k + 20]; 
sum7[k] += itmp2[k + 36] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k]; otmp[k + 16] = sum4[k]; otmp[k + 20] = sum5[k]; otmp[k + 24] = sum6[k]; otmp[k + 28] = sum7[k]; } #endif itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 32; } for (; j + 3 < outw; j += 4) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _sum1 = _mm_loadu_ps(btmp); __m128 _sum2 = _mm_loadu_ps(btmp); __m128 _sum3 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _va3 = _mm_loadu_ps(itmp0 + 12); __m128 _va4 = _mm_loadu_ps(itmp0 + 16); __m128 _va5 = _mm_loadu_ps(itmp0 + 20); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _va3 = _mm_loadu_ps(itmp1 + 12); _va4 = _mm_loadu_ps(itmp1 + 16); _va5 = _mm_loadu_ps(itmp1 + 20); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _va3 = _mm_loadu_ps(itmp2 + 12); _va4 = _mm_loadu_ps(itmp2 + 16); _va5 = _mm_loadu_ps(itmp2 + 20); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _mm_storeu_ps(otmp, _sum0); _mm_storeu_ps(otmp + 4, _sum1); _mm_storeu_ps(otmp + 8, _sum2); _mm_storeu_ps(otmp + 12, _sum3); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float 
sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; sum1[k] += itmp0[k + 4] * ktmp[k]; sum1[k] += itmp1[k + 4] * ktmp[k + 12]; sum1[k] += itmp2[k + 4] * ktmp[k + 24]; sum1[k] += itmp0[k + 8] * ktmp[k + 4]; sum1[k] += itmp1[k + 8] * ktmp[k + 16]; sum1[k] += itmp2[k + 8] * ktmp[k + 28]; sum1[k] += itmp0[k + 12] * ktmp[k + 8]; sum1[k] += itmp1[k + 12] * ktmp[k + 20]; sum1[k] += itmp2[k + 12] * ktmp[k + 32]; sum2[k] += itmp0[k + 8] * ktmp[k]; sum2[k] += itmp1[k + 8] * ktmp[k + 12]; sum2[k] += itmp2[k + 8] * ktmp[k + 24]; sum2[k] += itmp0[k + 12] * ktmp[k + 4]; sum2[k] += itmp1[k + 12] * ktmp[k + 16]; sum2[k] += itmp2[k + 12] * ktmp[k + 28]; sum2[k] += itmp0[k + 16] * ktmp[k + 8]; sum2[k] += itmp1[k + 16] * ktmp[k + 20]; sum2[k] += itmp2[k + 16] * ktmp[k + 32]; sum3[k] += itmp0[k + 12] * ktmp[k]; sum3[k] += itmp1[k + 12] * ktmp[k + 12]; sum3[k] += itmp2[k + 12] * ktmp[k + 24]; sum3[k] += itmp0[k + 16] * ktmp[k + 4]; sum3[k] += itmp1[k + 16] * ktmp[k + 16]; sum3[k] += itmp2[k + 16] * ktmp[k + 28]; sum3[k] += itmp0[k + 20] * ktmp[k + 8]; sum3[k] += itmp1[k + 20] * ktmp[k + 20]; sum3[k] += itmp2[k + 20] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k]; } #endif itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 16; } for (; j < outw; j++) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _mm_storeu_ps(otmp, _sum0); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; } #endif itmp0 += 4; itmp1 += 4; itmp2 += 4; otmp += 4; } } } { for (int i = 0; i < channel_count; i++) { float *otmp = output_tmp + i * 4 * outwh; float *tmp0 = output + i * 4 * outwh; float *tmp1 = output + i * 4 * outwh + 1 * outwh; 
float *tmp2 = output + i * 4 * outwh + 2 * outwh; float *tmp3 = output + i * 4 * outwh + 3 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 4; tmp0++; tmp1++; tmp2++; tmp3++; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float *otmp = output_tmp + channel_count * 4 * outwh; float *tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 4; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } static void convdw3x3s2(float *output, float *img_data, float *kernel_data, float *bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 2; int channel_remain = inc - (channel_count << 2); //generate the image tmp float *img_tmp = (float *)sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float)); float *kernel_tmp = (float *)sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float)); float *bias_tmp = (float *)sys_malloc(4 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 4; float *k0 = img_data + (ii + 0) * inwh; float *k1 = img_data + (ii + 1) * inwh; float *k2 = img_data + (ii + 2) * inwh; float *k3 = img_data + (ii + 3) * inwh; float *f0 = kernel_data + (ii + 0) * 9; float *f1 = kernel_data + (ii + 1) * 9; float *f2 = kernel_data + (ii + 2) * 9; float *f3 = kernel_data + (ii + 3) * 9; float *b0 = bias_data + (ii + 0); float *b1 = bias_data + (ii + 1); float *b2 = bias_data + (ii + 2); float *b3 = bias_data + (ii + 3); float *tmp0 = img_tmp + ii * inwh; float *tmp1 = kernel_tmp + ii * 9; float *tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 4; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 4; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float *k0 = img_data + ii * inwh; float *f0 = kernel_data + ii * 9; float *b0 = bias_data + ii; float *tmp0 = img_tmp + channel_count * 4 * inwh; float *tmp1 = kernel_tmp + channel_count * 4 * 9; float *tmp2 = bias_tmp + channel_count * 4; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 4; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 4; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float *output_tmp = (float *)sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float *ktmp = kernel_tmp + c * 4 * 9; float *btmp = bias_tmp + c * 4; for (int i = 0; i < outh; i++) { int j = 0; float *itmp0 = img_tmp + c * 4 * inwh + 4 * i * 2 * inw; float *itmp1 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 1) * inw; float *itmp2 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 2) * inw; float *otmp = output_tmp + c * 4 * outwh + 4 * i * outw; for (; j + 3 < outw; j += 4) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _sum1 = _mm_loadu_ps(btmp); __m128 _sum2 = _mm_loadu_ps(btmp); __m128 _sum3 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _va3 = _mm_loadu_ps(itmp0 + 12); __m128 _va4 = _mm_loadu_ps(itmp0 
+ 16); __m128 _va5 = _mm_loadu_ps(itmp0 + 20); __m128 _va6 = _mm_loadu_ps(itmp0 + 24); __m128 _va7 = _mm_loadu_ps(itmp0 + 28); __m128 _va8 = _mm_loadu_ps(itmp0 + 32); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _va3 = _mm_loadu_ps(itmp1 + 12); _va4 = _mm_loadu_ps(itmp1 + 16); _va5 = _mm_loadu_ps(itmp1 + 20); _va6 = _mm_loadu_ps(itmp1 + 24); _va7 = _mm_loadu_ps(itmp1 + 28); _va8 = _mm_loadu_ps(itmp1 + 32); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _va3 = _mm_loadu_ps(itmp2 + 12); _va4 = _mm_loadu_ps(itmp2 + 16); _va5 = _mm_loadu_ps(itmp2 + 20); _va6 = _mm_loadu_ps(itmp2 + 24); _va7 = _mm_loadu_ps(itmp2 + 28); _va8 = _mm_loadu_ps(itmp2 + 32); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _mm_storeu_ps(otmp, _sum0); _mm_storeu_ps(otmp + 4, _sum1); _mm_storeu_ps(otmp + 8, _sum2); _mm_storeu_ps(otmp + 12, _sum3); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * 
ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; sum1[k] += itmp0[k + 8] * ktmp[k]; sum1[k] += itmp1[k + 8] * ktmp[k + 12]; sum1[k] += itmp2[k + 8] * ktmp[k + 24]; sum1[k] += itmp0[k + 12] * ktmp[k + 4]; sum1[k] += itmp1[k + 12] * ktmp[k + 16]; sum1[k] += itmp2[k + 12] * ktmp[k + 28]; sum1[k] += itmp0[k + 16] * ktmp[k + 8]; sum1[k] += itmp1[k + 16] * ktmp[k + 20]; sum1[k] += itmp2[k + 16] * ktmp[k + 32]; sum2[k] += itmp0[k + 16] * ktmp[k]; sum2[k] += itmp1[k + 16] * ktmp[k + 12]; sum2[k] += itmp2[k + 16] * ktmp[k + 24]; sum2[k] += itmp0[k + 20] * ktmp[k + 4]; sum2[k] += itmp1[k + 20] * ktmp[k + 16]; sum2[k] += itmp2[k + 20] * ktmp[k + 28]; sum2[k] += itmp0[k + 24] * ktmp[k + 8]; sum2[k] += itmp1[k + 24] * ktmp[k + 20]; sum2[k] += itmp2[k + 24] * ktmp[k + 32]; sum3[k] += itmp0[k + 24] * ktmp[k]; sum3[k] += itmp1[k + 24] * ktmp[k + 12]; sum3[k] += itmp2[k + 24] * ktmp[k + 24]; sum3[k] += itmp0[k + 28] * ktmp[k + 4]; sum3[k] += itmp1[k + 28] * ktmp[k + 16]; sum3[k] += itmp2[k + 28] * ktmp[k + 28]; sum3[k] += itmp0[k + 32] * ktmp[k + 8]; sum3[k] += itmp1[k + 32] * ktmp[k + 20]; sum3[k] += itmp2[k + 32] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k]; } #endif itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 16; } for (; j < outw; j++) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _mm_storeu_ps(otmp, _sum0); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; } #endif itmp0 += 8; itmp1 += 8; itmp2 += 8; otmp += 4; } } } { for (int i = 0; i < channel_count; i++) { float *otmp = output_tmp + i * 4 * outwh; float *tmp0 = output + i * 4 * outwh; float *tmp1 = output + i * 4 * outwh + 1 * outwh; float *tmp2 = output + i * 4 * outwh + 2 * outwh; float *tmp3 = output + i * 4 * outwh + 3 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 4; tmp0++; tmp1++; tmp2++; tmp3++; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float *otmp = output_tmp + 
channel_count * 4 * outwh; float *tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 4; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); }
#else
/* No-SSE fallback: plain scalar depthwise 3x3, one channel (group) per OpenMP thread. */
static void convdw3x3s1(float *output, float *input, float *_kernel, float *_bias, int channel, int in_h, int in_w, int out_h, int out_w, int num_thread)
{
    int w = in_w;
    int h = in_h;
    int c_step_in = w * h;
    int outw = out_w;
    int outh = out_h;
    int c_step_out = outw * outh;
    const int group = channel;
    const float *kernel = _kernel;
#pragma omp parallel for num_threads(num_thread)
    for (int g = 0; g < group; g++)
    {
        float *out = output + g * c_step_out;
        float *outptr = out;
        float *outptr2 = outptr + outw;
        const float bias0 = _bias ? _bias[g] : 0.f;
        const float *kernel0 = kernel + g * 9;
        const float *img0 = input + g * c_step_in;
        const float *r0 = img0;
        const float *r1 = img0 + w;
        const float *r2 = img0 + w * 2;
        const float *r3 = img0 + w * 3;
        const float *k0 = kernel0;
        const float *k1 = kernel0 + 3;
        const float *k2 = kernel0 + 6;
        int i = 0;
        /* two output rows per iteration: rows i and i + 1 share input rows r1 and r2 */
        for (; i + 1 < outh; i += 2)
        {
            int remain = outw;
            for (; remain > 0; remain--)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];
                float sum2 = bias0;
                sum2 += r1[0] * k0[0];
                sum2 += r1[1] * k0[1];
                sum2 += r1[2] * k0[2];
                sum2 += r2[0] * k1[0];
                sum2 += r2[1] * k1[1];
                sum2 += r2[2] * k1[2];
                sum2 += r3[0] * k2[0];
                sum2 += r3[1] * k2[1];
                sum2 += r3[2] * k2[2];
                *outptr = sum;
                *outptr2 = sum2;
                r0++;
                r1++;
                r2++;
                r3++;
                outptr++;
                outptr2++;
            }
            r0 += 2 + w;
            r1 += 2 + w;
            r2 += 2 + w;
            r3 += 2 + w;
            outptr += outw;
            outptr2 += outw;
        }
        for (; i < outh; i++)
        {
            int remain = outw;
            for (; remain > 0; remain--)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];
                *outptr = sum;
                r0++;
                r1++;
                r2++;
                outptr++;
            }
            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}

static void convdw3x3s2(float *output, float *input, float *_kernel, float *_bias, int channel, int in_h, int in_w, int out_h, int out_w, int num_thread)
{
    int w = in_w;
    int h = in_h;
    int c_step_in = w * h;
    int outw = out_w;
    int outh = out_h;
    int c_step_out = outw * outh;
    const int group = channel;
    const int tailstep = w - 2 * outw + w;
    const float *kernel = _kernel;
#pragma omp parallel for num_threads(num_thread)
    for (int g = 0; g < group; g++)
    {
        float *out = output + g * c_step_out;
        float *outptr = out;
        const float *kernel0 = kernel + g * 9;
        const float bias0 = _bias ? _bias[g] : 0.f;
        const float *img0 = input + g * c_step_in;
        const float *r0 = img0;
        const float *r1 = img0 + w;
        const float *r2 = img0 + w * 2;
        const float *k0 = kernel0;
        const float *k1 = kernel0 + 3;
        const float *k2 = kernel0 + 6;
        int i = 0;
        for (; i < outh; i++)
        {
            int remain = outw;
            for (; remain > 0; remain--)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];
                *outptr = sum;
                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
#endif

int conv_dw_run(struct ir_tensor *input_tensor, struct ir_tensor *weight_tensor, struct ir_tensor *bias_tensor, struct ir_tensor *output_tensor, struct conv_param *param, int num_thread, int cpu_affinity)
{
    float *input = (float *)input_tensor->data;
    float *output = (float *)output_tensor->data;
    float *kernel = (float *)weight_tensor->data;
    float *biases = NULL;
    if (bias_tensor)
        biases = (float *)bias_tensor->data;
    int batch_number = input_tensor->dims[0];
    int inc = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int in_chw = inc * inh * inw;
    int outc = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_chw = out_hw * outc;
    int ksize_h = param->kernel_h;
    int ksize_w = param->kernel_w;
    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;
    int stride_w = param->stride_w;
    int stride_h = param->stride_h;
    int dilation_w = param->dilation_w;
    int dilation_h = param->dilation_h;
    int group = param->group;
    int activation = param->activation;
    /* padding */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;
    float *input_tmp = NULL;
    if (inh_tmp == inh && inw_tmp == inw)
        input_tmp = input;
    else
    {
        input_tmp = (float *)sys_malloc(inh_tmp * inw_tmp * group * sizeof(float));
        for (int g = 0; g < group; g++)
        {
            float *pad_in = input + g * inh * inw;
            float *pad_out = input_tmp + g * inh_tmp * inw_tmp;
            pad(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0.f);
        }
    }
    /* process (note: the loop body does not advance input_tmp/output per batch) */
    for (int i = 0; i < batch_number; i++)
    {
        if (stride_h == 1)
            convdw3x3s1(output, input_tmp, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
        else
            convdw3x3s2(output, input_tmp, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
    }
    /* relu */
    if (activation >= 0)
        relu(output, batch_number * out_chw, activation);
    if (!(inh_tmp == inh && inw_tmp == inw))
        sys_free(input_tmp);
    return 0;
}
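The SSE path above leans on one layout trick that is easy to lose in the unrolled kernels: four channel planes are repacked so that the same pixel of channels c..c+3 forms one contiguous group of four floats, letting a single _mm_loadu_ps feed a four-wide multiply-add across channels. The sketch below restates that idea in isolation; it is illustrative only and not part of the original file (pack_c4 and dw3x3s1_pixel_c4 are hypothetical helper names), assuming the same packed layouts as img_tmp (4 * inwh floats per group) and kernel_tmp (4 * 9 floats per group) and an SSE-capable target.

#include <xmmintrin.h>

/* Illustrative sketch of the c4-interleaved layout used by img_tmp/kernel_tmp.
   src holds 4 channel planes of wh floats each; dst[4 * p + c] receives pixel p
   of channel c, so one _mm_loadu_ps spans the same pixel of four channels. */
static void pack_c4(const float *src, float *dst, int wh)
{
    for (int p = 0; p < wh; p++)
        for (int c = 0; c < 4; c++)
            dst[4 * p + c] = src[c * wh + p];
}

/* One stride-1 output pixel (oy, ox) for a packed 4-channel group: nine
   vector multiply-adds, the same work each unrolled _sum* lane performs. */
static __m128 dw3x3s1_pixel_c4(const float *img4, const float *k4, __m128 bias4, int inw, int oy, int ox)
{
    __m128 sum = bias4;
    for (int ky = 0; ky < 3; ky++)
        for (int kx = 0; kx < 3; kx++)
        {
            __m128 v = _mm_loadu_ps(img4 + 4 * ((oy + ky) * inw + (ox + kx)));
            __m128 w = _mm_loadu_ps(k4 + 4 * (3 * ky + kx));
            sum = _mm_add_ps(_mm_mul_ps(v, w), sum);
        }
    return sum;
}

In these terms, the unrolled _sum0.._sum7 blocks above are eight adjacent ox values of dw3x3s1_pixel_c4 with the overlapping loads hoisted and reused.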
c-decl.c
/* Process declarations and variables for C compiler. Copyright (C) 1988-2015 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ /* Process declarations and symbol lookup for C front end. Also constructs types; the standard scalar types at initialization, and structure, union, array and enum types when they are declared. */ /* ??? not all decl nodes are given the most useful possible line numbers. For example, the CONST_DECLs for enum values. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "input.h" #include "tm.h" #include "intl.h" #include "hash-set.h" #include "vec.h" #include "symtab.h" #include "input.h" #include "alias.h" #include "double-int.h" #include "machmode.h" #include "inchash.h" #include "tree.h" #include "fold-const.h" #include "print-tree.h" #include "stor-layout.h" #include "varasm.h" #include "attribs.h" #include "stringpool.h" #include "tree-inline.h" #include "flags.h" #include "hashtab.h" #include "hash-set.h" #include "vec.h" #include "machmode.h" #include "hard-reg-set.h" #include "function.h" #include "c-tree.h" #include "toplev.h" #include "tm_p.h" #include "cpplib.h" #include "target.h" #include "debug.h" #include "opts.h" #include "timevar.h" #include "c-family/c-common.h" #include "c-family/c-objc.h" #include "c-family/c-pragma.h" #include "c-family/c-ubsan.h" #include "c-lang.h" #include "langhooks.h" #include "tree-iterator.h" #include "diagnostic-core.h" #include "dumpfile.h" #include "hash-map.h" #include "is-a.h" #include "plugin-api.h" #include "ipa-ref.h" #include "cgraph.h" #include "hash-table.h" #include "langhooks-def.h" #include "plugin.h" #include "c-family/c-ada-spec.h" #include "cilk.h" #include "builtins.h" /* In grokdeclarator, distinguish syntactic contexts of declarators. */ enum decl_context { NORMAL, /* Ordinary declaration */ FUNCDEF, /* Function definition */ PARM, /* Declaration of parm before function body */ FIELD, /* Declaration inside struct or union */ TYPENAME}; /* Typename (inside cast or sizeof) */ /* States indicating how grokdeclarator() should handle declspecs marked with __attribute__((deprecated)). An object declared as __attribute__((deprecated)) suppresses warnings of uses of other deprecated items. */ enum deprecated_states { DEPRECATED_NORMAL, DEPRECATED_SUPPRESS }; /* Nonzero if we have seen an invalid cross reference to a struct, union, or enum, but not yet printed the message. */ tree pending_invalid_xref; /* File and line to appear in the eventual error message. */ location_t pending_invalid_xref_location; /* The file and line that the prototype came from if this is an old-style definition; used for diagnostics in store_parm_decls_oldstyle. */ static location_t current_function_prototype_locus; /* Whether this prototype was built-in. */ static bool current_function_prototype_built_in; /* The argument type information of this prototype. 
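   For example (an illustrative case, not from the original comment):

     int f (int, long);
     int f (a, b) int a; long b; { return a; }

   The prototype's location and argument types are saved in these
   variables so that store_parm_decls_oldstyle can diagnose mismatches
   in the old-style definition that follows.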
*/ static tree current_function_prototype_arg_types; /* The argument information structure for the function currently being defined. */ static struct c_arg_info *current_function_arg_info; /* The obstack on which parser and related data structures, which are not live beyond their top-level declaration or definition, are allocated. */ struct obstack parser_obstack; /* The current statement tree. */ static GTY(()) struct stmt_tree_s c_stmt_tree; /* State saving variables. */ tree c_break_label; tree c_cont_label; /* A list of decls to be made automatically visible in each file scope. */ static GTY(()) tree visible_builtins; /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. */ int current_function_returns_value; /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ int current_function_returns_null; /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. */ int current_function_returns_abnormally; /* Set to nonzero by `grokdeclarator' for a function whose return type is defaulted, if warnings for this are desired. */ static int warn_about_return_type; /* Nonzero when the current toplevel function contains a declaration of a nested function which is never defined. */ static bool undef_nested_function; /* If non-zero, implicit "omp declare target" attribute is added into the attribute lists. */ int current_omp_declare_target_attribute; /* Each c_binding structure describes one binding of an identifier to a decl. All the decls in a scope - irrespective of namespace - are chained together by the ->prev field, which (as the name implies) runs in reverse order. All the decls in a given namespace bound to a given identifier are chained by the ->shadowed field, which runs from inner to outer scopes. The ->decl field usually points to a DECL node, but there are two exceptions. In the namespace of type tags, the bound entity is a RECORD_TYPE, UNION_TYPE, or ENUMERAL_TYPE node. If an undeclared identifier is encountered, it is bound to error_mark_node to suppress further errors about that identifier in the current function. The ->u.type field stores the type of the declaration in this scope; if NULL, the type is the type of the ->decl field. This is only of relevance for objects with external or internal linkage which may be redeclared in inner scopes, forming composite types that only persist for the duration of those scopes. In the external scope, this stores the composite of all the types declared for this object, visible or not. The ->inner_comp field (used only at file scope) stores whether an incomplete array type at file scope was completed at an inner scope to an array size other than 1. The ->u.label field is used for labels. It points to a structure which stores additional information used for warnings. The depth field is copied from the scope structure that holds this decl. It is used to preserve the proper ordering of the ->shadowed field (see bind()) and also for a handful of special-case checks. Finally, the invisible bit is true for a decl which should be ignored for purposes of normal name lookup, and the nested bit is true for a decl that's been bound a second time in an inner scope; in all such cases, the binding in the outer scope will have its invisible bit true. 
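   As an illustration (an added example):

     void f (void) { extern int v; }

   binds "v" visibly in f's scope with the nested bit set, while the
   binding recorded for it in the external scope is marked invisible.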
*/ struct GTY((chain_next ("%h.prev"))) c_binding { union GTY(()) { /* first so GTY desc can use decl */ tree GTY((tag ("0"))) type; /* the type in this scope */ struct c_label_vars * GTY((tag ("1"))) label; /* for warnings */ } GTY((desc ("TREE_CODE (%0.decl) == LABEL_DECL"))) u; tree decl; /* the decl bound */ tree id; /* the identifier it's bound to */ struct c_binding *prev; /* the previous decl in this scope */ struct c_binding *shadowed; /* the innermost decl shadowed by this one */ unsigned int depth : 28; /* depth of this scope */ BOOL_BITFIELD invisible : 1; /* normal lookup should ignore this binding */ BOOL_BITFIELD nested : 1; /* do not set DECL_CONTEXT when popping */ BOOL_BITFIELD inner_comp : 1; /* incomplete array completed in inner scope */ BOOL_BITFIELD in_struct : 1; /* currently defined as struct field */ location_t locus; /* location for nested bindings */ }; #define B_IN_SCOPE(b1, b2) ((b1)->depth == (b2)->depth) #define B_IN_CURRENT_SCOPE(b) ((b)->depth == current_scope->depth) #define B_IN_FILE_SCOPE(b) ((b)->depth == 1 /*file_scope->depth*/) #define B_IN_EXTERNAL_SCOPE(b) ((b)->depth == 0 /*external_scope->depth*/) /* Each C symbol points to three linked lists of c_binding structures. These describe the values of the identifier in the three different namespaces defined by the language. */ struct GTY(()) lang_identifier { struct c_common_identifier common_id; struct c_binding *symbol_binding; /* vars, funcs, constants, typedefs */ struct c_binding *tag_binding; /* struct/union/enum tags */ struct c_binding *label_binding; /* labels */ }; /* Validate c-lang.c's assumptions. */ extern char C_SIZEOF_STRUCT_LANG_IDENTIFIER_isnt_accurate [(sizeof(struct lang_identifier) == C_SIZEOF_STRUCT_LANG_IDENTIFIER) ? 1 : -1]; /* The binding oracle; see c-tree.h. */ void (*c_binding_oracle) (enum c_oracle_request, tree identifier); /* This flag is set on an identifier if we have previously asked the binding oracle for this identifier's symbol binding. */ #define I_SYMBOL_CHECKED(node) \ (TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (node))) static inline struct c_binding* * i_symbol_binding (tree node) { struct lang_identifier *lid = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node); if (lid->symbol_binding == NULL && c_binding_oracle != NULL && !I_SYMBOL_CHECKED (node)) { /* Set the "checked" flag first, to avoid infinite recursion when the binding oracle calls back into gcc. */ I_SYMBOL_CHECKED (node) = 1; c_binding_oracle (C_ORACLE_SYMBOL, node); } return &lid->symbol_binding; } #define I_SYMBOL_BINDING(node) (*i_symbol_binding (node)) #define I_SYMBOL_DECL(node) \ (I_SYMBOL_BINDING(node) ? I_SYMBOL_BINDING(node)->decl : 0) /* This flag is set on an identifier if we have previously asked the binding oracle for this identifier's tag binding. */ #define I_TAG_CHECKED(node) \ (TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (node))) static inline struct c_binding ** i_tag_binding (tree node) { struct lang_identifier *lid = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node); if (lid->tag_binding == NULL && c_binding_oracle != NULL && !I_TAG_CHECKED (node)) { /* Set the "checked" flag first, to avoid infinite recursion when the binding oracle calls back into gcc. */ I_TAG_CHECKED (node) = 1; c_binding_oracle (C_ORACLE_TAG, node); } return &lid->tag_binding; } #define I_TAG_BINDING(node) (*i_tag_binding (node)) #define I_TAG_DECL(node) \ (I_TAG_BINDING(node) ? 
I_TAG_BINDING(node)->decl : 0) /* This flag is set on an identifier if we have previously asked the binding oracle for this identifier's label binding. */ #define I_LABEL_CHECKED(node) \ (TREE_LANG_FLAG_6 (IDENTIFIER_NODE_CHECK (node))) static inline struct c_binding ** i_label_binding (tree node) { struct lang_identifier *lid = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node); if (lid->label_binding == NULL && c_binding_oracle != NULL && !I_LABEL_CHECKED (node)) { /* Set the "checked" flag first, to avoid infinite recursion when the binding oracle calls back into gcc. */ I_LABEL_CHECKED (node) = 1; c_binding_oracle (C_ORACLE_LABEL, node); } return &lid->label_binding; } #define I_LABEL_BINDING(node) (*i_label_binding (node)) #define I_LABEL_DECL(node) \ (I_LABEL_BINDING(node) ? I_LABEL_BINDING(node)->decl : 0) /* The resulting tree type. */ union GTY((desc ("TREE_CODE (&%h.generic) == IDENTIFIER_NODE"), chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node { union tree_node GTY ((tag ("0"), desc ("tree_node_structure (&%h)"))) generic; struct lang_identifier GTY ((tag ("1"))) identifier; }; /* Track bindings and other things that matter for goto warnings. For efficiency, we do not gather all the decls at the point of definition. Instead, we point into the bindings structure. As scopes are popped, we update these structures and gather the decls that matter at that time. */ struct GTY(()) c_spot_bindings { /* The currently open scope which holds bindings defined when the label was defined or the goto statement was found. */ struct c_scope *scope; /* The bindings in the scope field which were defined at the point of the label or goto. This lets us look at older or newer bindings in the scope, as appropriate. */ struct c_binding *bindings_in_scope; /* The number of statement expressions that have started since this label or goto statement was defined. This is zero if we are at the same statement expression level. It is positive if we are in a statement expression started since this spot. It is negative if this spot was in a statement expression and we have left it. */ int stmt_exprs; /* Whether we started in a statement expression but are no longer in it. This is set to true if stmt_exprs ever goes negative. */ bool left_stmt_expr; }; /* This structure is used to keep track of bindings seen when a goto statement is defined. This is only used if we see the goto statement before we see the label. */ struct GTY(()) c_goto_bindings { /* The location of the goto statement. */ location_t loc; /* The bindings of the goto statement. */ struct c_spot_bindings goto_bindings; }; typedef struct c_goto_bindings *c_goto_bindings_p; /* The additional information we keep track of for a label binding. These fields are updated as scopes are popped. */ struct GTY(()) c_label_vars { /* The shadowed c_label_vars, when one label shadows another (which can only happen using a __label__ declaration). */ struct c_label_vars *shadowed; /* The bindings when the label was defined. */ struct c_spot_bindings label_bindings; /* A list of decls that we care about: decls about which we should warn if a goto branches to this label from later in the function. Decls are added to this list as scopes are popped. We only add the decls that matter. */ vec<tree, va_gc> *decls_in_scope; /* A list of goto statements to this label. This is only used for goto statements seen before the label was defined, so that we can issue appropriate warnings for them. 
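   An illustrative case (not from the original comment):

     void f (int p)
     {
       goto skip;
       { int x = p; skip:; }
     }

   Here the goto is recorded before "skip" is bound; once the label is
   seen, the saved bindings let -Wjump-misses-init report that the jump
   misses the initialization of "x".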
*/ vec<c_goto_bindings_p, va_gc> *gotos; }; /* Each c_scope structure describes the complete contents of one scope. Four scopes are distinguished specially: the innermost or current scope, the innermost function scope, the file scope (always the second to outermost) and the outermost or external scope. Most declarations are recorded in the current scope. All normal label declarations are recorded in the innermost function scope, as are bindings of undeclared identifiers to error_mark_node. (GCC permits nested functions as an extension, hence the 'innermost' qualifier.) Explicitly declared labels (using the __label__ extension) appear in the current scope. Being in the file scope (current_scope == file_scope) causes special behavior in several places below. Also, under some conditions the Objective-C front end records declarations in the file scope even though that isn't the current scope. All declarations with external linkage are recorded in the external scope, even if they aren't visible there; this models the fact that such declarations are visible to the entire program, and (with a bit of cleverness, see pushdecl) allows diagnosis of some violations of C99 6.2.2p7 and 6.2.7p2: If, within the same translation unit, the same identifier appears with both internal and external linkage, the behavior is undefined. All declarations that refer to the same object or function shall have compatible type; otherwise, the behavior is undefined. Initially only the built-in declarations, which describe compiler intrinsic functions plus a subset of the standard library, are in this scope. The order of the blocks list matters, and it is frequently appended to. To avoid having to walk all the way to the end of the list on each insertion, or reverse the list later, we maintain a pointer to the last list entry. (FIXME: It should be feasible to use a reversed list here.) The bindings list is strictly in reverse order of declarations; pop_scope relies on this. */ struct GTY((chain_next ("%h.outer"))) c_scope { /* The scope containing this one. */ struct c_scope *outer; /* The next outermost function scope. */ struct c_scope *outer_function; /* All bindings in this scope. */ struct c_binding *bindings; /* For each scope (except the global one), a chain of BLOCK nodes for all the scopes that were entered and exited one level down. */ tree blocks; tree blocks_last; /* The depth of this scope. Used to keep the ->shadowed chain of bindings sorted innermost to outermost. */ unsigned int depth : 28; /* True if we are currently filling this scope with parameter declarations. */ BOOL_BITFIELD parm_flag : 1; /* True if we saw [*] in this scope. Used to give an error messages if these appears in a function definition. */ BOOL_BITFIELD had_vla_unspec : 1; /* True if we already complained about forward parameter decls in this scope. This prevents double warnings on foo (int a; int b; ...) */ BOOL_BITFIELD warned_forward_parm_decls : 1; /* True if this is the outermost block scope of a function body. This scope contains the parameters, the local variables declared in the outermost block, and all the labels (except those in nested functions, or declared at block scope with __label__). */ BOOL_BITFIELD function_body : 1; /* True means make a BLOCK for this scope no matter what. */ BOOL_BITFIELD keep : 1; /* True means that an unsuffixed float constant is _Decimal64. */ BOOL_BITFIELD float_const_decimal64 : 1; /* True if this scope has any label bindings. 
This is used to speed up searching for labels when popping scopes, particularly since labels are normally only found at function scope. */ BOOL_BITFIELD has_label_bindings : 1; /* True if we should issue a warning if a goto statement crosses any of the bindings. We still need to check the list of bindings to find the specific ones we need to warn about. This is true if decl_jump_unsafe would return true for any of the bindings. This is used to avoid looping over all the bindings unnecessarily. */ BOOL_BITFIELD has_jump_unsafe_decl : 1; }; /* The scope currently in effect. */ static GTY(()) struct c_scope *current_scope; /* The innermost function scope. Ordinary (not explicitly declared) labels, bindings to error_mark_node, and the lazily-created bindings of __func__ and its friends get this scope. */ static GTY(()) struct c_scope *current_function_scope; /* The C file scope. This is reset for each input translation unit. */ static GTY(()) struct c_scope *file_scope; /* The outermost scope. This is used for all declarations with external linkage, and only these, hence the name. */ static GTY(()) struct c_scope *external_scope; /* A chain of c_scope structures awaiting reuse. */ static GTY((deletable)) struct c_scope *scope_freelist; /* A chain of c_binding structures awaiting reuse. */ static GTY((deletable)) struct c_binding *binding_freelist; /* Append VAR to LIST in scope SCOPE. */ #define SCOPE_LIST_APPEND(scope, list, decl) do { \ struct c_scope *s_ = (scope); \ tree d_ = (decl); \ if (s_->list##_last) \ BLOCK_CHAIN (s_->list##_last) = d_; \ else \ s_->list = d_; \ s_->list##_last = d_; \ } while (0) /* Concatenate FROM in scope FSCOPE onto TO in scope TSCOPE. */ #define SCOPE_LIST_CONCAT(tscope, to, fscope, from) do { \ struct c_scope *t_ = (tscope); \ struct c_scope *f_ = (fscope); \ if (t_->to##_last) \ BLOCK_CHAIN (t_->to##_last) = f_->from; \ else \ t_->to = f_->from; \ t_->to##_last = f_->from##_last; \ } while (0) /* A c_inline_static structure stores details of a static identifier referenced in a definition of a function that may be an inline definition if no subsequent declaration of that function uses "extern" or does not use "inline". */ struct GTY((chain_next ("%h.next"))) c_inline_static { /* The location for a diagnostic. */ location_t location; /* The function that may be an inline definition. */ tree function; /* The object or function referenced. */ tree static_decl; /* What sort of reference this is. */ enum c_inline_static_type type; /* The next such structure or NULL. */ struct c_inline_static *next; }; /* List of static identifiers used or referenced in functions that may be inline definitions. */ static GTY(()) struct c_inline_static *c_inline_statics; /* True means unconditionally make a BLOCK for the next scope pushed. */ static bool keep_next_level_flag; /* True means the next call to push_scope will be the outermost scope of a function body, so do not push a new scope, merely cease expecting parameter decls. */ static bool next_is_function_body; /* A vector of pointers to c_binding structures. */ typedef struct c_binding *c_binding_ptr; /* Information that we keep for a struct or union while it is being parsed. */ struct c_struct_parse_info { /* If warn_cxx_compat, a list of types defined within this struct. */ vec<tree> struct_types; /* If warn_cxx_compat, a list of field names which have bindings, and which are defined in this struct, but which are not defined in any enclosing struct. This is used to clear the in_struct field of the c_bindings structure. 
*/ vec<c_binding_ptr> fields; /* If warn_cxx_compat, a list of typedef names used when defining fields in this struct. */ vec<tree> typedefs_seen; }; /* Information for the struct or union currently being parsed, or NULL if not parsing a struct or union. */ static struct c_struct_parse_info *struct_parse_info; /* Forward declarations. */ static tree lookup_name_in_scope (tree, struct c_scope *); static tree c_make_fname_decl (location_t, tree, int); static tree grokdeclarator (const struct c_declarator *, struct c_declspecs *, enum decl_context, bool, tree *, tree *, tree *, bool *, enum deprecated_states); static tree grokparms (struct c_arg_info *, bool); static void layout_array_type (tree); static void warn_defaults_to (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); /* T is a statement. Add it to the statement-tree. This is the C/ObjC version--C++ has a slightly different version of this function. */ tree add_stmt (tree t) { enum tree_code code = TREE_CODE (t); if (CAN_HAVE_LOCATION_P (t) && code != LABEL_EXPR) { if (!EXPR_HAS_LOCATION (t)) SET_EXPR_LOCATION (t, input_location); } if (code == LABEL_EXPR || code == CASE_LABEL_EXPR) STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1; /* Add T to the statement-tree. Non-side-effect statements need to be recorded during statement expressions. */ if (!building_stmt_list_p ()) push_stmt_list (); append_to_statement_list_force (t, &cur_stmt_list); return t; } /* Build a pointer type using the default pointer mode. */ static tree c_build_pointer_type (tree to_type) { addr_space_t as = to_type == error_mark_node? ADDR_SPACE_GENERIC : TYPE_ADDR_SPACE (to_type); machine_mode pointer_mode; if (as != ADDR_SPACE_GENERIC || c_default_pointer_mode == VOIDmode) pointer_mode = targetm.addr_space.pointer_mode (as); else pointer_mode = c_default_pointer_mode; return build_pointer_type_for_mode (to_type, pointer_mode, false); } /* Return true if we will want to say something if a goto statement crosses DECL. */ static bool decl_jump_unsafe (tree decl) { if (error_operand_p (decl)) return false; /* Always warn about crossing variably modified types. */ if ((TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == TYPE_DECL) && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE)) return true; /* Otherwise, only warn if -Wgoto-misses-init and this is an initialized automatic decl. */ if (warn_jump_misses_init && TREE_CODE (decl) == VAR_DECL && !TREE_STATIC (decl) && DECL_INITIAL (decl) != NULL_TREE) return true; return false; } void c_print_identifier (FILE *file, tree node, int indent) { void (*save) (enum c_oracle_request, tree identifier); /* Temporarily hide any binding oracle. Without this, calls to debug_tree from the debugger will end up calling into the oracle, making for a confusing debug session. As the oracle isn't needed here for normal operation, it's simplest to suppress it. */ save = c_binding_oracle; c_binding_oracle = NULL; print_node (file, "symbol", I_SYMBOL_DECL (node), indent + 4); print_node (file, "tag", I_TAG_DECL (node), indent + 4); print_node (file, "label", I_LABEL_DECL (node), indent + 4); if (C_IS_RESERVED_WORD (node) && C_RID_CODE (node) != RID_CXX_COMPAT_WARN) { tree rid = ridpointers[C_RID_CODE (node)]; indent_to (file, indent + 4); fprintf (file, "rid " HOST_PTR_PRINTF " \"%s\"", (void *) rid, IDENTIFIER_POINTER (rid)); } c_binding_oracle = save; } /* Establish a binding between NAME, an IDENTIFIER_NODE, and DECL, which may be any of several kinds of DECL or TYPE or error_mark_node, in the scope SCOPE. 
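   For instance (an added illustration):

     int i;
     void f (void) { int i; }

   gives "i" two c_binding entries; the inner one's shadowed field
   points at the file-scope binding, which becomes visible again when
   the inner scope is popped.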
*/ static void bind (tree name, tree decl, struct c_scope *scope, bool invisible, bool nested, location_t locus) { struct c_binding *b, **here; if (binding_freelist) { b = binding_freelist; binding_freelist = b->prev; } else b = ggc_alloc<c_binding> (); b->shadowed = 0; b->decl = decl; b->id = name; b->depth = scope->depth; b->invisible = invisible; b->nested = nested; b->inner_comp = 0; b->in_struct = 0; b->locus = locus; b->u.type = NULL; b->prev = scope->bindings; scope->bindings = b; if (decl_jump_unsafe (decl)) scope->has_jump_unsafe_decl = 1; if (!name) return; switch (TREE_CODE (decl)) { case LABEL_DECL: here = &I_LABEL_BINDING (name); break; case ENUMERAL_TYPE: case UNION_TYPE: case RECORD_TYPE: here = &I_TAG_BINDING (name); break; case VAR_DECL: case FUNCTION_DECL: case TYPE_DECL: case CONST_DECL: case PARM_DECL: case ERROR_MARK: here = &I_SYMBOL_BINDING (name); break; default: gcc_unreachable (); } /* Locate the appropriate place in the chain of shadowed decls to insert this binding. Normally, scope == current_scope and this does nothing. */ while (*here && (*here)->depth > scope->depth) here = &(*here)->shadowed; b->shadowed = *here; *here = b; } /* Clear the binding structure B, stick it on the binding_freelist, and return the former value of b->prev. This is used by pop_scope and get_parm_info to iterate destructively over all the bindings from a given scope. */ static struct c_binding * free_binding_and_advance (struct c_binding *b) { struct c_binding *prev = b->prev; memset (b, 0, sizeof (struct c_binding)); b->prev = binding_freelist; binding_freelist = b; return prev; } /* Bind a label. Like bind, but skip fields which aren't used for labels, and add the LABEL_VARS value. */ static void bind_label (tree name, tree label, struct c_scope *scope, struct c_label_vars *label_vars) { struct c_binding *b; bind (name, label, scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); scope->has_label_bindings = true; b = scope->bindings; gcc_assert (b->decl == label); label_vars->shadowed = b->u.label; b->u.label = label_vars; } /* Hook called at end of compilation to assume 1 elt for a file-scope tentative array defn that wasn't complete before. */ void c_finish_incomplete_decl (tree decl) { if (TREE_CODE (decl) == VAR_DECL) { tree type = TREE_TYPE (decl); if (type != error_mark_node && TREE_CODE (type) == ARRAY_TYPE && !DECL_EXTERNAL (decl) && TYPE_DOMAIN (type) == 0) { warning_at (DECL_SOURCE_LOCATION (decl), 0, "array %q+D assumed to have one element", decl); complete_array_type (&TREE_TYPE (decl), NULL_TREE, true); relayout_decl (decl); } } } /* Record that inline function FUNC contains a reference (location LOC) to static DECL (file-scope or function-local according to TYPE). */ void record_inline_static (location_t loc, tree func, tree decl, enum c_inline_static_type type) { c_inline_static *csi = ggc_alloc<c_inline_static> (); csi->location = loc; csi->function = func; csi->static_decl = decl; csi->type = type; csi->next = c_inline_statics; c_inline_statics = csi; } /* Check for references to static declarations in inline functions at the end of the translation unit and diagnose them if the functions are still inline definitions. 
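   An illustrative case (not from the original comment):

     static int counter;
     inline int bump (void) { return counter++; }

   If no other declaration turns "bump" into an external definition, it
   remains a C99 inline definition, and the reference to the
   internal-linkage "counter" draws the pedwarn below (C99 6.7.4p3).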
*/ static void check_inline_statics (void) { struct c_inline_static *csi; for (csi = c_inline_statics; csi; csi = csi->next) { if (DECL_EXTERNAL (csi->function)) switch (csi->type) { case csi_internal: pedwarn (csi->location, 0, "%qD is static but used in inline function %qD " "which is not static", csi->static_decl, csi->function); break; case csi_modifiable: pedwarn (csi->location, 0, "%q+D is static but declared in inline function %qD " "which is not static", csi->static_decl, csi->function); break; default: gcc_unreachable (); } } c_inline_statics = NULL; } /* Fill in a c_spot_bindings structure. If DEFINING is true, set it for the current state, otherwise set it to uninitialized. */ static void set_spot_bindings (struct c_spot_bindings *p, bool defining) { if (defining) { p->scope = current_scope; p->bindings_in_scope = current_scope->bindings; } else { p->scope = NULL; p->bindings_in_scope = NULL; } p->stmt_exprs = 0; p->left_stmt_expr = false; } /* Update spot bindings P as we pop out of SCOPE. Return true if we should push decls for a label. */ static bool update_spot_bindings (struct c_scope *scope, struct c_spot_bindings *p) { if (p->scope != scope) { /* This label or goto is defined in some other scope, or it is a label which is not yet defined. There is nothing to update. */ return false; } /* Adjust the spot bindings to refer to the bindings already defined in the enclosing scope. */ p->scope = scope->outer; p->bindings_in_scope = p->scope->bindings; return true; } /* The Objective-C front-end often needs to determine the current scope. */ void * objc_get_current_scope (void) { return current_scope; } /* The following function is used only by Objective-C. It needs to live here because it accesses the innards of c_scope. */ void objc_mark_locals_volatile (void *enclosing_blk) { struct c_scope *scope; struct c_binding *b; for (scope = current_scope; scope && scope != enclosing_blk; scope = scope->outer) { for (b = scope->bindings; b; b = b->prev) objc_volatilize_decl (b->decl); /* Do not climb up past the current function. */ if (scope->function_body) break; } } /* Return true if we are in the global binding level. */ bool global_bindings_p (void) { return current_scope == file_scope; } void keep_next_level (void) { keep_next_level_flag = true; } /* Set the flag for the FLOAT_CONST_DECIMAL64 pragma being ON. */ void set_float_const_decimal64 (void) { current_scope->float_const_decimal64 = true; } /* Clear the flag for the FLOAT_CONST_DECIMAL64 pragma. */ void clear_float_const_decimal64 (void) { current_scope->float_const_decimal64 = false; } /* Return nonzero if an unsuffixed float constant is _Decimal64. */ bool float_const_decimal64_p (void) { return current_scope->float_const_decimal64; } /* Identify this scope as currently being filled with parameters. */ void declare_parm_level (void) { current_scope->parm_flag = true; } void push_scope (void) { if (next_is_function_body) { /* This is the transition from the parameters to the top level of the function body. These are the same scope (C99 6.2.1p4,6) so we do not push another scope structure. next_is_function_body is set only by store_parm_decls, which in turn is called when and only when we are about to encounter the opening curly brace for the function body. The outermost block of a function always gets a BLOCK node, because the debugging output routines expect that each function has at least one BLOCK. 
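For example, given void f (int a) { int b; }, the parameter a and the local b end up in one and the same scope, which is why a declaration int a; at the top level of the body would be a redeclaration error.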
*/ current_scope->parm_flag = false; current_scope->function_body = true; current_scope->keep = true; current_scope->outer_function = current_function_scope; current_function_scope = current_scope; keep_next_level_flag = false; next_is_function_body = false; /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes. */ if (current_scope->outer) current_scope->float_const_decimal64 = current_scope->outer->float_const_decimal64; else current_scope->float_const_decimal64 = false; } else { struct c_scope *scope; if (scope_freelist) { scope = scope_freelist; scope_freelist = scope->outer; } else scope = ggc_cleared_alloc<c_scope> (); /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes. */ if (current_scope) scope->float_const_decimal64 = current_scope->float_const_decimal64; else scope->float_const_decimal64 = false; scope->keep = keep_next_level_flag; scope->outer = current_scope; scope->depth = current_scope ? (current_scope->depth + 1) : 0; /* Check for scope depth overflow. Unlikely (2^28 == 268,435,456) but possible. */ if (current_scope && scope->depth == 0) { scope->depth--; sorry ("GCC supports only %u nested scopes", scope->depth); } current_scope = scope; keep_next_level_flag = false; } } /* This is called when we are leaving SCOPE. For each label defined in SCOPE, add any appropriate decls to its decls_in_scope fields. These are the decls whose initialization will be skipped by a goto later in the function. */ static void update_label_decls (struct c_scope *scope) { struct c_scope *s; s = scope; while (s != NULL) { if (s->has_label_bindings) { struct c_binding *b; for (b = s->bindings; b != NULL; b = b->prev) { struct c_label_vars *label_vars; struct c_binding *b1; bool hjud; unsigned int ix; struct c_goto_bindings *g; if (TREE_CODE (b->decl) != LABEL_DECL) continue; label_vars = b->u.label; b1 = label_vars->label_bindings.bindings_in_scope; if (label_vars->label_bindings.scope == NULL) hjud = false; else hjud = label_vars->label_bindings.scope->has_jump_unsafe_decl; if (update_spot_bindings (scope, &label_vars->label_bindings)) { /* This label is defined in this scope. */ if (hjud) { for (; b1 != NULL; b1 = b1->prev) { /* A goto from later in the function to this label will never see the initialization of B1, if any. Save it to issue a warning if needed. */ if (decl_jump_unsafe (b1->decl)) vec_safe_push(label_vars->decls_in_scope, b1->decl); } } } /* Update the bindings of any goto statements associated with this label. */ FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) update_spot_bindings (scope, &g->goto_bindings); } } /* Don't search beyond the current function. */ if (s == current_function_scope) break; s = s->outer; } } /* Set the TYPE_CONTEXT of all of TYPE's variants to CONTEXT. */ static void set_type_context (tree type, tree context) { for (type = TYPE_MAIN_VARIANT (type); type; type = TYPE_NEXT_VARIANT (type)) TYPE_CONTEXT (type) = context; } /* Exit a scope. Restore the state of the identifier-decl mappings that were in effect when this scope was entered. Return a BLOCK node containing all the DECLs in this scope that are of interest to debug info generation. */ tree pop_scope (void) { struct c_scope *scope = current_scope; tree block, context, p; struct c_binding *b; bool functionbody = scope->function_body; bool keep = functionbody || scope->keep || scope->bindings; update_label_decls (scope); /* If appropriate, create a BLOCK to record the decls for the life of this function. 
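A scope gets a BLOCK when it is a function body, was explicitly kept via keep_next_level, or made at least one binding; an empty inner { } block, for example, produces no BLOCK of its own.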
*/ block = 0; if (keep) { block = make_node (BLOCK); BLOCK_SUBBLOCKS (block) = scope->blocks; TREE_USED (block) = 1; /* In each subblock, record that this is its superior. */ for (p = scope->blocks; p; p = BLOCK_CHAIN (p)) BLOCK_SUPERCONTEXT (p) = block; BLOCK_VARS (block) = 0; } /* The TYPE_CONTEXTs for all of the tagged types belonging to this scope must be set so that they point to the appropriate construct, i.e. either to the current FUNCTION_DECL node, or else to the BLOCK node we just constructed. Note that for tagged types whose scope is just the formal parameter list for some function type specification, we can't properly set their TYPE_CONTEXTs here, because we don't have a pointer to the appropriate FUNCTION_TYPE node readily available to us. For those cases, the TYPE_CONTEXTs of the relevant tagged type nodes get set in `grokdeclarator' as soon as we have created the FUNCTION_TYPE node which will represent the "scope" for these "parameter list local" tagged types. */ if (scope->function_body) context = current_function_decl; else if (scope == file_scope) { tree file_decl = build_translation_unit_decl (NULL_TREE); context = file_decl; debug_hooks->register_main_translation_unit (file_decl); } else context = block; /* Clear all bindings in this scope. */ for (b = scope->bindings; b; b = free_binding_and_advance (b)) { p = b->decl; switch (TREE_CODE (p)) { case LABEL_DECL: /* Warnings for unused labels, errors for undefined labels. */ if (TREE_USED (p) && !DECL_INITIAL (p)) { error ("label %q+D used but not defined", p); DECL_INITIAL (p) = error_mark_node; } else warn_for_unused_label (p); /* Labels go in BLOCK_VARS. */ DECL_CHAIN (p) = BLOCK_VARS (block); BLOCK_VARS (block) = p; gcc_assert (I_LABEL_BINDING (b->id) == b); I_LABEL_BINDING (b->id) = b->shadowed; /* Also pop back to the shadowed label_vars. */ release_tree_vector (b->u.label->decls_in_scope); b->u.label = b->u.label->shadowed; break; case ENUMERAL_TYPE: case UNION_TYPE: case RECORD_TYPE: set_type_context (p, context); /* Types may not have tag-names, in which case the type appears in the bindings list with b->id NULL. */ if (b->id) { gcc_assert (I_TAG_BINDING (b->id) == b); I_TAG_BINDING (b->id) = b->shadowed; } break; case FUNCTION_DECL: /* Propagate TREE_ADDRESSABLE from nested functions to their containing functions. */ if (!TREE_ASM_WRITTEN (p) && DECL_INITIAL (p) != 0 && TREE_ADDRESSABLE (p) && DECL_ABSTRACT_ORIGIN (p) != 0 && DECL_ABSTRACT_ORIGIN (p) != p) TREE_ADDRESSABLE (DECL_ABSTRACT_ORIGIN (p)) = 1; if (!DECL_EXTERNAL (p) && !DECL_INITIAL (p) && scope != file_scope && scope != external_scope) { error ("nested function %q+D declared but never defined", p); undef_nested_function = true; } else if (DECL_DECLARED_INLINE_P (p) && TREE_PUBLIC (p) && !DECL_INITIAL (p)) { /* C99 6.7.4p6: "a function with external linkage... declared with an inline function specifier ... shall also be defined in the same translation unit." */ if (!flag_gnu89_inline && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (p)) && scope != external_scope) pedwarn (input_location, 0, "inline function %q+D declared but never defined", p); DECL_EXTERNAL (p) = 1; } goto common_symbol; case VAR_DECL: /* Warnings for unused variables. 
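For example, void f (void) { int x; } gets "unused variable 'x'" (TREE_USED never set), while void g (void) { int y; y = 1; } gets "variable 'y' set but not used" (the assignment sets TREE_USED but DECL_READ_P stays clear).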
*/ if ((!TREE_USED (p) || !DECL_READ_P (p)) && !TREE_NO_WARNING (p) && !DECL_IN_SYSTEM_HEADER (p) && DECL_NAME (p) && !DECL_ARTIFICIAL (p) && scope != file_scope && scope != external_scope) { if (!TREE_USED (p)) warning (OPT_Wunused_variable, "unused variable %q+D", p); else if (DECL_CONTEXT (p) == current_function_decl) warning_at (DECL_SOURCE_LOCATION (p), OPT_Wunused_but_set_variable, "variable %qD set but not used", p); } if (b->inner_comp) { error ("type of array %q+D completed incompatibly with" " implicit initialization", p); } /* Fall through. */ case TYPE_DECL: case CONST_DECL: common_symbol: /* All of these go in BLOCK_VARS, but only if this is the binding in the home scope. */ if (!b->nested) { DECL_CHAIN (p) = BLOCK_VARS (block); BLOCK_VARS (block) = p; } else if (VAR_OR_FUNCTION_DECL_P (p) && scope != file_scope) { /* For block local externs add a special DECL_EXTERNAL decl for debug info generation. */ tree extp = copy_node (p); DECL_EXTERNAL (extp) = 1; TREE_STATIC (extp) = 0; TREE_PUBLIC (extp) = 1; DECL_INITIAL (extp) = NULL_TREE; DECL_LANG_SPECIFIC (extp) = NULL; DECL_CONTEXT (extp) = current_function_decl; if (TREE_CODE (p) == FUNCTION_DECL) { DECL_RESULT (extp) = NULL_TREE; DECL_SAVED_TREE (extp) = NULL_TREE; DECL_STRUCT_FUNCTION (extp) = NULL; } if (b->locus != UNKNOWN_LOCATION) DECL_SOURCE_LOCATION (extp) = b->locus; DECL_CHAIN (extp) = BLOCK_VARS (block); BLOCK_VARS (block) = extp; } /* If this is the file scope set DECL_CONTEXT of each decl to the TRANSLATION_UNIT_DECL. This makes same_translation_unit_p work. */ if (scope == file_scope) { DECL_CONTEXT (p) = context; if (TREE_CODE (p) == TYPE_DECL && TREE_TYPE (p) != error_mark_node) set_type_context (TREE_TYPE (p), context); } /* Fall through. */ /* Parameters go in DECL_ARGUMENTS, not BLOCK_VARS, and have already been put there by store_parm_decls. Unused- parameter warnings are handled by function.c. error_mark_node obviously does not go in BLOCK_VARS and does not get unused-variable warnings. */ case PARM_DECL: case ERROR_MARK: /* It is possible for a decl not to have a name. We get here with b->id NULL in this case. */ if (b->id) { gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; if (b->shadowed && b->shadowed->u.type) TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type; } break; default: gcc_unreachable (); } } /* Dispose of the block that we just made inside some higher level. */ if ((scope->function_body || scope == file_scope) && context) { DECL_INITIAL (context) = block; BLOCK_SUPERCONTEXT (block) = context; } else if (scope->outer) { if (block) SCOPE_LIST_APPEND (scope->outer, blocks, block); /* If we did not make a block for the scope just exited, any blocks made for inner scopes must be carried forward so they will later become subblocks of something else. */ else if (scope->blocks) SCOPE_LIST_CONCAT (scope->outer, blocks, scope, blocks); } /* Pop the current scope, and free the structure for reuse. 
*/ current_scope = scope->outer; if (scope->function_body) current_function_scope = scope->outer_function; memset (scope, 0, sizeof (struct c_scope)); scope->outer = scope_freelist; scope_freelist = scope; return block; } void push_file_scope (void) { tree decl; if (file_scope) return; push_scope (); file_scope = current_scope; start_fname_decls (); for (decl = visible_builtins; decl; decl = DECL_CHAIN (decl)) bind (DECL_NAME (decl), decl, file_scope, /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl)); } void pop_file_scope (void) { /* In case there were missing closebraces, get us back to the global binding level. */ while (current_scope != file_scope) pop_scope (); /* __FUNCTION__ is defined at file scope (""). This call may not be necessary as my tests indicate it still works without it. */ finish_fname_decls (); check_inline_statics (); /* This is the point to write out a PCH if we're doing that. In that case we do not want to do anything else. */ if (pch_file) { c_common_write_pch (); return; } /* Pop off the file scope and close this translation unit. */ pop_scope (); file_scope = 0; maybe_apply_pending_pragma_weaks (); } /* Adjust the bindings for the start of a statement expression. */ void c_bindings_start_stmt_expr (struct c_spot_bindings* switch_bindings) { struct c_scope *scope; for (scope = current_scope; scope != NULL; scope = scope->outer) { struct c_binding *b; if (!scope->has_label_bindings) continue; for (b = scope->bindings; b != NULL; b = b->prev) { struct c_label_vars *label_vars; unsigned int ix; struct c_goto_bindings *g; if (TREE_CODE (b->decl) != LABEL_DECL) continue; label_vars = b->u.label; ++label_vars->label_bindings.stmt_exprs; FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) ++g->goto_bindings.stmt_exprs; } } if (switch_bindings != NULL) ++switch_bindings->stmt_exprs; } /* Adjust the bindings for the end of a statement expression. */ void c_bindings_end_stmt_expr (struct c_spot_bindings *switch_bindings) { struct c_scope *scope; for (scope = current_scope; scope != NULL; scope = scope->outer) { struct c_binding *b; if (!scope->has_label_bindings) continue; for (b = scope->bindings; b != NULL; b = b->prev) { struct c_label_vars *label_vars; unsigned int ix; struct c_goto_bindings *g; if (TREE_CODE (b->decl) != LABEL_DECL) continue; label_vars = b->u.label; --label_vars->label_bindings.stmt_exprs; if (label_vars->label_bindings.stmt_exprs < 0) { label_vars->label_bindings.left_stmt_expr = true; label_vars->label_bindings.stmt_exprs = 0; } FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) { --g->goto_bindings.stmt_exprs; if (g->goto_bindings.stmt_exprs < 0) { g->goto_bindings.left_stmt_expr = true; g->goto_bindings.stmt_exprs = 0; } } } } if (switch_bindings != NULL) { --switch_bindings->stmt_exprs; gcc_assert (switch_bindings->stmt_exprs >= 0); } } /* Push a definition or a declaration of struct, union or enum tag "name". "type" should be the type node. We assume that the tag "name" is not already defined, and has a location of LOC. Note that the definition may really be just a forward reference. In that case, the TYPE_SIZE will be zero. */ static void pushtag (location_t loc, tree name, tree type) { /* Record the identifier as the type's name if it has none. */ if (name && !TYPE_NAME (type)) TYPE_NAME (type) = name; bind (name, type, current_scope, /*invisible=*/false, /*nested=*/false, loc); /* Create a fake NULL-named TYPE_DECL node whose TREE_TYPE will be the tagged type we just added to the current scope. 
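(For example, parsing struct s { int i; }; reaches pushtag with NAME s and the freshly created RECORD_TYPE, which the bind call above enters into the tag namespace.)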
This fake NULL-named TYPE_DECL node helps dwarfout.c to know when it needs to output a representation of a tagged type, and it also gives us a convenient place to record the "scope start" address for the tagged type. */ TYPE_STUB_DECL (type) = pushdecl (build_decl (loc, TYPE_DECL, NULL_TREE, type)); /* An approximation for now, so we can tell this is a function-scope tag. This will be updated in pop_scope. */ TYPE_CONTEXT (type) = DECL_CONTEXT (TYPE_STUB_DECL (type)); if (warn_cxx_compat && name != NULL_TREE) { struct c_binding *b = I_SYMBOL_BINDING (name); if (b != NULL && b->decl != NULL_TREE && TREE_CODE (b->decl) == TYPE_DECL && (B_IN_CURRENT_SCOPE (b) || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b))) && (TYPE_MAIN_VARIANT (TREE_TYPE (b->decl)) != TYPE_MAIN_VARIANT (type))) { warning_at (loc, OPT_Wc___compat, ("using %qD as both a typedef and a tag is " "invalid in C++"), b->decl); if (b->locus != UNKNOWN_LOCATION) inform (b->locus, "originally defined here"); } } } /* An exported interface to pushtag. This is used by the gdb plugin's binding oracle to introduce a new tag binding. */ void c_pushtag (location_t loc, tree name, tree type) { pushtag (loc, name, type); } /* An exported interface to bind a declaration. LOC is the location to use. DECL is the declaration to bind. The decl's name is used to determine how it is bound. If DECL is a VAR_DECL, then IS_GLOBAL determines whether the decl is put into the global (file and external) scope or the current function's scope; if DECL is not a VAR_DECL then it is always put into the file scope. */ void c_bind (location_t loc, tree decl, bool is_global) { struct c_scope *scope; bool nested = false; if (TREE_CODE (decl) != VAR_DECL || current_function_scope == NULL) { /* Types and functions are always considered to be global. */ scope = file_scope; DECL_EXTERNAL (decl) = 1; TREE_PUBLIC (decl) = 1; } else if (is_global) { /* Also bind it into the external scope. */ bind (DECL_NAME (decl), decl, external_scope, true, false, loc); nested = true; scope = file_scope; DECL_EXTERNAL (decl) = 1; TREE_PUBLIC (decl) = 1; } else { DECL_CONTEXT (decl) = current_function_decl; TREE_PUBLIC (decl) = 0; scope = current_function_scope; } bind (DECL_NAME (decl), decl, scope, false, nested, loc); } /* Subroutine of diagnose_mismatched_decls. Allow harmless mismatches in return and argument types provided that the type modes match. This function returns a unified type given a suitable match, and 0 otherwise. */ static tree match_builtin_function_types (tree newtype, tree oldtype) { tree newrettype, oldrettype; tree newargs, oldargs; tree trytype, tryargs; /* Accept the return type of the new declaration if same modes. */ oldrettype = TREE_TYPE (oldtype); newrettype = TREE_TYPE (newtype); if (TYPE_MODE (oldrettype) != TYPE_MODE (newrettype)) return 0; oldargs = TYPE_ARG_TYPES (oldtype); newargs = TYPE_ARG_TYPES (newtype); tryargs = newargs; while (oldargs || newargs) { if (!oldargs || !newargs || !TREE_VALUE (oldargs) || !TREE_VALUE (newargs) || TYPE_MODE (TREE_VALUE (oldargs)) != TYPE_MODE (TREE_VALUE (newargs))) return 0; oldargs = TREE_CHAIN (oldargs); newargs = TREE_CHAIN (newargs); } trytype = build_function_type (newrettype, tryargs); return build_type_attribute_variant (trytype, TYPE_ATTRIBUTES (oldtype)); } /* Subroutine of diagnose_mismatched_decls. Check for function type mismatch involving an empty arglist vs a nonempty one and give clearer diagnostics.
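For example, given void f (); followed by void f (char c) { }, char undergoes a default argument promotion to int, so the unprototyped declaration can never match; the second inform below explains that rather than leaving only the bare type-conflict error.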
*/ static void diagnose_arglist_conflict (tree newdecl, tree olddecl, tree newtype, tree oldtype) { tree t; if (TREE_CODE (olddecl) != FUNCTION_DECL || !comptypes (TREE_TYPE (oldtype), TREE_TYPE (newtype)) || !((!prototype_p (oldtype) && DECL_INITIAL (olddecl) == 0) || (!prototype_p (newtype) && DECL_INITIAL (newdecl) == 0))) return; t = TYPE_ARG_TYPES (oldtype); if (t == 0) t = TYPE_ARG_TYPES (newtype); for (; t; t = TREE_CHAIN (t)) { tree type = TREE_VALUE (t); if (TREE_CHAIN (t) == 0 && TYPE_MAIN_VARIANT (type) != void_type_node) { inform (input_location, "a parameter list with an ellipsis can%'t match " "an empty parameter name list declaration"); break; } if (c_type_promotes_to (type) != type) { inform (input_location, "an argument type that has a default promotion can%'t match " "an empty parameter name list declaration"); break; } } } /* Another subroutine of diagnose_mismatched_decls. OLDDECL is an old-style function definition, NEWDECL is a prototype declaration. Diagnose inconsistencies in the argument list. Returns TRUE if the prototype is compatible, FALSE if not. */ static bool validate_proto_after_old_defn (tree newdecl, tree newtype, tree oldtype) { tree newargs, oldargs; int i; #define END_OF_ARGLIST(t) ((t) == void_type_node) oldargs = TYPE_ACTUAL_ARG_TYPES (oldtype); newargs = TYPE_ARG_TYPES (newtype); i = 1; for (;;) { tree oldargtype = TREE_VALUE (oldargs); tree newargtype = TREE_VALUE (newargs); if (oldargtype == error_mark_node || newargtype == error_mark_node) return false; oldargtype = (TYPE_ATOMIC (oldargtype) ? c_build_qualified_type (TYPE_MAIN_VARIANT (oldargtype), TYPE_QUAL_ATOMIC) : TYPE_MAIN_VARIANT (oldargtype)); newargtype = (TYPE_ATOMIC (newargtype) ? c_build_qualified_type (TYPE_MAIN_VARIANT (newargtype), TYPE_QUAL_ATOMIC) : TYPE_MAIN_VARIANT (newargtype)); if (END_OF_ARGLIST (oldargtype) && END_OF_ARGLIST (newargtype)) break; /* Reaching the end of just one list means the two decls don't agree on the number of arguments. */ if (END_OF_ARGLIST (oldargtype)) { error ("prototype for %q+D declares more arguments " "than previous old-style definition", newdecl); return false; } else if (END_OF_ARGLIST (newargtype)) { error ("prototype for %q+D declares fewer arguments " "than previous old-style definition", newdecl); return false; } /* Type for passing arg must be consistent with that declared for the arg. */ else if (!comptypes (oldargtype, newargtype)) { error ("prototype for %q+D declares argument %d" " with incompatible type", newdecl, i); return false; } oldargs = TREE_CHAIN (oldargs); newargs = TREE_CHAIN (newargs); i++; } /* If we get here, no errors were found, but do issue a warning for this poor-style construct. */ warning (0, "prototype for %q+D follows non-prototype definition", newdecl); return true; #undef END_OF_ARGLIST } /* Subroutine of diagnose_mismatched_decls. Report the location of DECL, first in a pair of mismatched declarations, using the diagnostic function DIAG. */ static void locate_old_decl (tree decl) { if (TREE_CODE (decl) == FUNCTION_DECL && DECL_BUILT_IN (decl) && !C_DECL_DECLARED_BUILTIN (decl)) ; else if (DECL_INITIAL (decl)) inform (input_location, "previous definition of %q+D was here", decl); else if (C_DECL_IMPLICIT (decl)) inform (input_location, "previous implicit declaration of %q+D was here", decl); else inform (input_location, "previous declaration of %q+D was here", decl); } /* Subroutine of duplicate_decls. Compare NEWDECL to OLDDECL. 
Returns true if the caller should proceed to merge the two, false if OLDDECL should simply be discarded. As a side effect, issues all necessary diagnostics for invalid or poor-style combinations. If it returns true, writes the types of NEWDECL and OLDDECL to *NEWTYPEP and *OLDTYPEP - these may have been adjusted from TREE_TYPE (NEWDECL, OLDDECL) respectively. */ static bool diagnose_mismatched_decls (tree newdecl, tree olddecl, tree *newtypep, tree *oldtypep) { tree newtype, oldtype; bool pedwarned = false; bool warned = false; bool retval = true; #define DECL_EXTERN_INLINE(DECL) (DECL_DECLARED_INLINE_P (DECL) \ && DECL_EXTERNAL (DECL)) /* If we have error_mark_node for either decl or type, just discard the previous decl - we're in an error cascade already. */ if (olddecl == error_mark_node || newdecl == error_mark_node) return false; *oldtypep = oldtype = TREE_TYPE (olddecl); *newtypep = newtype = TREE_TYPE (newdecl); if (oldtype == error_mark_node || newtype == error_mark_node) return false; /* Two different categories of symbol altogether. This is an error unless OLDDECL is a builtin. OLDDECL will be discarded in any case. */ if (TREE_CODE (olddecl) != TREE_CODE (newdecl)) { if (!(TREE_CODE (olddecl) == FUNCTION_DECL && DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl))) { error ("%q+D redeclared as different kind of symbol", newdecl); locate_old_decl (olddecl); } else if (TREE_PUBLIC (newdecl)) warning (0, "built-in function %q+D declared as non-function", newdecl); else warning (OPT_Wshadow, "declaration of %q+D shadows " "a built-in function", newdecl); return false; } /* Enumerators have no linkage, so may only be declared once in a given scope. */ if (TREE_CODE (olddecl) == CONST_DECL) { error ("redeclaration of enumerator %q+D", newdecl); locate_old_decl (olddecl); return false; } if (!comptypes (oldtype, newtype)) { if (TREE_CODE (olddecl) == FUNCTION_DECL && DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl)) { /* Accept harmless mismatch in function types. This is for the ffs and fprintf builtins. */ tree trytype = match_builtin_function_types (newtype, oldtype); if (trytype && comptypes (newtype, trytype)) *oldtypep = oldtype = trytype; else { /* If types don't match for a built-in, throw away the built-in. No point in calling locate_old_decl here, it won't print anything. */ warning (0, "conflicting types for built-in function %q+D", newdecl); return false; } } else if (TREE_CODE (olddecl) == FUNCTION_DECL && DECL_IS_BUILTIN (olddecl)) { /* A conflicting function declaration for a predeclared function that isn't actually built in. Objective C uses these. The new declaration silently overrides everything but the volatility (i.e. noreturn) indication. See also below. FIXME: Make Objective C use normal builtins. */ TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl); return false; } /* Permit void foo (...) to match int foo (...) if the latter is the definition and implicit int was used. See c-torture/compile/920625-2.c. */ else if (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == void_type_node && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == integer_type_node && C_FUNCTION_IMPLICIT_INT (newdecl) && !DECL_INITIAL (olddecl)) { pedwarned = pedwarn (input_location, 0, "conflicting types for %q+D", newdecl); /* Make sure we keep void as the return type. */ TREE_TYPE (newdecl) = *newtypep = newtype = oldtype; C_FUNCTION_IMPLICIT_INT (newdecl) = 0; } /* Permit void foo (...) 
to match an earlier call to foo (...) with no declared type (thus, implicitly int). */ else if (TREE_CODE (newdecl) == FUNCTION_DECL && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == void_type_node && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == integer_type_node && C_DECL_IMPLICIT (olddecl) && !DECL_INITIAL (olddecl)) { pedwarned = pedwarn (input_location, 0, "conflicting types for %q+D", newdecl); /* Make sure we keep void as the return type. */ TREE_TYPE (olddecl) = *oldtypep = oldtype = newtype; } else { int new_quals = TYPE_QUALS (newtype); int old_quals = TYPE_QUALS (oldtype); if (new_quals != old_quals) { addr_space_t new_addr = DECODE_QUAL_ADDR_SPACE (new_quals); addr_space_t old_addr = DECODE_QUAL_ADDR_SPACE (old_quals); if (new_addr != old_addr) { if (ADDR_SPACE_GENERIC_P (new_addr)) error ("conflicting named address spaces (generic vs %s) " "for %q+D", c_addr_space_name (old_addr), newdecl); else if (ADDR_SPACE_GENERIC_P (old_addr)) error ("conflicting named address spaces (%s vs generic) " "for %q+D", c_addr_space_name (new_addr), newdecl); else error ("conflicting named address spaces (%s vs %s) " "for %q+D", c_addr_space_name (new_addr), c_addr_space_name (old_addr), newdecl); } if (CLEAR_QUAL_ADDR_SPACE (new_quals) != CLEAR_QUAL_ADDR_SPACE (old_quals)) error ("conflicting type qualifiers for %q+D", newdecl); } else error ("conflicting types for %q+D", newdecl); diagnose_arglist_conflict (newdecl, olddecl, newtype, oldtype); locate_old_decl (olddecl); return false; } } /* Redeclaration of a type is a constraint violation (6.7.2.3p1), but silently ignore the redeclaration if either is in a system header. (Conflicting redeclarations were handled above.) This is allowed for C11 if the types are the same, not just compatible. */ if (TREE_CODE (newdecl) == TYPE_DECL) { bool types_different = false; int comptypes_result; comptypes_result = comptypes_check_different_types (oldtype, newtype, &types_different); if (comptypes_result != 1 || types_different) { error ("redefinition of typedef %q+D with different type", newdecl); locate_old_decl (olddecl); return false; } if (DECL_IN_SYSTEM_HEADER (newdecl) || DECL_IN_SYSTEM_HEADER (olddecl) || TREE_NO_WARNING (newdecl) || TREE_NO_WARNING (olddecl)) return true; /* Allow OLDDECL to continue in use. */ if (variably_modified_type_p (newtype, NULL)) { error ("redefinition of typedef %q+D with variably modified type", newdecl); locate_old_decl (olddecl); } else if (pedwarn_c99 (input_location, OPT_Wpedantic, "redefinition of typedef %q+D", newdecl)) locate_old_decl (olddecl); return true; } /* Function declarations can either be 'static' or 'extern' (no qualifier is equivalent to 'extern' - C99 6.2.2p5) and therefore can never conflict with each other on account of linkage (6.2.2p4). Multiple definitions are not allowed (6.9p3,5) but gnu89 mode permits two definitions if one is 'extern inline' and one is not. The non- extern-inline definition supersedes the extern-inline definition. */ else if (TREE_CODE (newdecl) == FUNCTION_DECL) { /* If you declare a built-in function name as static, or define the built-in with an old-style definition (so we can't validate the argument list) the built-in definition is overridden, but optionally warn this was a bad choice of name. 
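For example, static int abs (int x) { return x < 0 ? -x : x; } replaces the built-in abs for this translation unit, at the cost of the optional -Wshadow warning below.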
*/ if (DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl) && (!TREE_PUBLIC (newdecl) || (DECL_INITIAL (newdecl) && !prototype_p (TREE_TYPE (newdecl))))) { warning (OPT_Wshadow, "declaration of %q+D shadows " "a built-in function", newdecl); /* Discard the old built-in function. */ return false; } if (DECL_INITIAL (newdecl)) { if (DECL_INITIAL (olddecl)) { /* If both decls are in the same TU and the new declaration isn't overriding an extern inline reject the new decl. In c99, no overriding is allowed in the same translation unit. */ if ((!DECL_EXTERN_INLINE (olddecl) || DECL_EXTERN_INLINE (newdecl) || (!flag_gnu89_inline && (!DECL_DECLARED_INLINE_P (olddecl) || !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (olddecl))) && (!DECL_DECLARED_INLINE_P (newdecl) || !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl)))) ) && same_translation_unit_p (newdecl, olddecl)) { error ("redefinition of %q+D", newdecl); locate_old_decl (olddecl); return false; } } } /* If we have a prototype after an old-style function definition, the argument types must be checked specially. */ else if (DECL_INITIAL (olddecl) && !prototype_p (oldtype) && prototype_p (newtype) && TYPE_ACTUAL_ARG_TYPES (oldtype) && !validate_proto_after_old_defn (newdecl, newtype, oldtype)) { locate_old_decl (olddecl); return false; } /* A non-static declaration (even an "extern") followed by a static declaration is undefined behavior per C99 6.2.2p3-5,7. The same is true for a static forward declaration at block scope followed by a non-static declaration/definition at file scope. Static followed by non-static at the same scope is not undefined behavior, and is the most convenient way to get some effects (see e.g. what unwind-dw2-fde-glibc.c does to the definition of _Unwind_Find_FDE in unwind-dw2-fde.c), but we do diagnose it if -Wtraditional. */ if (TREE_PUBLIC (olddecl) && !TREE_PUBLIC (newdecl)) { /* Two exceptions to the rule. If olddecl is an extern inline, or a predeclared function that isn't actually built in, newdecl silently overrides olddecl. The latter occur only in Objective C; see also above. (FIXME: Make Objective C use normal builtins.) */ if (!DECL_IS_BUILTIN (olddecl) && !DECL_EXTERN_INLINE (olddecl)) { error ("static declaration of %q+D follows " "non-static declaration", newdecl); locate_old_decl (olddecl); } return false; } else if (TREE_PUBLIC (newdecl) && !TREE_PUBLIC (olddecl)) { if (DECL_CONTEXT (olddecl)) { error ("non-static declaration of %q+D follows " "static declaration", newdecl); locate_old_decl (olddecl); return false; } else if (warn_traditional) { warned |= warning (OPT_Wtraditional, "non-static declaration of %q+D " "follows static declaration", newdecl); } } /* Make sure gnu_inline attribute is either not present, or present on all inline decls. */ if (DECL_DECLARED_INLINE_P (olddecl) && DECL_DECLARED_INLINE_P (newdecl)) { bool newa = lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl)) != NULL; bool olda = lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (olddecl)) != NULL; if (newa != olda) { error_at (input_location, "%<gnu_inline%> attribute present on %q+D", newa ? newdecl : olddecl); error_at (DECL_SOURCE_LOCATION (newa ? olddecl : newdecl), "but not here"); } } } else if (TREE_CODE (newdecl) == VAR_DECL) { /* Only variables can be thread-local, and all declarations must agree on this property. */ if (C_DECL_THREADPRIVATE_P (olddecl) && !DECL_THREAD_LOCAL_P (newdecl)) { /* Nothing to check. 
Since OLDDECL is marked threadprivate and NEWDECL does not have a thread-local attribute, we will merge the threadprivate attribute into NEWDECL. */ ; } else if (DECL_THREAD_LOCAL_P (newdecl) != DECL_THREAD_LOCAL_P (olddecl)) { if (DECL_THREAD_LOCAL_P (newdecl)) error ("thread-local declaration of %q+D follows " "non-thread-local declaration", newdecl); else error ("non-thread-local declaration of %q+D follows " "thread-local declaration", newdecl); locate_old_decl (olddecl); return false; } /* Multiple initialized definitions are not allowed (6.9p3,5). */ if (DECL_INITIAL (newdecl) && DECL_INITIAL (olddecl)) { error ("redefinition of %q+D", newdecl); locate_old_decl (olddecl); return false; } /* Objects declared at file scope: if the first declaration had external linkage (even if it was an external reference) the second must have external linkage as well, or the behavior is undefined. If the first declaration had internal linkage, then the second must too, or else be an external reference (in which case the composite declaration still has internal linkage). As for function declarations, we warn about the static-then- extern case only for -Wtraditional. See generally 6.2.2p3-5,7. */ if (DECL_FILE_SCOPE_P (newdecl) && TREE_PUBLIC (newdecl) != TREE_PUBLIC (olddecl)) { if (DECL_EXTERNAL (newdecl)) { if (!DECL_FILE_SCOPE_P (olddecl)) { error ("extern declaration of %q+D follows " "declaration with no linkage", newdecl); locate_old_decl (olddecl); return false; } else if (warn_traditional) { warned |= warning (OPT_Wtraditional, "non-static declaration of %q+D " "follows static declaration", newdecl); } } else { if (TREE_PUBLIC (newdecl)) error ("non-static declaration of %q+D follows " "static declaration", newdecl); else error ("static declaration of %q+D follows " "non-static declaration", newdecl); locate_old_decl (olddecl); return false; } } /* Two objects with the same name declared at the same block scope must both be external references (6.7p3). */ else if (!DECL_FILE_SCOPE_P (newdecl)) { if (DECL_EXTERNAL (newdecl)) { /* Extern with initializer at block scope, which will already have received an error. */ } else if (DECL_EXTERNAL (olddecl)) { error ("declaration of %q+D with no linkage follows " "extern declaration", newdecl); locate_old_decl (olddecl); } else { error ("redeclaration of %q+D with no linkage", newdecl); locate_old_decl (olddecl); } return false; } /* C++ does not permit a decl to appear multiple times at file scope. */ if (warn_cxx_compat && DECL_FILE_SCOPE_P (newdecl) && !DECL_EXTERNAL (newdecl) && !DECL_EXTERNAL (olddecl)) warned |= warning_at (DECL_SOURCE_LOCATION (newdecl), OPT_Wc___compat, ("duplicate declaration of %qD is " "invalid in C++"), newdecl); } /* warnings */ /* All decls must agree on a visibility. */ if (CODE_CONTAINS_STRUCT (TREE_CODE (newdecl), TS_DECL_WITH_VIS) && DECL_VISIBILITY_SPECIFIED (newdecl) && DECL_VISIBILITY_SPECIFIED (olddecl) && DECL_VISIBILITY (newdecl) != DECL_VISIBILITY (olddecl)) { warned |= warning (0, "redeclaration of %q+D with different visibility " "(old visibility preserved)", newdecl); } if (TREE_CODE (newdecl) == FUNCTION_DECL) { /* Diagnose inline __attribute__ ((noinline)) which is silly. 
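For example, void f (void) __attribute__ ((noinline)); followed by inline void f (void); gets "inline declaration of 'f' follows declaration with attribute noinline".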
*/ if (DECL_DECLARED_INLINE_P (newdecl) && lookup_attribute ("noinline", DECL_ATTRIBUTES (olddecl))) warned |= warning (OPT_Wattributes, "inline declaration of %qD follows " "declaration with attribute noinline", newdecl); else if (DECL_DECLARED_INLINE_P (olddecl) && lookup_attribute ("noinline", DECL_ATTRIBUTES (newdecl))) warned |= warning (OPT_Wattributes, "declaration of %q+D with attribute " "noinline follows inline declaration ", newdecl); else if (lookup_attribute ("noinline", DECL_ATTRIBUTES (newdecl)) && lookup_attribute ("always_inline", DECL_ATTRIBUTES (olddecl))) warned |= warning (OPT_Wattributes, "declaration of %q+D with attribute " "%qs follows declaration with attribute %qs", newdecl, "noinline", "always_inline"); else if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (newdecl)) && lookup_attribute ("noinline", DECL_ATTRIBUTES (olddecl))) warned |= warning (OPT_Wattributes, "declaration of %q+D with attribute " "%qs follows declaration with attribute %qs", newdecl, "always_inline", "noinline"); else if (lookup_attribute ("cold", DECL_ATTRIBUTES (newdecl)) && lookup_attribute ("hot", DECL_ATTRIBUTES (olddecl))) warned |= warning (OPT_Wattributes, "declaration of %q+D with attribute %qs follows " "declaration with attribute %qs", newdecl, "cold", "hot"); else if (lookup_attribute ("hot", DECL_ATTRIBUTES (newdecl)) && lookup_attribute ("cold", DECL_ATTRIBUTES (olddecl))) warned |= warning (OPT_Wattributes, "declaration of %q+D with attribute %qs follows " "declaration with attribute %qs", newdecl, "hot", "cold"); } else /* PARM_DECL, VAR_DECL */ { /* Redeclaration of a parameter is a constraint violation (this is not explicitly stated, but follows from C99 6.7p3 [no more than one declaration of the same identifier with no linkage in the same scope, except type tags] and 6.2.2p6 [parameters have no linkage]). We must check for a forward parameter declaration, indicated by TREE_ASM_WRITTEN on the old declaration - this is an extension, the mandatory diagnostic for which is handled by mark_forward_parm_decls. */ if (TREE_CODE (newdecl) == PARM_DECL && (!TREE_ASM_WRITTEN (olddecl) || TREE_ASM_WRITTEN (newdecl))) { error ("redefinition of parameter %q+D", newdecl); locate_old_decl (olddecl); return false; } } /* Optional warning for completely redundant decls. */ if (!warned && !pedwarned && warn_redundant_decls /* Don't warn about a function declaration followed by a definition. */ && !(TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl)) /* Don't warn about redundant redeclarations of builtins. */ && !(TREE_CODE (newdecl) == FUNCTION_DECL && !DECL_BUILT_IN (newdecl) && DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl)) /* Don't warn about an extern followed by a definition. */ && !(DECL_EXTERNAL (olddecl) && !DECL_EXTERNAL (newdecl)) /* Don't warn about forward parameter decls. */ && !(TREE_CODE (newdecl) == PARM_DECL && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl)) /* Don't warn about a variable definition following a declaration. */ && !(TREE_CODE (newdecl) == VAR_DECL && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl))) { warned = warning (OPT_Wredundant_decls, "redundant redeclaration of %q+D", newdecl); } /* Report location of previous decl/defn. */ if (warned || pedwarned) locate_old_decl (olddecl); #undef DECL_EXTERN_INLINE return retval; } /* Subroutine of duplicate_decls. NEWDECL has been found to be consistent with OLDDECL, but carries new information. Merge the new information into OLDDECL. 
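For example, after int f (); followed by the definition int f (int a) { return a; }, the surviving decl carries the composite type int (int) along with the definition's body and arguments.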
This function issues no diagnostics. */ static void merge_decls (tree newdecl, tree olddecl, tree newtype, tree oldtype) { bool new_is_definition = (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) != 0); bool new_is_prototype = (TREE_CODE (newdecl) == FUNCTION_DECL && prototype_p (TREE_TYPE (newdecl))); bool old_is_prototype = (TREE_CODE (olddecl) == FUNCTION_DECL && prototype_p (TREE_TYPE (olddecl))); /* For real parm decl following a forward decl, rechain the old decl in its new location and clear TREE_ASM_WRITTEN (it's not a forward decl anymore). */ if (TREE_CODE (newdecl) == PARM_DECL && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl)) { struct c_binding *b, **here; for (here = &current_scope->bindings; *here; here = &(*here)->prev) if ((*here)->decl == olddecl) goto found; gcc_unreachable (); found: b = *here; *here = b->prev; b->prev = current_scope->bindings; current_scope->bindings = b; TREE_ASM_WRITTEN (olddecl) = 0; } DECL_ATTRIBUTES (newdecl) = targetm.merge_decl_attributes (olddecl, newdecl); /* For typedefs use the old type, as the new type's DECL_NAME points at newdecl, which will be ggc_freed. */ if (TREE_CODE (newdecl) == TYPE_DECL) { /* But NEWTYPE might have an attribute, honor that. */ tree tem = newtype; newtype = oldtype; if (TYPE_USER_ALIGN (tem)) { if (TYPE_ALIGN (tem) > TYPE_ALIGN (newtype)) TYPE_ALIGN (newtype) = TYPE_ALIGN (tem); TYPE_USER_ALIGN (newtype) = true; } /* And remove the new type from the variants list. */ if (TYPE_NAME (TREE_TYPE (newdecl)) == newdecl) { tree remove = TREE_TYPE (newdecl); for (tree t = TYPE_MAIN_VARIANT (remove); ; t = TYPE_NEXT_VARIANT (t)) if (TYPE_NEXT_VARIANT (t) == remove) { TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (remove); break; } } } /* Merge the data types specified in the two decls. */ TREE_TYPE (newdecl) = TREE_TYPE (olddecl) = composite_type (newtype, oldtype); /* Lay the type out, unless already done. */ if (!comptypes (oldtype, TREE_TYPE (newdecl))) { if (TREE_TYPE (newdecl) != error_mark_node) layout_type (TREE_TYPE (newdecl)); if (TREE_CODE (newdecl) != FUNCTION_DECL && TREE_CODE (newdecl) != TYPE_DECL && TREE_CODE (newdecl) != CONST_DECL) layout_decl (newdecl, 0); } else { /* Since the type is OLDDECL's, make OLDDECL's size go with. */ DECL_SIZE (newdecl) = DECL_SIZE (olddecl); DECL_SIZE_UNIT (newdecl) = DECL_SIZE_UNIT (olddecl); DECL_MODE (newdecl) = DECL_MODE (olddecl); if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl)) { DECL_ALIGN (newdecl) = DECL_ALIGN (olddecl); DECL_USER_ALIGN (newdecl) |= DECL_USER_ALIGN (olddecl); } } /* Keep the old rtl since we can safely use it. */ if (HAS_RTL_P (olddecl)) COPY_DECL_RTL (olddecl, newdecl); /* Merge the type qualifiers. */ if (TREE_READONLY (newdecl)) TREE_READONLY (olddecl) = 1; if (TREE_THIS_VOLATILE (newdecl)) TREE_THIS_VOLATILE (olddecl) = 1; /* Merge deprecatedness. */ if (TREE_DEPRECATED (newdecl)) TREE_DEPRECATED (olddecl) = 1; /* If a decl is in a system header and the other isn't, keep the one on the system header. Otherwise, keep source location of definition rather than declaration and of prototype rather than non-prototype unless that prototype is built-in. 
*/ if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS) && DECL_IN_SYSTEM_HEADER (olddecl) && !DECL_IN_SYSTEM_HEADER (newdecl) ) DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl); else if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS) && DECL_IN_SYSTEM_HEADER (newdecl) && !DECL_IN_SYSTEM_HEADER (olddecl)) DECL_SOURCE_LOCATION (olddecl) = DECL_SOURCE_LOCATION (newdecl); else if ((DECL_INITIAL (newdecl) == 0 && DECL_INITIAL (olddecl) != 0) || (old_is_prototype && !new_is_prototype && !C_DECL_BUILTIN_PROTOTYPE (olddecl))) DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl); /* Merge the initialization information. */ if (DECL_INITIAL (newdecl) == 0) DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl); /* Merge the threadprivate attribute. */ if (TREE_CODE (olddecl) == VAR_DECL && C_DECL_THREADPRIVATE_P (olddecl)) C_DECL_THREADPRIVATE_P (newdecl) = 1; if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)) { /* Copy the assembler name. Currently, it can only be defined in the prototype. */ COPY_DECL_ASSEMBLER_NAME (olddecl, newdecl); /* Use visibility of whichever declaration had it specified */ if (DECL_VISIBILITY_SPECIFIED (olddecl)) { DECL_VISIBILITY (newdecl) = DECL_VISIBILITY (olddecl); DECL_VISIBILITY_SPECIFIED (newdecl) = 1; } if (TREE_CODE (newdecl) == FUNCTION_DECL) { DECL_STATIC_CONSTRUCTOR(newdecl) |= DECL_STATIC_CONSTRUCTOR(olddecl); DECL_STATIC_DESTRUCTOR (newdecl) |= DECL_STATIC_DESTRUCTOR (olddecl); DECL_NO_LIMIT_STACK (newdecl) |= DECL_NO_LIMIT_STACK (olddecl); DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (newdecl) |= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (olddecl); TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl); DECL_IS_MALLOC (newdecl) |= DECL_IS_MALLOC (olddecl); DECL_IS_OPERATOR_NEW (newdecl) |= DECL_IS_OPERATOR_NEW (olddecl); TREE_READONLY (newdecl) |= TREE_READONLY (olddecl); DECL_PURE_P (newdecl) |= DECL_PURE_P (olddecl); DECL_IS_NOVOPS (newdecl) |= DECL_IS_NOVOPS (olddecl); } /* Merge the storage class information. */ merge_weak (newdecl, olddecl); /* For functions, static overrides non-static. */ if (TREE_CODE (newdecl) == FUNCTION_DECL) { TREE_PUBLIC (newdecl) &= TREE_PUBLIC (olddecl); /* This is since we don't automatically copy the attributes of NEWDECL into OLDDECL. */ TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl); /* If this clears `static', clear it in the identifier too. */ if (!TREE_PUBLIC (olddecl)) TREE_PUBLIC (DECL_NAME (olddecl)) = 0; } } /* In c99, 'extern' declaration before (or after) 'inline' means this function is not DECL_EXTERNAL, unless 'gnu_inline' attribute is present. */ if (TREE_CODE (newdecl) == FUNCTION_DECL && !flag_gnu89_inline && (DECL_DECLARED_INLINE_P (newdecl) || DECL_DECLARED_INLINE_P (olddecl)) && (!DECL_DECLARED_INLINE_P (newdecl) || !DECL_DECLARED_INLINE_P (olddecl) || !DECL_EXTERNAL (olddecl)) && DECL_EXTERNAL (newdecl) && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl)) && !current_function_decl) DECL_EXTERNAL (newdecl) = 0; /* An inline definition following a static declaration is not DECL_EXTERNAL. */ if (new_is_definition && (DECL_DECLARED_INLINE_P (newdecl) || DECL_DECLARED_INLINE_P (olddecl)) && !TREE_PUBLIC (olddecl)) DECL_EXTERNAL (newdecl) = 0; if (DECL_EXTERNAL (newdecl)) { TREE_STATIC (newdecl) = TREE_STATIC (olddecl); DECL_EXTERNAL (newdecl) = DECL_EXTERNAL (olddecl); /* An extern decl does not override previous storage class. 
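For example, static int n; followed by extern int n; leaves n with internal linkage: C99 6.2.2p4 makes the extern redeclaration pick up the prior linkage, which is why TREE_PUBLIC is copied from OLDDECL below.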
*/ TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl); if (!DECL_EXTERNAL (newdecl)) { DECL_CONTEXT (newdecl) = DECL_CONTEXT (olddecl); DECL_COMMON (newdecl) = DECL_COMMON (olddecl); } } else { TREE_STATIC (olddecl) = TREE_STATIC (newdecl); TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl); } if (TREE_CODE (newdecl) == FUNCTION_DECL) { /* If we're redefining a function previously defined as extern inline, make sure we emit debug info for the inline before we throw it away, in case it was inlined into a function that hasn't been written out yet. */ if (new_is_definition && DECL_INITIAL (olddecl)) /* The new defn must not be inline. */ DECL_UNINLINABLE (newdecl) = 1; else { /* If either decl says `inline', this fn is inline, unless its definition was passed already. */ if (DECL_DECLARED_INLINE_P (newdecl) || DECL_DECLARED_INLINE_P (olddecl)) DECL_DECLARED_INLINE_P (newdecl) = 1; DECL_UNINLINABLE (newdecl) = DECL_UNINLINABLE (olddecl) = (DECL_UNINLINABLE (newdecl) || DECL_UNINLINABLE (olddecl)); DECL_DISREGARD_INLINE_LIMITS (newdecl) = DECL_DISREGARD_INLINE_LIMITS (olddecl) = (DECL_DISREGARD_INLINE_LIMITS (newdecl) || DECL_DISREGARD_INLINE_LIMITS (olddecl)); } if (DECL_BUILT_IN (olddecl)) { /* If redeclaring a builtin function, it stays built in. But it gets tagged as having been declared. */ DECL_BUILT_IN_CLASS (newdecl) = DECL_BUILT_IN_CLASS (olddecl); DECL_FUNCTION_CODE (newdecl) = DECL_FUNCTION_CODE (olddecl); C_DECL_DECLARED_BUILTIN (newdecl) = 1; if (new_is_prototype) { C_DECL_BUILTIN_PROTOTYPE (newdecl) = 0; if (DECL_BUILT_IN_CLASS (newdecl) == BUILT_IN_NORMAL) { enum built_in_function fncode = DECL_FUNCTION_CODE (newdecl); switch (fncode) { /* If a compatible prototype of these builtin functions is seen, assume the runtime implements it with the expected semantics. */ case BUILT_IN_STPCPY: if (builtin_decl_explicit_p (fncode)) set_builtin_decl_implicit_p (fncode, true); break; default: if (builtin_decl_explicit_p (fncode)) set_builtin_decl_declared_p (fncode, true); break; } } } else C_DECL_BUILTIN_PROTOTYPE (newdecl) = C_DECL_BUILTIN_PROTOTYPE (olddecl); } /* Preserve function specific target and optimization options */ if (DECL_FUNCTION_SPECIFIC_TARGET (olddecl) && !DECL_FUNCTION_SPECIFIC_TARGET (newdecl)) DECL_FUNCTION_SPECIFIC_TARGET (newdecl) = DECL_FUNCTION_SPECIFIC_TARGET (olddecl); if (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl) && !DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl)) DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl) = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl); /* Also preserve various other info from the definition. */ if (!new_is_definition) { tree t; DECL_RESULT (newdecl) = DECL_RESULT (olddecl); DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl); DECL_STRUCT_FUNCTION (newdecl) = DECL_STRUCT_FUNCTION (olddecl); DECL_SAVED_TREE (newdecl) = DECL_SAVED_TREE (olddecl); DECL_ARGUMENTS (newdecl) = copy_list (DECL_ARGUMENTS (olddecl)); for (t = DECL_ARGUMENTS (newdecl); t ; t = DECL_CHAIN (t)) DECL_CONTEXT (t) = newdecl; /* See if we've got a function to instantiate from. */ if (DECL_SAVED_TREE (olddecl)) DECL_ABSTRACT_ORIGIN (newdecl) = DECL_ABSTRACT_ORIGIN (olddecl); } } /* Merge the USED information. 
*/ if (TREE_USED (olddecl)) TREE_USED (newdecl) = 1; else if (TREE_USED (newdecl)) TREE_USED (olddecl) = 1; if (TREE_CODE (olddecl) == VAR_DECL || TREE_CODE (olddecl) == PARM_DECL) DECL_READ_P (newdecl) |= DECL_READ_P (olddecl); if (DECL_PRESERVE_P (olddecl)) DECL_PRESERVE_P (newdecl) = 1; else if (DECL_PRESERVE_P (newdecl)) DECL_PRESERVE_P (olddecl) = 1; /* Copy most of the decl-specific fields of NEWDECL into OLDDECL. But preserve OLDDECL's DECL_UID, DECL_CONTEXT and DECL_ARGUMENTS (if appropriate). */ { unsigned olddecl_uid = DECL_UID (olddecl); tree olddecl_context = DECL_CONTEXT (olddecl); tree olddecl_arguments = NULL; if (TREE_CODE (olddecl) == FUNCTION_DECL) olddecl_arguments = DECL_ARGUMENTS (olddecl); memcpy ((char *) olddecl + sizeof (struct tree_common), (char *) newdecl + sizeof (struct tree_common), sizeof (struct tree_decl_common) - sizeof (struct tree_common)); DECL_USER_ALIGN (olddecl) = DECL_USER_ALIGN (newdecl); switch (TREE_CODE (olddecl)) { case FUNCTION_DECL: case VAR_DECL: { struct symtab_node *snode = olddecl->decl_with_vis.symtab_node; memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common)); olddecl->decl_with_vis.symtab_node = snode; if ((DECL_EXTERNAL (olddecl) || TREE_PUBLIC (olddecl) || TREE_STATIC (olddecl)) && DECL_SECTION_NAME (newdecl) != NULL) set_decl_section_name (olddecl, DECL_SECTION_NAME (newdecl)); /* This isn't quite correct for something like int __thread x attribute ((tls_model ("local-exec"))); extern int __thread x; as we'll lose the "local-exec" model. */ if (TREE_CODE (olddecl) == VAR_DECL && DECL_THREAD_LOCAL_P (newdecl)) set_decl_tls_model (olddecl, DECL_TLS_MODEL (newdecl)); break; } case FIELD_DECL: case PARM_DECL: case LABEL_DECL: case RESULT_DECL: case CONST_DECL: case TYPE_DECL: memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common)); break; default: memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), sizeof (struct tree_decl_non_common) - sizeof (struct tree_decl_common)); } DECL_UID (olddecl) = olddecl_uid; DECL_CONTEXT (olddecl) = olddecl_context; if (TREE_CODE (olddecl) == FUNCTION_DECL) DECL_ARGUMENTS (olddecl) = olddecl_arguments; } /* If OLDDECL had its DECL_RTL instantiated, re-invoke make_decl_rtl so that encode_section_info has a chance to look at the new decl flags and attributes. */ if (DECL_RTL_SET_P (olddecl) && (TREE_CODE (olddecl) == FUNCTION_DECL || (TREE_CODE (olddecl) == VAR_DECL && TREE_STATIC (olddecl)))) make_decl_rtl (olddecl); } /* Handle when a new declaration NEWDECL has the same name as an old one OLDDECL in the same binding contour. Prints an error message if appropriate. If safely possible, alter OLDDECL to look like NEWDECL, and return true. Otherwise, return false. */ static bool duplicate_decls (tree newdecl, tree olddecl) { tree newtype = NULL, oldtype = NULL; if (!diagnose_mismatched_decls (newdecl, olddecl, &newtype, &oldtype)) { /* Avoid `unused variable' and other warnings for OLDDECL. */ TREE_NO_WARNING (olddecl) = 1; return false; } merge_decls (newdecl, olddecl, newtype, oldtype); /* The NEWDECL will no longer be needed. Before releasing the node, be sure to remove function from symbol table that might have been inserted there to record comdat group. 
However, be sure not to free DECL_STRUCT_FUNCTION, because that structure is shared between NEWDECL and OLDDECL. */ if (TREE_CODE (newdecl) == FUNCTION_DECL) DECL_STRUCT_FUNCTION (newdecl) = NULL; if (TREE_CODE (newdecl) == FUNCTION_DECL || TREE_CODE (newdecl) == VAR_DECL) { struct symtab_node *snode = symtab_node::get (newdecl); if (snode) snode->remove (); } ggc_free (newdecl); return true; } /* Check whether decl-node NEW_DECL shadows an existing declaration. */ static void warn_if_shadowing (tree new_decl) { struct c_binding *b; /* Shadow warnings wanted? */ if (!warn_shadow /* No shadow warnings for internally generated vars. */ || DECL_IS_BUILTIN (new_decl) /* No shadow warnings for vars made for inlining. */ || DECL_FROM_INLINE (new_decl)) return; /* Is anything being shadowed? Invisible decls do not count. */ for (b = I_SYMBOL_BINDING (DECL_NAME (new_decl)); b; b = b->shadowed) if (b->decl && b->decl != new_decl && !b->invisible && (b->decl == error_mark_node || diagnostic_report_warnings_p (global_dc, DECL_SOURCE_LOCATION (b->decl)))) { tree old_decl = b->decl; bool warned = false; if (old_decl == error_mark_node) { warning (OPT_Wshadow, "declaration of %q+D shadows previous " "non-variable", new_decl); break; } else if (TREE_CODE (old_decl) == PARM_DECL) warned = warning (OPT_Wshadow, "declaration of %q+D shadows a parameter", new_decl); else if (DECL_FILE_SCOPE_P (old_decl)) { /* Do not warn if a variable shadows a function, unless the variable is a function or a pointer-to-function. */ if (TREE_CODE (old_decl) == FUNCTION_DECL && TREE_CODE (new_decl) != FUNCTION_DECL && !FUNCTION_POINTER_TYPE_P (TREE_TYPE (new_decl))) continue; warned = warning_at (DECL_SOURCE_LOCATION (new_decl), OPT_Wshadow, "declaration of %qD shadows a global " "declaration", new_decl); } else if (TREE_CODE (old_decl) == FUNCTION_DECL && DECL_BUILT_IN (old_decl)) { warning (OPT_Wshadow, "declaration of %q+D shadows " "a built-in function", new_decl); break; } else warned = warning (OPT_Wshadow, "declaration of %q+D shadows a " "previous local", new_decl); if (warned) inform (DECL_SOURCE_LOCATION (old_decl), "shadowed declaration is here"); break; } } /* Record a decl-node X as belonging to the current lexical scope. Check for errors (such as an incompatible declaration for the same name already seen in the same scope). Returns either X or an old decl for the same name. If an old decl is returned, it may have been smashed to agree with what X says. */ tree pushdecl (tree x) { tree name = DECL_NAME (x); struct c_scope *scope = current_scope; struct c_binding *b; bool nested = false; location_t locus = DECL_SOURCE_LOCATION (x); /* Must set DECL_CONTEXT for everything not at file scope or DECL_FILE_SCOPE_P won't work. Local externs don't count unless they have initializers (which generate code). */ if (current_function_decl && ((TREE_CODE (x) != FUNCTION_DECL && TREE_CODE (x) != VAR_DECL) || DECL_INITIAL (x) || !DECL_EXTERNAL (x))) DECL_CONTEXT (x) = current_function_decl; /* Anonymous decls are just inserted in the scope. */ if (!name) { bind (name, x, scope, /*invisible=*/false, /*nested=*/false, locus); return x; } /* First, see if there is another declaration with the same name in the current scope. If there is, duplicate_decls may do all the work for us. If duplicate_decls returns false, that indicates two incompatible decls in the same scope; we are to silently replace the old one (duplicate_decls has issued all appropriate diagnostics).
In particular, we should not consider possible duplicates in the external scope, or shadowing. */ b = I_SYMBOL_BINDING (name); if (b && B_IN_SCOPE (b, scope)) { struct c_binding *b_ext, *b_use; tree type = TREE_TYPE (x); tree visdecl = b->decl; tree vistype = TREE_TYPE (visdecl); if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE && COMPLETE_TYPE_P (TREE_TYPE (x))) b->inner_comp = false; b_use = b; b_ext = b; /* If this is an external linkage declaration, we should check for compatibility with the type in the external scope before setting the type at this scope based on the visible information only. */ if (TREE_PUBLIC (x) && TREE_PUBLIC (visdecl)) { while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext)) b_ext = b_ext->shadowed; if (b_ext) { b_use = b_ext; if (b_use->u.type) TREE_TYPE (b_use->decl) = b_use->u.type; } } if (duplicate_decls (x, b_use->decl)) { if (b_use != b) { /* Save the updated type in the external scope and restore the proper type for this scope. */ tree thistype; if (comptypes (vistype, type)) thistype = composite_type (vistype, type); else thistype = TREE_TYPE (b_use->decl); b_use->u.type = TREE_TYPE (b_use->decl); if (TREE_CODE (b_use->decl) == FUNCTION_DECL && DECL_BUILT_IN (b_use->decl)) thistype = build_type_attribute_variant (thistype, TYPE_ATTRIBUTES (b_use->u.type)); TREE_TYPE (b_use->decl) = thistype; } return b_use->decl; } else goto skip_external_and_shadow_checks; } /* All declarations with external linkage, and all external references, go in the external scope, no matter what scope is current. However, the binding in that scope is ignored for purposes of normal name lookup. A separate binding structure is created in the requested scope; this governs the normal visibility of the symbol. The binding in the externals scope is used exclusively for detecting duplicate declarations of the same object, no matter what scope they are in; this is what we do here. (C99 6.2.7p2: All declarations that refer to the same object or function shall have compatible type; otherwise, the behavior is undefined.) */ if (DECL_EXTERNAL (x) || scope == file_scope) { tree type = TREE_TYPE (x); tree vistype = 0; tree visdecl = 0; bool type_saved = false; if (b && !B_IN_EXTERNAL_SCOPE (b) && (TREE_CODE (b->decl) == FUNCTION_DECL || TREE_CODE (b->decl) == VAR_DECL) && DECL_FILE_SCOPE_P (b->decl)) { visdecl = b->decl; vistype = TREE_TYPE (visdecl); } if (scope != file_scope && !DECL_IN_SYSTEM_HEADER (x)) warning (OPT_Wnested_externs, "nested extern declaration of %qD", x); while (b && !B_IN_EXTERNAL_SCOPE (b)) { /* If this decl might be modified, save its type. This is done here rather than when the decl is first bound because the type may change after first binding, through being completed or through attributes being added. If we encounter multiple such decls, only the first should have its type saved; the others will already have had their proper types saved and the types will not have changed as their scopes will not have been re-entered. 
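   A sketch of the situation being guarded against, with invented names:

       int nums[];                             -- file scope, incomplete; its type is saved here
       void f (void) { extern int nums[8]; }  -- completed only inside f
       int nums[8];                            -- later completed at file scope as well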
*/ if (DECL_P (b->decl) && DECL_FILE_SCOPE_P (b->decl) && !type_saved) { b->u.type = TREE_TYPE (b->decl); type_saved = true; } if (B_IN_FILE_SCOPE (b) && TREE_CODE (b->decl) == VAR_DECL && TREE_STATIC (b->decl) && TREE_CODE (TREE_TYPE (b->decl)) == ARRAY_TYPE && !TYPE_DOMAIN (TREE_TYPE (b->decl)) && TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) && !integer_zerop (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))) { /* Array type completed in inner scope, which should be diagnosed if the completion does not have size 1 and it does not get completed in the file scope. */ b->inner_comp = true; } b = b->shadowed; } /* If a matching external declaration has been found, set its type to the composite of all the types of that declaration. After the consistency checks, it will be reset to the composite of the visible types only. */ if (b && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl)) && b->u.type) TREE_TYPE (b->decl) = b->u.type; /* The point of the same_translation_unit_p check here is, we want to detect a duplicate decl for a construct like foo() { extern bar(); } ... static bar(); but not if they are in different translation units. In any case, the static does not go in the externals scope. */ if (b && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl)) && duplicate_decls (x, b->decl)) { tree thistype; if (vistype) { if (comptypes (vistype, type)) thistype = composite_type (vistype, type); else thistype = TREE_TYPE (b->decl); } else thistype = type; b->u.type = TREE_TYPE (b->decl); if (TREE_CODE (b->decl) == FUNCTION_DECL && DECL_BUILT_IN (b->decl)) thistype = build_type_attribute_variant (thistype, TYPE_ATTRIBUTES (b->u.type)); TREE_TYPE (b->decl) = thistype; bind (name, b->decl, scope, /*invisible=*/false, /*nested=*/true, locus); return b->decl; } else if (TREE_PUBLIC (x)) { if (visdecl && !b && duplicate_decls (x, visdecl)) { /* An external declaration at block scope referring to a visible entity with internal linkage. The composite type will already be correct for this scope, so we just need to fall through to make the declaration in this scope. */ nested = true; x = visdecl; } else { bind (name, x, external_scope, /*invisible=*/true, /*nested=*/false, locus); nested = true; } } } if (TREE_CODE (x) != PARM_DECL) warn_if_shadowing (x); skip_external_and_shadow_checks: if (TREE_CODE (x) == TYPE_DECL) { /* So this is a typedef, set its underlying type. */ set_underlying_type (x); /* If X is a typedef defined in the current function, record it for the purpose of implementing the -Wunused-local-typedefs warning. */ record_locally_defined_typedef (x); } bind (name, x, scope, /*invisible=*/false, nested, locus); /* If x's type is incomplete because it's based on a structure or union which has not yet been fully declared, attach it to that structure or union type, so we can go back and complete the variable declaration later, if the structure or union gets fully declared. If the input is erroneous, we can have error_mark in the type slot (e.g. "f(void a, ...)") - that doesn't count as an incomplete type. 
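   For instance, with invented names:

       struct widget;
       extern struct widget w;      -- w is attached to the incomplete tag here
       struct widget { int id; };   -- completing the tag finishes w's declaration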
*/ if (TREE_TYPE (x) != error_mark_node && !COMPLETE_TYPE_P (TREE_TYPE (x))) { tree element = TREE_TYPE (x); while (TREE_CODE (element) == ARRAY_TYPE) element = TREE_TYPE (element); element = TYPE_MAIN_VARIANT (element); if ((TREE_CODE (element) == RECORD_TYPE || TREE_CODE (element) == UNION_TYPE) && (TREE_CODE (x) != TYPE_DECL || TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE) && !COMPLETE_TYPE_P (element)) C_TYPE_INCOMPLETE_VARS (element) = tree_cons (NULL_TREE, x, C_TYPE_INCOMPLETE_VARS (element)); } return x; } /* Record X as belonging to file scope. This is used only internally by the Objective-C front end, and is limited to its needs. duplicate_decls is not called; if there is any preexisting decl for this identifier, it is an ICE. */ tree pushdecl_top_level (tree x) { tree name; bool nested = false; gcc_assert (TREE_CODE (x) == VAR_DECL || TREE_CODE (x) == CONST_DECL); name = DECL_NAME (x); gcc_assert (TREE_CODE (x) == CONST_DECL || !I_SYMBOL_BINDING (name)); if (TREE_PUBLIC (x)) { bind (name, x, external_scope, /*invisible=*/true, /*nested=*/false, UNKNOWN_LOCATION); nested = true; } if (file_scope) bind (name, x, file_scope, /*invisible=*/false, nested, UNKNOWN_LOCATION); return x; } static void implicit_decl_warning (location_t loc, tree id, tree olddecl) { if (warn_implicit_function_declaration) { bool warned; if (flag_isoc99) warned = pedwarn (loc, OPT_Wimplicit_function_declaration, "implicit declaration of function %qE", id); else warned = warning_at (loc, OPT_Wimplicit_function_declaration, G_("implicit declaration of function %qE"), id); if (olddecl && warned) locate_old_decl (olddecl); } } /* This function represents mapping of a function code FCODE to its respective header. */ static const char * header_for_builtin_fn (enum built_in_function fcode) { switch (fcode) { CASE_FLT_FN (BUILT_IN_ACOS): CASE_FLT_FN (BUILT_IN_ACOSH): CASE_FLT_FN (BUILT_IN_ASIN): CASE_FLT_FN (BUILT_IN_ASINH): CASE_FLT_FN (BUILT_IN_ATAN): CASE_FLT_FN (BUILT_IN_ATANH): CASE_FLT_FN (BUILT_IN_ATAN2): CASE_FLT_FN (BUILT_IN_CBRT): CASE_FLT_FN (BUILT_IN_CEIL): CASE_FLT_FN (BUILT_IN_COPYSIGN): CASE_FLT_FN (BUILT_IN_COS): CASE_FLT_FN (BUILT_IN_COSH): CASE_FLT_FN (BUILT_IN_ERF): CASE_FLT_FN (BUILT_IN_ERFC): CASE_FLT_FN (BUILT_IN_EXP): CASE_FLT_FN (BUILT_IN_EXP2): CASE_FLT_FN (BUILT_IN_EXPM1): CASE_FLT_FN (BUILT_IN_FABS): CASE_FLT_FN (BUILT_IN_FDIM): CASE_FLT_FN (BUILT_IN_FLOOR): CASE_FLT_FN (BUILT_IN_FMA): CASE_FLT_FN (BUILT_IN_FMAX): CASE_FLT_FN (BUILT_IN_FMIN): CASE_FLT_FN (BUILT_IN_FMOD): CASE_FLT_FN (BUILT_IN_FREXP): CASE_FLT_FN (BUILT_IN_HYPOT): CASE_FLT_FN (BUILT_IN_ILOGB): CASE_FLT_FN (BUILT_IN_LDEXP): CASE_FLT_FN (BUILT_IN_LGAMMA): CASE_FLT_FN (BUILT_IN_LLRINT): CASE_FLT_FN (BUILT_IN_LLROUND): CASE_FLT_FN (BUILT_IN_LOG): CASE_FLT_FN (BUILT_IN_LOG10): CASE_FLT_FN (BUILT_IN_LOG1P): CASE_FLT_FN (BUILT_IN_LOG2): CASE_FLT_FN (BUILT_IN_LOGB): CASE_FLT_FN (BUILT_IN_LRINT): CASE_FLT_FN (BUILT_IN_LROUND): CASE_FLT_FN (BUILT_IN_MODF): CASE_FLT_FN (BUILT_IN_NAN): CASE_FLT_FN (BUILT_IN_NEARBYINT): CASE_FLT_FN (BUILT_IN_NEXTAFTER): CASE_FLT_FN (BUILT_IN_NEXTTOWARD): CASE_FLT_FN (BUILT_IN_POW): CASE_FLT_FN (BUILT_IN_REMAINDER): CASE_FLT_FN (BUILT_IN_REMQUO): CASE_FLT_FN (BUILT_IN_RINT): CASE_FLT_FN (BUILT_IN_ROUND): CASE_FLT_FN (BUILT_IN_SCALBLN): CASE_FLT_FN (BUILT_IN_SCALBN): CASE_FLT_FN (BUILT_IN_SIN): CASE_FLT_FN (BUILT_IN_SINH): CASE_FLT_FN (BUILT_IN_SINCOS): CASE_FLT_FN (BUILT_IN_SQRT): CASE_FLT_FN (BUILT_IN_TAN): CASE_FLT_FN (BUILT_IN_TANH): CASE_FLT_FN (BUILT_IN_TGAMMA): CASE_FLT_FN (BUILT_IN_TRUNC): case 
BUILT_IN_ISINF: case BUILT_IN_ISNAN: return "<math.h>"; CASE_FLT_FN (BUILT_IN_CABS): CASE_FLT_FN (BUILT_IN_CACOS): CASE_FLT_FN (BUILT_IN_CACOSH): CASE_FLT_FN (BUILT_IN_CARG): CASE_FLT_FN (BUILT_IN_CASIN): CASE_FLT_FN (BUILT_IN_CASINH): CASE_FLT_FN (BUILT_IN_CATAN): CASE_FLT_FN (BUILT_IN_CATANH): CASE_FLT_FN (BUILT_IN_CCOS): CASE_FLT_FN (BUILT_IN_CCOSH): CASE_FLT_FN (BUILT_IN_CEXP): CASE_FLT_FN (BUILT_IN_CIMAG): CASE_FLT_FN (BUILT_IN_CLOG): CASE_FLT_FN (BUILT_IN_CONJ): CASE_FLT_FN (BUILT_IN_CPOW): CASE_FLT_FN (BUILT_IN_CPROJ): CASE_FLT_FN (BUILT_IN_CREAL): CASE_FLT_FN (BUILT_IN_CSIN): CASE_FLT_FN (BUILT_IN_CSINH): CASE_FLT_FN (BUILT_IN_CSQRT): CASE_FLT_FN (BUILT_IN_CTAN): CASE_FLT_FN (BUILT_IN_CTANH): return "<complex.h>"; case BUILT_IN_MEMCHR: case BUILT_IN_MEMCMP: case BUILT_IN_MEMCPY: case BUILT_IN_MEMMOVE: case BUILT_IN_MEMSET: case BUILT_IN_STRCAT: case BUILT_IN_STRCHR: case BUILT_IN_STRCMP: case BUILT_IN_STRCPY: case BUILT_IN_STRCSPN: case BUILT_IN_STRLEN: case BUILT_IN_STRNCAT: case BUILT_IN_STRNCMP: case BUILT_IN_STRNCPY: case BUILT_IN_STRPBRK: case BUILT_IN_STRRCHR: case BUILT_IN_STRSPN: case BUILT_IN_STRSTR: return "<string.h>"; case BUILT_IN_FPRINTF: case BUILT_IN_PUTC: case BUILT_IN_FPUTC: case BUILT_IN_FPUTS: case BUILT_IN_FSCANF: case BUILT_IN_FWRITE: case BUILT_IN_PRINTF: case BUILT_IN_PUTCHAR: case BUILT_IN_PUTS: case BUILT_IN_SCANF: case BUILT_IN_SNPRINTF: case BUILT_IN_SPRINTF: case BUILT_IN_SSCANF: case BUILT_IN_VFPRINTF: case BUILT_IN_VFSCANF: case BUILT_IN_VPRINTF: case BUILT_IN_VSCANF: case BUILT_IN_VSNPRINTF: case BUILT_IN_VSPRINTF: case BUILT_IN_VSSCANF: return "<stdio.h>"; case BUILT_IN_ISALNUM: case BUILT_IN_ISALPHA: case BUILT_IN_ISBLANK: case BUILT_IN_ISCNTRL: case BUILT_IN_ISDIGIT: case BUILT_IN_ISGRAPH: case BUILT_IN_ISLOWER: case BUILT_IN_ISPRINT: case BUILT_IN_ISPUNCT: case BUILT_IN_ISSPACE: case BUILT_IN_ISUPPER: case BUILT_IN_ISXDIGIT: case BUILT_IN_TOLOWER: case BUILT_IN_TOUPPER: return "<ctype.h>"; case BUILT_IN_ISWALNUM: case BUILT_IN_ISWALPHA: case BUILT_IN_ISWBLANK: case BUILT_IN_ISWCNTRL: case BUILT_IN_ISWDIGIT: case BUILT_IN_ISWGRAPH: case BUILT_IN_ISWLOWER: case BUILT_IN_ISWPRINT: case BUILT_IN_ISWPUNCT: case BUILT_IN_ISWSPACE: case BUILT_IN_ISWUPPER: case BUILT_IN_ISWXDIGIT: case BUILT_IN_TOWLOWER: case BUILT_IN_TOWUPPER: return "<wctype.h>"; case BUILT_IN_ABORT: case BUILT_IN_ABS: case BUILT_IN_CALLOC: case BUILT_IN_EXIT: case BUILT_IN_FREE: case BUILT_IN_LABS: case BUILT_IN_LLABS: case BUILT_IN_MALLOC: case BUILT_IN_REALLOC: case BUILT_IN__EXIT2: case BUILT_IN_ALIGNED_ALLOC: return "<stdlib.h>"; case BUILT_IN_IMAXABS: return "<inttypes.h>"; case BUILT_IN_STRFTIME: return "<time.h>"; default: return NULL; } } /* Generate an implicit declaration for identifier FUNCTIONID at LOC as a function of type int (). */ tree implicitly_declare (location_t loc, tree functionid) { struct c_binding *b; tree decl = 0; tree asmspec_tree; for (b = I_SYMBOL_BINDING (functionid); b; b = b->shadowed) { if (B_IN_SCOPE (b, external_scope)) { decl = b->decl; break; } } if (decl) { if (decl == error_mark_node) return decl; /* FIXME: Objective-C has weird not-really-builtin functions which are supposed to be visible automatically. They wind up in the external scope because they're pushed before the file scope gets created. Catch this here and rebind them into the file scope. 
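   As for the ordinary path below, the classic case is roughly:

       double d = sqrt (2.0);   -- no <math.h>: implicitly declared as int sqrt ()

   The implicit declaration clashes with the built-in, so we warn and, where
   possible, suggest the header to include.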
*/ if (!DECL_BUILT_IN (decl) && DECL_IS_BUILTIN (decl)) { bind (functionid, decl, file_scope, /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl)); return decl; } else { tree newtype = default_function_type; if (b->u.type) TREE_TYPE (decl) = b->u.type; /* Implicit declaration of a function already declared (somehow) in a different scope, or as a built-in. If this is the first time this has happened, warn; then recycle the old declaration but with the new type. */ if (!C_DECL_IMPLICIT (decl)) { implicit_decl_warning (loc, functionid, decl); C_DECL_IMPLICIT (decl) = 1; } if (DECL_BUILT_IN (decl)) { newtype = build_type_attribute_variant (newtype, TYPE_ATTRIBUTES (TREE_TYPE (decl))); if (!comptypes (newtype, TREE_TYPE (decl))) { bool warned = warning_at (loc, 0, "incompatible implicit " "declaration of built-in " "function %qD", decl); /* See if we can hint which header to include. */ const char *header = header_for_builtin_fn (DECL_FUNCTION_CODE (decl)); if (header != NULL && warned) inform (loc, "include %qs or provide a declaration of %qD", header, decl); newtype = TREE_TYPE (decl); } } else { if (!comptypes (newtype, TREE_TYPE (decl))) { error_at (loc, "incompatible implicit declaration of " "function %qD", decl); locate_old_decl (decl); } } b->u.type = TREE_TYPE (decl); TREE_TYPE (decl) = newtype; bind (functionid, decl, current_scope, /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl)); return decl; } } /* Not seen before. */ decl = build_decl (loc, FUNCTION_DECL, functionid, default_function_type); DECL_EXTERNAL (decl) = 1; TREE_PUBLIC (decl) = 1; C_DECL_IMPLICIT (decl) = 1; implicit_decl_warning (loc, functionid, 0); asmspec_tree = maybe_apply_renaming_pragma (decl, /*asmname=*/NULL); if (asmspec_tree) set_user_assembler_name (decl, TREE_STRING_POINTER (asmspec_tree)); /* C89 says implicit declarations are in the innermost block. So we record the decl in the standard fashion. */ decl = pushdecl (decl); /* No need to call objc_check_decl here - it's a function type. */ rest_of_decl_compilation (decl, 0, 0); /* Write a record describing this implicit function declaration to the prototypes file (if requested). */ gen_aux_info_record (decl, 0, 1, 0); /* Possibly apply some default attributes to this implicit declaration. */ decl_attributes (&decl, NULL_TREE, 0); return decl; } /* Issue an error message for a reference to an undeclared variable ID, including a reference to a builtin outside of function-call context. Establish a binding of the identifier to error_mark_node in an appropriate scope, which will suppress further errors for the same identifier. The error message should be given location LOC. */ void undeclared_variable (location_t loc, tree id) { static bool already = false; struct c_scope *scope; if (current_function_decl == 0) { error_at (loc, "%qE undeclared here (not in a function)", id); scope = current_scope; } else { if (!objc_diagnose_private_ivar (id)) error_at (loc, "%qE undeclared (first use in this function)", id); if (!already) { inform (loc, "each undeclared identifier is reported only" " once for each function it appears in"); already = true; } /* If we are parsing old-style parameter decls, current_function_decl will be nonnull but current_function_scope will be null. */ scope = current_function_scope ? 
current_function_scope : current_scope; } bind (id, error_mark_node, scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); } /* Subroutine of lookup_label, declare_label, define_label: construct a LABEL_DECL with all the proper frills. Also create a struct c_label_vars initialized for the current scope. */ static tree make_label (location_t location, tree name, bool defining, struct c_label_vars **p_label_vars) { tree label = build_decl (location, LABEL_DECL, name, void_type_node); DECL_CONTEXT (label) = current_function_decl; DECL_MODE (label) = VOIDmode; c_label_vars *label_vars = ggc_alloc<c_label_vars> (); label_vars->shadowed = NULL; set_spot_bindings (&label_vars->label_bindings, defining); label_vars->decls_in_scope = make_tree_vector (); label_vars->gotos = NULL; *p_label_vars = label_vars; return label; } /* Get the LABEL_DECL corresponding to identifier NAME as a label. Create one if none exists so far for the current function. This is called when a label is used in a goto expression or has its address taken. */ tree lookup_label (tree name) { tree label; struct c_label_vars *label_vars; if (current_function_scope == 0) { error ("label %qE referenced outside of any function", name); return 0; } /* Use a label already defined or ref'd with this name, but not if it is inherited from a containing function and wasn't declared using __label__. */ label = I_LABEL_DECL (name); if (label && (DECL_CONTEXT (label) == current_function_decl || C_DECLARED_LABEL_FLAG (label))) { /* If the label has only been declared, update its apparent location to point here, for better diagnostics if it turns out not to have been defined. */ if (DECL_INITIAL (label) == NULL_TREE) DECL_SOURCE_LOCATION (label) = input_location; return label; } /* No label binding for that identifier; make one. */ label = make_label (input_location, name, false, &label_vars); /* Ordinary labels go in the current function scope. */ bind_label (name, label, current_function_scope, label_vars); return label; } /* Issue a warning about DECL for a goto statement at GOTO_LOC going to LABEL. */ static void warn_about_goto (location_t goto_loc, tree label, tree decl) { if (variably_modified_type_p (TREE_TYPE (decl), NULL_TREE)) error_at (goto_loc, "jump into scope of identifier with variably modified type"); else warning_at (goto_loc, OPT_Wjump_misses_init, "jump skips variable initialization"); inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label); inform (DECL_SOURCE_LOCATION (decl), "%qD declared here", decl); } /* Look up a label because of a goto statement. This is like lookup_label, but also issues any appropriate warnings. */ tree lookup_label_for_goto (location_t loc, tree name) { tree label; struct c_label_vars *label_vars; unsigned int ix; tree decl; label = lookup_label (name); if (label == NULL_TREE) return NULL_TREE; /* If we are jumping to a different function, we can't issue any useful warnings. */ if (DECL_CONTEXT (label) != current_function_decl) { gcc_assert (C_DECLARED_LABEL_FLAG (label)); return label; } label_vars = I_LABEL_BINDING (name)->u.label; /* If the label has not yet been defined, then push this goto on a list for possible later warnings. */ if (label_vars->label_bindings.scope == NULL) { c_goto_bindings *g = ggc_alloc<c_goto_bindings> (); g->loc = loc; set_spot_bindings (&g->goto_bindings, true); vec_safe_push (label_vars->gotos, g); return label; } /* If there are any decls in label_vars->decls_in_scope, then this goto has missed the declaration of the decl. 
This happens for a case like int i = 1; lab: ... goto lab; Issue a warning or error. */ FOR_EACH_VEC_SAFE_ELT (label_vars->decls_in_scope, ix, decl) warn_about_goto (loc, label, decl); if (label_vars->label_bindings.left_stmt_expr) { error_at (loc, "jump into statement expression"); inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label); } return label; } /* Make a label named NAME in the current function, shadowing silently any that may be inherited from containing functions or containing scopes. This is called for __label__ declarations. */ tree declare_label (tree name) { struct c_binding *b = I_LABEL_BINDING (name); tree label; struct c_label_vars *label_vars; /* Check to make sure that the label hasn't already been declared at this scope */ if (b && B_IN_CURRENT_SCOPE (b)) { error ("duplicate label declaration %qE", name); locate_old_decl (b->decl); /* Just use the previous declaration. */ return b->decl; } label = make_label (input_location, name, false, &label_vars); C_DECLARED_LABEL_FLAG (label) = 1; /* Declared labels go in the current scope. */ bind_label (name, label, current_scope, label_vars); return label; } /* When we define a label, issue any appropriate warnings if there are any gotos earlier in the function which jump to this label. */ static void check_earlier_gotos (tree label, struct c_label_vars* label_vars) { unsigned int ix; struct c_goto_bindings *g; FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) { struct c_binding *b; struct c_scope *scope; /* We have a goto to this label. The goto is going forward. In g->scope, the goto is going to skip any binding which was defined after g->bindings_in_scope. */ if (g->goto_bindings.scope->has_jump_unsafe_decl) { for (b = g->goto_bindings.scope->bindings; b != g->goto_bindings.bindings_in_scope; b = b->prev) { if (decl_jump_unsafe (b->decl)) warn_about_goto (g->loc, label, b->decl); } } /* We also need to warn about decls defined in any scopes between the scope of the label and the scope of the goto. */ for (scope = label_vars->label_bindings.scope; scope != g->goto_bindings.scope; scope = scope->outer) { gcc_assert (scope != NULL); if (scope->has_jump_unsafe_decl) { if (scope == label_vars->label_bindings.scope) b = label_vars->label_bindings.bindings_in_scope; else b = scope->bindings; for (; b != NULL; b = b->prev) { if (decl_jump_unsafe (b->decl)) warn_about_goto (g->loc, label, b->decl); } } } if (g->goto_bindings.stmt_exprs > 0) { error_at (g->loc, "jump into statement expression"); inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label); } } /* Now that the label is defined, we will issue warnings about subsequent gotos to this label when we see them. */ vec_safe_truncate (label_vars->gotos, 0); label_vars->gotos = NULL; } /* Define a label, specifying the location in the source file. Return the LABEL_DECL node for the label, if the definition is valid. Otherwise return 0. */ tree define_label (location_t location, tree name) { /* Find any preexisting label with this name. It is an error if that label has already been defined in this function, or if there is a containing function with a declared label with the same name. 
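   For example:

       void f (void) { again: ; goto again; again: ; }   -- duplicate label 'again'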
*/ tree label = I_LABEL_DECL (name); if (label && ((DECL_CONTEXT (label) == current_function_decl && DECL_INITIAL (label) != 0) || (DECL_CONTEXT (label) != current_function_decl && C_DECLARED_LABEL_FLAG (label)))) { error_at (location, "duplicate label %qD", label); locate_old_decl (label); return 0; } else if (label && DECL_CONTEXT (label) == current_function_decl) { struct c_label_vars *label_vars = I_LABEL_BINDING (name)->u.label; /* The label has been used or declared already in this function, but not defined. Update its location to point to this definition. */ DECL_SOURCE_LOCATION (label) = location; set_spot_bindings (&label_vars->label_bindings, true); /* Issue warnings as required about any goto statements from earlier in the function. */ check_earlier_gotos (label, label_vars); } else { struct c_label_vars *label_vars; /* No label binding for that identifier; make one. */ label = make_label (location, name, true, &label_vars); /* Ordinary labels go in the current function scope. */ bind_label (name, label, current_function_scope, label_vars); } if (!in_system_header_at (input_location) && lookup_name (name)) warning_at (location, OPT_Wtraditional, "traditional C lacks a separate namespace " "for labels, identifier %qE conflicts", name); /* Mark label as having been defined. */ DECL_INITIAL (label) = error_mark_node; return label; } /* Get the bindings for a new switch statement. This is used to issue warnings as appropriate for jumps from the switch to case or default labels. */ struct c_spot_bindings * c_get_switch_bindings (void) { struct c_spot_bindings *switch_bindings; switch_bindings = XNEW (struct c_spot_bindings); set_spot_bindings (switch_bindings, true); return switch_bindings; } void c_release_switch_bindings (struct c_spot_bindings *bindings) { gcc_assert (bindings->stmt_exprs == 0 && !bindings->left_stmt_expr); XDELETE (bindings); } /* This is called at the point of a case or default label to issue warnings about decls as needed. It returns true if it found an error, not just a warning. */ bool c_check_switch_jump_warnings (struct c_spot_bindings *switch_bindings, location_t switch_loc, location_t case_loc) { bool saw_error; struct c_scope *scope; saw_error = false; for (scope = current_scope; scope != switch_bindings->scope; scope = scope->outer) { struct c_binding *b; gcc_assert (scope != NULL); if (!scope->has_jump_unsafe_decl) continue; for (b = scope->bindings; b != NULL; b = b->prev) { if (decl_jump_unsafe (b->decl)) { if (variably_modified_type_p (TREE_TYPE (b->decl), NULL_TREE)) { saw_error = true; error_at (case_loc, ("switch jumps into scope of identifier with " "variably modified type")); } else warning_at (case_loc, OPT_Wjump_misses_init, "switch jumps over variable initialization"); inform (switch_loc, "switch starts here"); inform (DECL_SOURCE_LOCATION (b->decl), "%qD declared here", b->decl); } } } if (switch_bindings->stmt_exprs > 0) { saw_error = true; error_at (case_loc, "switch jumps into statement expression"); inform (switch_loc, "switch starts here"); } return saw_error; } /* Given NAME, an IDENTIFIER_NODE, return the structure (or union or enum) definition for that name. If THISLEVEL_ONLY is nonzero, searches only the current_scope. CODE says which kind of type the caller wants; it is RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE. If PLOC is not NULL and this returns non-null, it sets *PLOC to the location where the tag was defined. If the wrong kind of type is found, an error is reported. 
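   For instance:

       struct foo { int i; };
       union foo;    -- wrong kind of tag: 'foo' names a struct, not a union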
*/ static tree lookup_tag (enum tree_code code, tree name, int thislevel_only, location_t *ploc) { struct c_binding *b = I_TAG_BINDING (name); int thislevel = 0; if (!b || !b->decl) return 0; /* We only care about whether it's in this level if thislevel_only was set or it might be a type clash. */ if (thislevel_only || TREE_CODE (b->decl) != code) { /* For our purposes, a tag in the external scope is the same as a tag in the file scope. (Primarily relevant to Objective-C and its builtin structure tags, which get pushed before the file scope is created.) */ if (B_IN_CURRENT_SCOPE (b) || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b))) thislevel = 1; } if (thislevel_only && !thislevel) return 0; if (TREE_CODE (b->decl) != code) { /* Definition isn't the kind we were looking for. */ pending_invalid_xref = name; pending_invalid_xref_location = input_location; /* If in the same binding level as a declaration as a tag of a different type, this must not be allowed to shadow that tag, so give the error immediately. (For example, "struct foo; union foo;" is invalid.) */ if (thislevel) pending_xref_error (); } if (ploc != NULL) *ploc = b->locus; return b->decl; } /* Print an error message now for a recent invalid struct, union or enum cross reference. We don't print them immediately because they are not invalid when used in the `struct foo;' construct for shadowing. */ void pending_xref_error (void) { if (pending_invalid_xref != 0) error_at (pending_invalid_xref_location, "%qE defined as wrong kind of tag", pending_invalid_xref); pending_invalid_xref = 0; } /* Look up NAME in the current scope and its superiors in the namespace of variables, functions and typedefs. Return a ..._DECL node of some kind representing its definition, or return 0 if it is undefined. */ tree lookup_name (tree name) { struct c_binding *b = I_SYMBOL_BINDING (name); if (b && !b->invisible) { maybe_record_typedef_use (b->decl); return b->decl; } return 0; } /* Similar to `lookup_name' but look only at the indicated scope. */ static tree lookup_name_in_scope (tree name, struct c_scope *scope) { struct c_binding *b; for (b = I_SYMBOL_BINDING (name); b; b = b->shadowed) if (B_IN_SCOPE (b, scope)) return b->decl; return 0; } /* Create the predefined scalar types of C, and some nodes representing standard constants (0, 1, (void *) 0). Initialize the global scope. Make definitions for built-in primitive functions. */ void c_init_decl_processing (void) { location_t save_loc = input_location; /* Initialize reserved words for parser. */ c_parse_init (); current_function_decl = 0; gcc_obstack_init (&parser_obstack); /* Make the externals scope. */ push_scope (); external_scope = current_scope; /* Declarations from c_common_nodes_and_builtins must not be associated with this input file, lest we get differences between using and not using preprocessed headers. */ input_location = BUILTINS_LOCATION; c_common_nodes_and_builtins (); /* In C, comparisons and TRUTH_* expressions have type int. */ truthvalue_type_node = integer_type_node; truthvalue_true_node = integer_one_node; truthvalue_false_node = integer_zero_node; /* Even in C99, which has a real boolean type. */ pushdecl (build_decl (UNKNOWN_LOCATION, TYPE_DECL, get_identifier ("_Bool"), boolean_type_node)); input_location = save_loc; make_fname_decl = c_make_fname_decl; start_fname_decls (); } /* Create the VAR_DECL at LOC for __FUNCTION__ etc. 
ID is the name to give the decl, NAME is the initialization string and TYPE_DEP indicates whether NAME depended on the type of the function. As we don't yet implement delayed emission of static data, we mark the decl as emitted so it is not placed in the output. Anything using it must therefore pull out the STRING_CST initializer directly. FIXME. */ static tree c_make_fname_decl (location_t loc, tree id, int type_dep) { const char *name = fname_as_string (type_dep); tree decl, type, init; size_t length = strlen (name); type = build_array_type (char_type_node, build_index_type (size_int (length))); type = c_build_qualified_type (type, TYPE_QUAL_CONST); decl = build_decl (loc, VAR_DECL, id, type); TREE_STATIC (decl) = 1; TREE_READONLY (decl) = 1; DECL_ARTIFICIAL (decl) = 1; init = build_string (length + 1, name); free (CONST_CAST (char *, name)); TREE_TYPE (init) = type; DECL_INITIAL (decl) = init; TREE_USED (decl) = 1; if (current_function_decl /* For invalid programs like this: void foo() const char* p = __FUNCTION__; the __FUNCTION__ is believed to appear in K&R style function parameter declarator. In that case we still don't have function_scope. */ && (!seen_error () || current_function_scope)) { DECL_CONTEXT (decl) = current_function_decl; bind (id, decl, current_function_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); } finish_decl (decl, loc, init, NULL_TREE, NULL_TREE); return decl; } tree c_builtin_function (tree decl) { tree type = TREE_TYPE (decl); tree id = DECL_NAME (decl); const char *name = IDENTIFIER_POINTER (id); C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type); /* Should never be called on a symbol with a preexisting meaning. */ gcc_assert (!I_SYMBOL_BINDING (id)); bind (id, decl, external_scope, /*invisible=*/true, /*nested=*/false, UNKNOWN_LOCATION); /* Builtins in the implementation namespace are made visible without needing to be explicitly declared. See push_file_scope. */ if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1]))) { DECL_CHAIN (decl) = visible_builtins; visible_builtins = decl; } return decl; } tree c_builtin_function_ext_scope (tree decl) { tree type = TREE_TYPE (decl); tree id = DECL_NAME (decl); const char *name = IDENTIFIER_POINTER (id); C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type); if (external_scope) bind (id, decl, external_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); /* Builtins in the implementation namespace are made visible without needing to be explicitly declared. See push_file_scope. */ if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1]))) { DECL_CHAIN (decl) = visible_builtins; visible_builtins = decl; } return decl; } /* Called when a declaration is seen that contains no names to declare. If its type is a reference to a structure, union or enum inherited from a containing scope, shadow that tag name for the current scope with a forward reference. If its type defines a new named structure or union or defines an enum, it is valid but we need not do anything here. Otherwise, it is an error. */ void shadow_tag (const struct c_declspecs *declspecs) { shadow_tag_warned (declspecs, 0); } /* WARNED is 1 if we have done a pedwarn, 2 if we have done a warning, but no pedwarn. 
*/ void shadow_tag_warned (const struct c_declspecs *declspecs, int warned) { bool found_tag = false; if (declspecs->type && !declspecs->default_int_p && !declspecs->typedef_p) { tree value = declspecs->type; enum tree_code code = TREE_CODE (value); if (code == RECORD_TYPE || code == UNION_TYPE || code == ENUMERAL_TYPE) /* Used to test also that TYPE_SIZE (value) != 0. That caused warning for `struct foo;' at top level in the file. */ { tree name = TYPE_NAME (value); tree t; found_tag = true; if (declspecs->restrict_p) { error ("invalid use of %<restrict%>"); warned = 1; } if (name == 0) { if (warned != 1 && code != ENUMERAL_TYPE) /* Empty unnamed enum OK */ { pedwarn (input_location, 0, "unnamed struct/union that defines no instances"); warned = 1; } } else if (declspecs->typespec_kind != ctsk_tagdef && declspecs->typespec_kind != ctsk_tagfirstref && declspecs->storage_class != csc_none) { if (warned != 1) pedwarn (input_location, 0, "empty declaration with storage class specifier " "does not redeclare tag"); warned = 1; pending_xref_error (); } else if (declspecs->typespec_kind != ctsk_tagdef && declspecs->typespec_kind != ctsk_tagfirstref && (declspecs->const_p || declspecs->volatile_p || declspecs->atomic_p || declspecs->restrict_p || declspecs->address_space)) { if (warned != 1) pedwarn (input_location, 0, "empty declaration with type qualifier " "does not redeclare tag"); warned = 1; pending_xref_error (); } else if (declspecs->typespec_kind != ctsk_tagdef && declspecs->typespec_kind != ctsk_tagfirstref && declspecs->alignas_p) { if (warned != 1) pedwarn (input_location, 0, "empty declaration with %<_Alignas%> " "does not redeclare tag"); warned = 1; pending_xref_error (); } else { pending_invalid_xref = 0; t = lookup_tag (code, name, 1, NULL); if (t == 0) { t = make_node (code); pushtag (input_location, name, t); } } } else { if (warned != 1 && !in_system_header_at (input_location)) { pedwarn (input_location, 0, "useless type name in empty declaration"); warned = 1; } } } else if (warned != 1 && !in_system_header_at (input_location) && declspecs->typedef_p) { pedwarn (input_location, 0, "useless type name in empty declaration"); warned = 1; } pending_invalid_xref = 0; if (declspecs->inline_p) { error ("%<inline%> in empty declaration"); warned = 1; } if (declspecs->noreturn_p) { error ("%<_Noreturn%> in empty declaration"); warned = 1; } if (current_scope == file_scope && declspecs->storage_class == csc_auto) { error ("%<auto%> in file-scope empty declaration"); warned = 1; } if (current_scope == file_scope && declspecs->storage_class == csc_register) { error ("%<register%> in file-scope empty declaration"); warned = 1; } if (!warned && !in_system_header_at (input_location) && declspecs->storage_class != csc_none) { warning (0, "useless storage class specifier in empty declaration"); warned = 2; } if (!warned && !in_system_header_at (input_location) && declspecs->thread_p) { warning (0, "useless %qs in empty declaration", declspecs->thread_gnu_p ? 
"__thread" : "_Thread_local"); warned = 2; } if (!warned && !in_system_header_at (input_location) && (declspecs->const_p || declspecs->volatile_p || declspecs->atomic_p || declspecs->restrict_p || declspecs->address_space)) { warning (0, "useless type qualifier in empty declaration"); warned = 2; } if (!warned && !in_system_header_at (input_location) && declspecs->alignas_p) { warning (0, "useless %<_Alignas%> in empty declaration"); warned = 2; } if (warned != 1) { if (!found_tag) pedwarn (input_location, 0, "empty declaration"); } } /* Return the qualifiers from SPECS as a bitwise OR of TYPE_QUAL_* bits. SPECS represents declaration specifiers that the grammar only permits to contain type qualifiers and attributes. */ int quals_from_declspecs (const struct c_declspecs *specs) { int quals = ((specs->const_p ? TYPE_QUAL_CONST : 0) | (specs->volatile_p ? TYPE_QUAL_VOLATILE : 0) | (specs->restrict_p ? TYPE_QUAL_RESTRICT : 0) | (specs->atomic_p ? TYPE_QUAL_ATOMIC : 0) | (ENCODE_QUAL_ADDR_SPACE (specs->address_space))); gcc_assert (!specs->type && !specs->decl_attr && specs->typespec_word == cts_none && specs->storage_class == csc_none && !specs->typedef_p && !specs->explicit_signed_p && !specs->deprecated_p && !specs->long_p && !specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p && !specs->inline_p && !specs->noreturn_p && !specs->thread_p); return quals; } /* Construct an array declarator. LOC is the location of the beginning of the array (usually the opening brace). EXPR is the expression inside [], or NULL_TREE. QUALS are the type qualifiers inside the [] (to be applied to the pointer to which a parameter array is converted). STATIC_P is true if "static" is inside the [], false otherwise. VLA_UNSPEC_P is true if the array is [*], a VLA of unspecified length which is nevertheless a complete type, false otherwise. The field for the contained declarator is left to be filled in by set_array_declarator_inner. */ struct c_declarator * build_array_declarator (location_t loc, tree expr, struct c_declspecs *quals, bool static_p, bool vla_unspec_p) { struct c_declarator *declarator = XOBNEW (&parser_obstack, struct c_declarator); declarator->id_loc = loc; declarator->kind = cdk_array; declarator->declarator = 0; declarator->u.array.dimen = expr; if (quals) { declarator->u.array.attrs = quals->attrs; declarator->u.array.quals = quals_from_declspecs (quals); } else { declarator->u.array.attrs = NULL_TREE; declarator->u.array.quals = 0; } declarator->u.array.static_p = static_p; declarator->u.array.vla_unspec_p = vla_unspec_p; if (static_p || quals != NULL) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support %<static%> or type " "qualifiers in parameter array declarators"); if (vla_unspec_p) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support %<[*]%> array declarators"); if (vla_unspec_p) { if (!current_scope->parm_flag) { /* C99 6.7.5.2p4 */ error_at (loc, "%<[*]%> not allowed in other than " "function prototype scope"); declarator->u.array.vla_unspec_p = false; return NULL; } current_scope->had_vla_unspec = true; } return declarator; } /* Set the contained declarator of an array declarator. DECL is the declarator, as constructed by build_array_declarator; INNER is what appears on the left of the []. */ struct c_declarator * set_array_declarator_inner (struct c_declarator *decl, struct c_declarator *inner) { decl->declarator = inner; return decl; } /* INIT is a constructor that forms DECL's initializer. 
If the final element initializes a flexible array field, add the size of that initializer to DECL's size. */ static void add_flexible_array_elts_to_size (tree decl, tree init) { tree elt, type; if (vec_safe_is_empty (CONSTRUCTOR_ELTS (init))) return; elt = CONSTRUCTOR_ELTS (init)->last ().value; type = TREE_TYPE (elt); if (TREE_CODE (type) == ARRAY_TYPE && TYPE_SIZE (type) == NULL_TREE && TYPE_DOMAIN (type) != NULL_TREE && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE) { complete_array_type (&type, elt, false); DECL_SIZE (decl) = size_binop (PLUS_EXPR, DECL_SIZE (decl), TYPE_SIZE (type)); DECL_SIZE_UNIT (decl) = size_binop (PLUS_EXPR, DECL_SIZE_UNIT (decl), TYPE_SIZE_UNIT (type)); } } /* Decode a "typename", such as "int **", returning a ..._TYPE node. Set *EXPR, if EXPR not NULL, to any expression to be evaluated before the type name, and set *EXPR_CONST_OPERANDS, if EXPR_CONST_OPERANDS not NULL, to indicate whether the type name may appear in a constant expression. */ tree groktypename (struct c_type_name *type_name, tree *expr, bool *expr_const_operands) { tree type; tree attrs = type_name->specs->attrs; type_name->specs->attrs = NULL_TREE; type = grokdeclarator (type_name->declarator, type_name->specs, TYPENAME, false, NULL, &attrs, expr, expr_const_operands, DEPRECATED_NORMAL); /* Apply attributes. */ decl_attributes (&type, attrs, 0); return type; } /* Wrapper for decl_attributes that adds some implicit attributes to VAR_DECLs or FUNCTION_DECLs. */ static tree c_decl_attributes (tree *node, tree attributes, int flags) { /* Add implicit "omp declare target" attribute if requested. */ if (current_omp_declare_target_attribute && ((TREE_CODE (*node) == VAR_DECL && (TREE_STATIC (*node) || DECL_EXTERNAL (*node))) || TREE_CODE (*node) == FUNCTION_DECL)) { if (TREE_CODE (*node) == VAR_DECL && ((DECL_CONTEXT (*node) && TREE_CODE (DECL_CONTEXT (*node)) == FUNCTION_DECL) || (current_function_decl && !DECL_EXTERNAL (*node)))) error ("%q+D in block scope inside of declare target directive", *node); else if (TREE_CODE (*node) == VAR_DECL && !lang_hooks.types.omp_mappable_type (TREE_TYPE (*node))) error ("%q+D in declare target directive does not have mappable type", *node); else attributes = tree_cons (get_identifier ("omp declare target"), NULL_TREE, attributes); } return decl_attributes (node, attributes, flags); } /* Decode a declarator in an ordinary declaration or data definition. This is called as soon as the type information and variable name have been parsed, before parsing the initializer if any. Here we create the ..._DECL node, fill in its type, and put it on the list of decls for the current context. The ..._DECL node is returned as the value. Exception: for arrays where the length is not specified, the type is left null, to be filled in by `finish_decl'. Function definitions do not come here; they go to start_function instead. However, external and forward declarations of functions do go through here. Structure field declarations are done by grokfield and not through here. */ tree start_decl (struct c_declarator *declarator, struct c_declspecs *declspecs, bool initialized, tree attributes) { tree decl; tree tem; tree expr = NULL_TREE; enum deprecated_states deprecated_state = DEPRECATED_NORMAL; /* An object declared as __attribute__((deprecated)) suppresses warnings of uses of other deprecated items. 
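   For instance, with invented names:

       typedef int old_api_t __attribute__ ((deprecated));
       old_api_t handle __attribute__ ((deprecated));   -- no warning about old_api_t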
*/ if (lookup_attribute ("deprecated", attributes)) deprecated_state = DEPRECATED_SUPPRESS; decl = grokdeclarator (declarator, declspecs, NORMAL, initialized, NULL, &attributes, &expr, NULL, deprecated_state); if (!decl || decl == error_mark_node) return NULL_TREE; if (expr) add_stmt (fold_convert (void_type_node, expr)); if (TREE_CODE (decl) != FUNCTION_DECL && MAIN_NAME_P (DECL_NAME (decl))) warning (OPT_Wmain, "%q+D is usually a function", decl); if (initialized) /* Is it valid for this decl to have an initializer at all? If not, set INITIALIZED to zero, which will indirectly tell 'finish_decl' to ignore the initializer once it is parsed. */ switch (TREE_CODE (decl)) { case TYPE_DECL: error ("typedef %qD is initialized (use __typeof__ instead)", decl); initialized = 0; break; case FUNCTION_DECL: error ("function %qD is initialized like a variable", decl); initialized = 0; break; case PARM_DECL: /* DECL_INITIAL in a PARM_DECL is really DECL_ARG_TYPE. */ error ("parameter %qD is initialized", decl); initialized = 0; break; default: /* Don't allow initializations for incomplete types except for arrays which might be completed by the initialization. */ /* This can happen if the array size is an undefined macro. We already gave a warning, so we don't need another one. */ if (TREE_TYPE (decl) == error_mark_node) initialized = 0; else if (COMPLETE_TYPE_P (TREE_TYPE (decl))) { /* A complete type is ok if size is fixed. */ if (TREE_CODE (TYPE_SIZE (TREE_TYPE (decl))) != INTEGER_CST || C_DECL_VARIABLE_SIZE (decl)) { error ("variable-sized object may not be initialized"); initialized = 0; } } else if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE) { error ("variable %qD has initializer but incomplete type", decl); initialized = 0; } else if (C_DECL_VARIABLE_SIZE (decl)) { /* Although C99 is unclear about whether incomplete arrays of VLAs themselves count as VLAs, it does not make sense to permit them to be initialized given that ordinary VLAs may not be initialized. */ error ("variable-sized object may not be initialized"); initialized = 0; } } if (initialized) { if (current_scope == file_scope) TREE_STATIC (decl) = 1; /* Tell 'pushdecl' this is an initialized decl even though we don't yet have the initializer expression. Also tell 'finish_decl' it may store the real initializer. */ DECL_INITIAL (decl) = error_mark_node; } /* If this is a function declaration, write a record describing it to the prototypes file (if requested). */ if (TREE_CODE (decl) == FUNCTION_DECL) gen_aux_info_record (decl, 0, 0, prototype_p (TREE_TYPE (decl))); /* ANSI specifies that a tentative definition which is not merged with a non-tentative definition behaves exactly like a definition with an initializer equal to zero. (Section 3.7.2) -fno-common gives strict ANSI behavior, though this tends to break a large body of code that grew up without this rule. Thread-local variables are never common, since there's no entrenched body of code to break, and it allows more efficient variable references in the presence of dynamic linking. */ if (TREE_CODE (decl) == VAR_DECL && !initialized && TREE_PUBLIC (decl) && !DECL_THREAD_LOCAL_P (decl) && !flag_no_common) DECL_COMMON (decl) = 1; /* Set attributes here so if duplicate decl, will have proper attributes. */ c_decl_attributes (&decl, attributes, 0); /* Handle gnu_inline attribute. 
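   Roughly: in C99-or-later mode,

       extern inline __attribute__ ((gnu_inline)) int inc (int x) { return x + 1; }

   keeps the GNU89 meaning of 'extern inline', i.e. an inline definition that
   is not emitted out of line.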
*/ if (declspecs->inline_p && !flag_gnu89_inline && TREE_CODE (decl) == FUNCTION_DECL && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl)) || current_function_decl)) { if (declspecs->storage_class == csc_auto && current_scope != file_scope) ; else if (declspecs->storage_class != csc_static) DECL_EXTERNAL (decl) = !DECL_EXTERNAL (decl); } if (TREE_CODE (decl) == FUNCTION_DECL && targetm.calls.promote_prototypes (TREE_TYPE (decl))) { struct c_declarator *ce = declarator; if (ce->kind == cdk_pointer) ce = declarator->declarator; if (ce->kind == cdk_function) { tree args = ce->u.arg_info->parms; for (; args; args = DECL_CHAIN (args)) { tree type = TREE_TYPE (args); if (type && INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)) DECL_ARG_TYPE (args) = c_type_promotes_to (type); } } } if (TREE_CODE (decl) == FUNCTION_DECL && DECL_DECLARED_INLINE_P (decl) && DECL_UNINLINABLE (decl) && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl))) warning (OPT_Wattributes, "inline function %q+D given attribute noinline", decl); /* C99 6.7.4p3: An inline definition of a function with external linkage shall not contain a definition of a modifiable object with static storage duration... */ if (TREE_CODE (decl) == VAR_DECL && current_scope != file_scope && TREE_STATIC (decl) && !TREE_READONLY (decl) && DECL_DECLARED_INLINE_P (current_function_decl) && DECL_EXTERNAL (current_function_decl)) record_inline_static (input_location, current_function_decl, decl, csi_modifiable); if (c_dialect_objc () && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL)) objc_check_global_decl (decl); /* Add this decl to the current scope. TEM may equal DECL or it may be a previous decl of the same name. */ tem = pushdecl (decl); if (initialized && DECL_EXTERNAL (tem)) { DECL_EXTERNAL (tem) = 0; TREE_STATIC (tem) = 1; } return tem; } /* Subroutine of finish_decl. TYPE is the type of an uninitialized object DECL or the non-array element type if DECL is an uninitialized array. If that type has a const member, diagnose this. */ static void diagnose_uninitialized_cst_member (tree decl, tree type) { tree field; for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) { tree field_type; if (TREE_CODE (field) != FIELD_DECL) continue; field_type = strip_array_types (TREE_TYPE (field)); if (TYPE_QUALS (field_type) & TYPE_QUAL_CONST) { warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat, "uninitialized const member in %qT is invalid in C++", strip_array_types (TREE_TYPE (decl))); inform (DECL_SOURCE_LOCATION (field), "%qD should be initialized", field); } if (TREE_CODE (field_type) == RECORD_TYPE || TREE_CODE (field_type) == UNION_TYPE) diagnose_uninitialized_cst_member (decl, field_type); } } /* Finish processing of a declaration; install its initial value. If ORIGTYPE is not NULL_TREE, it is the original type of INIT. If the length of an array type is not known before, it must be determined now, from the initial value, or it is an error. INIT_LOC is the location of the initial value. */ void finish_decl (tree decl, location_t init_loc, tree init, tree origtype, tree asmspec_tree) { tree type; bool was_incomplete = (DECL_SIZE (decl) == 0); const char *asmspec = 0; /* If a name was specified, get the string. 
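   The name in question is an asm label, as in

       int counter asm ("hw_counter");

   where the string "hw_counter" arrives as ASMSPEC_TREE.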
*/ if ((TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == VAR_DECL) && DECL_FILE_SCOPE_P (decl)) asmspec_tree = maybe_apply_renaming_pragma (decl, asmspec_tree); if (asmspec_tree) asmspec = TREE_STRING_POINTER (asmspec_tree); if (TREE_CODE (decl) == VAR_DECL && TREE_STATIC (decl) && global_bindings_p ()) /* So decl is a global variable. Record the types it uses so that we can decide later to emit debug info for them. */ record_types_used_by_current_var_decl (decl); /* If `start_decl' didn't like having an initialization, ignore it now. */ if (init != 0 && DECL_INITIAL (decl) == 0) init = 0; /* Don't crash if parm is initialized. */ if (TREE_CODE (decl) == PARM_DECL) init = 0; if (init) store_init_value (init_loc, decl, init, origtype); if (c_dialect_objc () && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == FIELD_DECL)) objc_check_decl (decl); type = TREE_TYPE (decl); /* Deduce size of array from initialization, if not already known. */ if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == 0 && TREE_CODE (decl) != TYPE_DECL) { bool do_default = (TREE_STATIC (decl) /* Even if pedantic, an external linkage array may have incomplete type at first. */ ? pedantic && !TREE_PUBLIC (decl) : !DECL_EXTERNAL (decl)); int failure = complete_array_type (&TREE_TYPE (decl), DECL_INITIAL (decl), do_default); /* Get the completed type made by complete_array_type. */ type = TREE_TYPE (decl); switch (failure) { case 1: error ("initializer fails to determine size of %q+D", decl); break; case 2: if (do_default) error ("array size missing in %q+D", decl); /* If a `static' var's size isn't known, make it extern as well as static, so it does not get allocated. If it is not `static', then do not mark extern; finish_incomplete_decl will give it a default size and it will get allocated. */ else if (!pedantic && TREE_STATIC (decl) && !TREE_PUBLIC (decl)) DECL_EXTERNAL (decl) = 1; break; case 3: error ("zero or negative size array %q+D", decl); break; case 0: /* For global variables, update the copy of the type that exists in the binding. */ if (TREE_PUBLIC (decl)) { struct c_binding *b_ext = I_SYMBOL_BINDING (DECL_NAME (decl)); while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext)) b_ext = b_ext->shadowed; if (b_ext) { if (b_ext->u.type && comptypes (b_ext->u.type, type)) b_ext->u.type = composite_type (b_ext->u.type, type); else b_ext->u.type = type; } } break; default: gcc_unreachable (); } if (DECL_INITIAL (decl)) TREE_TYPE (DECL_INITIAL (decl)) = type; relayout_decl (decl); } if (TREE_CODE (decl) == VAR_DECL) { if (init && TREE_CODE (init) == CONSTRUCTOR) add_flexible_array_elts_to_size (decl, init); if (DECL_SIZE (decl) == 0 && TREE_TYPE (decl) != error_mark_node && COMPLETE_TYPE_P (TREE_TYPE (decl))) layout_decl (decl, 0); if (DECL_SIZE (decl) == 0 /* Don't give an error if we already gave one earlier. */ && TREE_TYPE (decl) != error_mark_node && (TREE_STATIC (decl) /* A static variable with an incomplete type is an error if it is initialized. Also if it is not file scope. Otherwise, let it through, but if it is not `extern' then it may cause an error message later. */ ? (DECL_INITIAL (decl) != 0 || !DECL_FILE_SCOPE_P (decl)) /* An automatic variable with an incomplete type is an error. 
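   (e.g.  void f (void) { struct never_defined x; }  - storage size unknown)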
*/ : !DECL_EXTERNAL (decl))) { error ("storage size of %q+D isn%'t known", decl); TREE_TYPE (decl) = error_mark_node; } if ((DECL_EXTERNAL (decl) || TREE_STATIC (decl)) && DECL_SIZE (decl) != 0) { if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST) constant_expression_warning (DECL_SIZE (decl)); else { error ("storage size of %q+D isn%'t constant", decl); TREE_TYPE (decl) = error_mark_node; } } if (TREE_USED (type)) { TREE_USED (decl) = 1; DECL_READ_P (decl) = 1; } } /* If this is a function and an assembler name is specified, reset DECL_RTL so we can give it its new name. Also, update builtin_decl if it was a normal built-in. */ if (TREE_CODE (decl) == FUNCTION_DECL && asmspec) { if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL) set_builtin_user_assembler_name (decl, asmspec); set_user_assembler_name (decl, asmspec); } /* If #pragma weak was used, mark the decl weak now. */ maybe_apply_pragma_weak (decl); /* Output the assembler code and/or RTL code for variables and functions, unless the type is an undefined structure or union. If not, it will get done when the type is completed. */ if (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL) { /* Determine the ELF visibility. */ if (TREE_PUBLIC (decl)) c_determine_visibility (decl); /* This is a no-op in c-lang.c or something real in objc-act.c. */ if (c_dialect_objc ()) objc_check_decl (decl); if (asmspec) { /* If this is not a static variable, issue a warning. It doesn't make any sense to give an ASMSPEC for an ordinary, non-register local variable. Historically, GCC has accepted -- but ignored -- the ASMSPEC in this case. */ if (!DECL_FILE_SCOPE_P (decl) && TREE_CODE (decl) == VAR_DECL && !C_DECL_REGISTER (decl) && !TREE_STATIC (decl)) warning (0, "ignoring asm-specifier for non-static local " "variable %q+D", decl); else set_user_assembler_name (decl, asmspec); } if (DECL_FILE_SCOPE_P (decl)) { if (DECL_INITIAL (decl) == NULL_TREE || DECL_INITIAL (decl) == error_mark_node) /* Don't output anything when a tentative file-scope definition is seen. But at end of compilation, do output code for them. */ DECL_DEFER_OUTPUT (decl) = 1; if (asmspec && C_DECL_REGISTER (decl)) DECL_HARD_REGISTER (decl) = 1; rest_of_decl_compilation (decl, true, 0); } else { /* In conjunction with an ASMSPEC, the `register' keyword indicates that we should place the variable in a particular register. */ if (asmspec && C_DECL_REGISTER (decl)) { DECL_HARD_REGISTER (decl) = 1; /* This cannot be done for a structure with volatile fields, on which DECL_REGISTER will have been reset. */ if (!DECL_REGISTER (decl)) error ("cannot put object with volatile field into register"); } if (TREE_CODE (decl) != FUNCTION_DECL) { /* If we're building a variable sized type, and we might be reachable other than via the top of the current binding level, then create a new BIND_EXPR so that we deallocate the object at the right time. */ /* Note that DECL_SIZE can be null due to errors. */ if (DECL_SIZE (decl) && !TREE_CONSTANT (DECL_SIZE (decl)) && STATEMENT_LIST_HAS_LABEL (cur_stmt_list)) { tree bind; bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (bind) = 1; add_stmt (bind); BIND_EXPR_BODY (bind) = push_stmt_list (); } add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl)); } } if (!DECL_FILE_SCOPE_P (decl)) { /* Recompute the RTL of a local array now if it used to be an incomplete type. */ if (was_incomplete && !TREE_STATIC (decl) && !DECL_EXTERNAL (decl)) { /* If we used it already as memory, it must stay in memory. 
*/ TREE_ADDRESSABLE (decl) = TREE_USED (decl); /* If it's still incomplete now, no init will save it. */ if (DECL_SIZE (decl) == 0) DECL_INITIAL (decl) = 0; } } } if (TREE_CODE (decl) == TYPE_DECL) { if (!DECL_FILE_SCOPE_P (decl) && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE)) add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl)); rest_of_decl_compilation (decl, DECL_FILE_SCOPE_P (decl), 0); } /* Install a cleanup (aka destructor) if one was given. */ if (TREE_CODE (decl) == VAR_DECL && !TREE_STATIC (decl)) { tree attr = lookup_attribute ("cleanup", DECL_ATTRIBUTES (decl)); if (attr) { tree cleanup_id = TREE_VALUE (TREE_VALUE (attr)); tree cleanup_decl = lookup_name (cleanup_id); tree cleanup; vec<tree, va_gc> *v; /* Build "cleanup(&decl)" for the destructor. */ cleanup = build_unary_op (input_location, ADDR_EXPR, decl, 0); vec_alloc (v, 1); v->quick_push (cleanup); cleanup = c_build_function_call_vec (DECL_SOURCE_LOCATION (decl), vNULL, cleanup_decl, v, NULL); vec_free (v); /* Don't warn about decl unused; the cleanup uses it. */ TREE_USED (decl) = 1; TREE_USED (cleanup_decl) = 1; DECL_READ_P (decl) = 1; push_cleanup (decl, cleanup, false); } } if (warn_cxx_compat && TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl) && DECL_INITIAL (decl) == NULL_TREE) { type = strip_array_types (type); if (TREE_READONLY (decl)) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat, "uninitialized const %qD is invalid in C++", decl); else if ((TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE) && C_TYPE_FIELDS_READONLY (type)) diagnose_uninitialized_cst_member (decl, type); } invoke_plugin_callbacks (PLUGIN_FINISH_DECL, decl); } /* Given a parsed parameter declaration, decode it into a PARM_DECL. EXPR is NULL or a pointer to an expression that needs to be evaluated for the side effects of array size expressions in the parameters. */ tree grokparm (const struct c_parm *parm, tree *expr) { tree attrs = parm->attrs; tree decl = grokdeclarator (parm->declarator, parm->specs, PARM, false, NULL, &attrs, expr, NULL, DEPRECATED_NORMAL); decl_attributes (&decl, attrs, 0); return decl; } /* Given a parsed parameter declaration, decode it into a PARM_DECL and push that on the current scope. EXPR is a pointer to an expression that needs to be evaluated for the side effects of array size expressions in the parameters. */ void push_parm_decl (const struct c_parm *parm, tree *expr) { tree attrs = parm->attrs; tree decl; decl = grokdeclarator (parm->declarator, parm->specs, PARM, false, NULL, &attrs, expr, NULL, DEPRECATED_NORMAL); decl_attributes (&decl, attrs, 0); decl = pushdecl (decl); finish_decl (decl, input_location, NULL_TREE, NULL_TREE, NULL_TREE); } /* Mark all the parameter declarations to date as forward decls. Also diagnose use of this extension. */ void mark_forward_parm_decls (void) { struct c_binding *b; if (pedantic && !current_scope->warned_forward_parm_decls) { pedwarn (input_location, OPT_Wpedantic, "ISO C forbids forward parameter declarations"); current_scope->warned_forward_parm_decls = true; } for (b = current_scope->bindings; b; b = b->prev) if (TREE_CODE (b->decl) == PARM_DECL) TREE_ASM_WRITTEN (b->decl) = 1; } /* Build a COMPOUND_LITERAL_EXPR. TYPE is the type given in the compound literal, which may be an incomplete array type completed by the initializer; INIT is a CONSTRUCTOR at LOC that initializes the compound literal. NON_CONST is true if the initializers contain something that cannot occur in a constant expression. 
*/ tree build_compound_literal (location_t loc, tree type, tree init, bool non_const) { /* We do not use start_decl here because we have a type, not a declarator; and do not use finish_decl because the decl should be stored inside the COMPOUND_LITERAL_EXPR rather than added elsewhere as a DECL_EXPR. */ tree decl; tree complit; tree stmt; if (type == error_mark_node || init == error_mark_node) return error_mark_node; decl = build_decl (loc, VAR_DECL, NULL_TREE, type); DECL_EXTERNAL (decl) = 0; TREE_PUBLIC (decl) = 0; TREE_STATIC (decl) = (current_scope == file_scope); DECL_CONTEXT (decl) = current_function_decl; TREE_USED (decl) = 1; DECL_READ_P (decl) = 1; TREE_TYPE (decl) = type; TREE_READONLY (decl) = (TYPE_READONLY (type) || (TREE_CODE (type) == ARRAY_TYPE && TYPE_READONLY (TREE_TYPE (type)))); store_init_value (loc, decl, init, NULL_TREE); if (TREE_CODE (type) == ARRAY_TYPE && !COMPLETE_TYPE_P (type)) { int failure = complete_array_type (&TREE_TYPE (decl), DECL_INITIAL (decl), true); /* If complete_array_type returns 3, it means that the initial value of the compound literal is empty. Allow it. */ gcc_assert (failure == 0 || failure == 3); type = TREE_TYPE (decl); TREE_TYPE (DECL_INITIAL (decl)) = type; } if (type == error_mark_node || !COMPLETE_TYPE_P (type)) { c_incomplete_type_error (NULL_TREE, type); return error_mark_node; } stmt = build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl); complit = build1 (COMPOUND_LITERAL_EXPR, type, stmt); TREE_SIDE_EFFECTS (complit) = 1; layout_decl (decl, 0); if (TREE_STATIC (decl)) { /* This decl needs a name for the assembler output. */ set_compound_literal_name (decl); DECL_DEFER_OUTPUT (decl) = 1; DECL_COMDAT (decl) = 1; DECL_ARTIFICIAL (decl) = 1; DECL_IGNORED_P (decl) = 1; pushdecl (decl); rest_of_decl_compilation (decl, 1, 0); } if (non_const) { complit = build2 (C_MAYBE_CONST_EXPR, type, NULL, complit); C_MAYBE_CONST_EXPR_NON_CONST (complit) = 1; } return complit; } /* Check the type of a compound literal. Here we just check that it is valid for C++. */ void check_compound_literal_type (location_t loc, struct c_type_name *type_name) { if (warn_cxx_compat && (type_name->specs->typespec_kind == ctsk_tagdef || type_name->specs->typespec_kind == ctsk_tagfirstref)) warning_at (loc, OPT_Wc___compat, "defining a type in a compound literal is invalid in C++"); } /* Determine whether TYPE is a structure with a flexible array member, or a union containing such a structure (possibly recursively). */ static bool flexible_array_type_p (tree type) { tree x; switch (TREE_CODE (type)) { case RECORD_TYPE: x = TYPE_FIELDS (type); if (x == NULL_TREE) return false; while (DECL_CHAIN (x) != NULL_TREE) x = DECL_CHAIN (x); if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE && TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE && TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE) return true; return false; case UNION_TYPE: for (x = TYPE_FIELDS (type); x != NULL_TREE; x = DECL_CHAIN (x)) { if (flexible_array_type_p (TREE_TYPE (x))) return true; } return false; default: return false; } } /* Performs sanity checks on the TYPE and WIDTH of the bit-field NAME, replacing with appropriate values if they are invalid. */ static void check_bitfield_type_and_width (tree *type, tree *width, tree orig_name) { tree type_mv; unsigned int max_width; unsigned HOST_WIDE_INT w; const char *name = (orig_name ? 
identifier_to_locale (IDENTIFIER_POINTER (orig_name)) : _("<anonymous>")); /* Detect and ignore out of range field width and process valid field widths. */ if (!INTEGRAL_TYPE_P (TREE_TYPE (*width))) { error ("bit-field %qs width not an integer constant", name); *width = integer_one_node; } else { if (TREE_CODE (*width) != INTEGER_CST) { *width = c_fully_fold (*width, false, NULL); if (TREE_CODE (*width) == INTEGER_CST) pedwarn (input_location, OPT_Wpedantic, "bit-field %qs width not an integer constant expression", name); } if (TREE_CODE (*width) != INTEGER_CST) { error ("bit-field %qs width not an integer constant", name); *width = integer_one_node; } constant_expression_warning (*width); if (tree_int_cst_sgn (*width) < 0) { error ("negative width in bit-field %qs", name); *width = integer_one_node; } else if (integer_zerop (*width) && orig_name) { error ("zero width for bit-field %qs", name); *width = integer_one_node; } } /* Detect invalid bit-field type. */ if (TREE_CODE (*type) != INTEGER_TYPE && TREE_CODE (*type) != BOOLEAN_TYPE && TREE_CODE (*type) != ENUMERAL_TYPE) { error ("bit-field %qs has invalid type", name); *type = unsigned_type_node; } type_mv = TYPE_MAIN_VARIANT (*type); if (!in_system_header_at (input_location) && type_mv != integer_type_node && type_mv != unsigned_type_node && type_mv != boolean_type_node) pedwarn_c90 (input_location, OPT_Wpedantic, "type of bit-field %qs is a GCC extension", name); max_width = TYPE_PRECISION (*type); if (0 < compare_tree_int (*width, max_width)) { error ("width of %qs exceeds its type", name); w = max_width; *width = build_int_cst (integer_type_node, w); } else w = tree_to_uhwi (*width); if (TREE_CODE (*type) == ENUMERAL_TYPE) { struct lang_type *lt = TYPE_LANG_SPECIFIC (*type); if (!lt || w < tree_int_cst_min_precision (lt->enum_min, TYPE_SIGN (*type)) || w < tree_int_cst_min_precision (lt->enum_max, TYPE_SIGN (*type))) warning (0, "%qs is narrower than values of its type", name); } } /* Print warning about variable length array if necessary. */ static void warn_variable_length_array (tree name, tree size) { if (TREE_CONSTANT (size)) { if (name) pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids array %qE whose size " "can%'t be evaluated", name); else pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids array " "whose size can%'t be evaluated"); } else { if (name) pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids variable length array %qE", name); else pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids variable " "length array"); } } /* Print warning about defaulting to int if necessary. */ static void warn_defaults_to (location_t location, int opt, const char *gmsgid, ...) { diagnostic_info diagnostic; va_list ap; va_start (ap, gmsgid); diagnostic_set_info (&diagnostic, gmsgid, &ap, location, flag_isoc99 ? DK_PEDWARN : DK_WARNING); diagnostic.option_index = opt; report_diagnostic (&diagnostic); va_end (ap); } /* Given declspecs and a declarator, determine the name and type of the object declared and construct a ..._DECL node for it. (In one case we can return a ..._TYPE node instead. For invalid input we sometimes return 0.) DECLSPECS is a c_declspecs structure for the declaration specifiers. DECL_CONTEXT says which syntactic context this declaration is in: NORMAL for most contexts. Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL. FUNCDEF for a function definition. Like NORMAL but a few different error messages in each case. Return value may be zero meaning this definition is too screwy to try to parse. 
PARM for a parameter declaration (either within a function prototype or before a function body). Make a PARM_DECL, or return void_type_node. TYPENAME if for a typename (in a cast or sizeof). Don't make a DECL node; just return the ..._TYPE node. FIELD for a struct or union field; make a FIELD_DECL. INITIALIZED is true if the decl has an initializer. WIDTH is non-NULL for bit-fields, and is a pointer to an INTEGER_CST node representing the width of the bit-field. DECL_ATTRS points to the list of attributes that should be added to this decl. Any nested attributes that belong on the decl itself will be added to this list. If EXPR is not NULL, any expressions that need to be evaluated as part of evaluating variably modified types will be stored in *EXPR. If EXPR_CONST_OPERANDS is not NULL, *EXPR_CONST_OPERANDS will be set to indicate whether operands in *EXPR can be used in constant expressions. DEPRECATED_STATE is a deprecated_states value indicating whether deprecation warnings should be suppressed. In the TYPENAME case, DECLARATOR is really an absolute declarator. It may also be so in the PARM case, for a prototype where the argument type is specified but not the name. This function is where the complicated C meanings of `static' and `extern' are interpreted. */ static tree grokdeclarator (const struct c_declarator *declarator, struct c_declspecs *declspecs, enum decl_context decl_context, bool initialized, tree *width, tree *decl_attrs, tree *expr, bool *expr_const_operands, enum deprecated_states deprecated_state) { tree type = declspecs->type; bool threadp = declspecs->thread_p; enum c_storage_class storage_class = declspecs->storage_class; int constp; int restrictp; int volatilep; int atomicp; int type_quals = TYPE_UNQUALIFIED; tree name = NULL_TREE; bool funcdef_flag = false; bool funcdef_syntax = false; bool size_varies = false; tree decl_attr = declspecs->decl_attr; int array_ptr_quals = TYPE_UNQUALIFIED; tree array_ptr_attrs = NULL_TREE; int array_parm_static = 0; bool array_parm_vla_unspec_p = false; tree returned_attrs = NULL_TREE; bool bitfield = width != NULL; tree element_type; tree orig_qual_type = NULL; size_t orig_qual_indirect = 0; struct c_arg_info *arg_info = 0; addr_space_t as1, as2, address_space; location_t loc = UNKNOWN_LOCATION; const char *errmsg; tree expr_dummy; bool expr_const_operands_dummy; enum c_declarator_kind first_non_attr_kind; unsigned int alignas_align = 0; if (TREE_CODE (type) == ERROR_MARK) return error_mark_node; if (expr == NULL) expr = &expr_dummy; if (expr_const_operands == NULL) expr_const_operands = &expr_const_operands_dummy; *expr = declspecs->expr; *expr_const_operands = declspecs->expr_const_operands; if (decl_context == FUNCDEF) funcdef_flag = true, decl_context = NORMAL; /* Look inside a declarator for the name being declared and get it as an IDENTIFIER_NODE, for an error message. */ { const struct c_declarator *decl = declarator; first_non_attr_kind = cdk_attrs; while (decl) switch (decl->kind) { case cdk_array: loc = decl->id_loc; /* FALL THRU. 
*/ case cdk_function: case cdk_pointer: funcdef_syntax = (decl->kind == cdk_function); if (first_non_attr_kind == cdk_attrs) first_non_attr_kind = decl->kind; decl = decl->declarator; break; case cdk_attrs: decl = decl->declarator; break; case cdk_id: loc = decl->id_loc; if (decl->u.id) name = decl->u.id; if (first_non_attr_kind == cdk_attrs) first_non_attr_kind = decl->kind; decl = 0; break; default: gcc_unreachable (); } if (name == 0) { gcc_assert (decl_context == PARM || decl_context == TYPENAME || (decl_context == FIELD && declarator->kind == cdk_id)); gcc_assert (!initialized); } } /* A function definition's declarator must have the form of a function declarator. */ if (funcdef_flag && !funcdef_syntax) return 0; /* If this looks like a function definition, make it one, even if it occurs where parms are expected. Then store_parm_decls will reject it and not use it as a parm. */ if (decl_context == NORMAL && !funcdef_flag && current_scope->parm_flag) decl_context = PARM; if (declspecs->deprecated_p && deprecated_state != DEPRECATED_SUPPRESS) warn_deprecated_use (declspecs->type, declspecs->decl_attr); if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope && variably_modified_type_p (type, NULL_TREE)) { if (name) error_at (loc, "variably modified %qE at file scope", name); else error_at (loc, "variably modified field at file scope"); type = integer_type_node; } size_varies = C_TYPE_VARIABLE_SIZE (type) != 0; /* Diagnose defaulting to "int". */ if (declspecs->default_int_p && !in_system_header_at (input_location)) { /* Issue a warning if this is an ISO C 99 program or if -Wreturn-type and this is a function, or if -Wimplicit; prefer the former warning since it is more explicit. */ if ((warn_implicit_int || warn_return_type || flag_isoc99) && funcdef_flag) warn_about_return_type = 1; else { if (name) warn_defaults_to (loc, OPT_Wimplicit_int, "type defaults to %<int%> in declaration " "of %qE", name); else warn_defaults_to (loc, OPT_Wimplicit_int, "type defaults to %<int%> in type name"); } } /* Adjust the type if a bit-field is being declared, -funsigned-bitfields applied and the type is not explicitly "signed". */ if (bitfield && !flag_signed_bitfields && !declspecs->explicit_signed_p && TREE_CODE (type) == INTEGER_TYPE) type = unsigned_type_for (type); /* Figure out the type qualifiers for the declaration. There are two ways a declaration can become qualified. One is something like `const int i' where the `const' is explicit. Another is something like `typedef const int CI; CI i' where the type of the declaration contains the `const'. A third possibility is that there is a type qualifier on the element type of a typedefed array type, in which case we should extract that qualifier so that c_apply_type_quals_to_decl receives the full list of qualifiers to work with (C90 is not entirely clear about whether duplicate qualifiers should be diagnosed in this case, but it seems most appropriate to do so). */ element_type = strip_array_types (type); constp = declspecs->const_p + TYPE_READONLY (element_type); restrictp = declspecs->restrict_p + TYPE_RESTRICT (element_type); volatilep = declspecs->volatile_p + TYPE_VOLATILE (element_type); atomicp = declspecs->atomic_p + TYPE_ATOMIC (element_type); as1 = declspecs->address_space; as2 = TYPE_ADDR_SPACE (element_type); address_space = ADDR_SPACE_GENERIC_P (as1)? 
as2 : as1; if (constp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<const%>"); if (restrictp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<restrict%>"); if (volatilep > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<volatile%>"); if (atomicp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<_Atomic%>"); if (!ADDR_SPACE_GENERIC_P (as1) && !ADDR_SPACE_GENERIC_P (as2) && as1 != as2) error_at (loc, "conflicting named address spaces (%s vs %s)", c_addr_space_name (as1), c_addr_space_name (as2)); if ((TREE_CODE (type) == ARRAY_TYPE || first_non_attr_kind == cdk_array) && TYPE_QUALS (element_type)) { orig_qual_type = type; type = TYPE_MAIN_VARIANT (type); } type_quals = ((constp ? TYPE_QUAL_CONST : 0) | (restrictp ? TYPE_QUAL_RESTRICT : 0) | (volatilep ? TYPE_QUAL_VOLATILE : 0) | (atomicp ? TYPE_QUAL_ATOMIC : 0) | ENCODE_QUAL_ADDR_SPACE (address_space)); if (type_quals != TYPE_QUALS (element_type)) orig_qual_type = NULL_TREE; /* Applying the _Atomic qualifier to an array type (through the use of typedefs or typeof) must be detected here. If the qualifier is introduced later, any appearance of applying it to an array is actually applying it to an element of that array. */ if (atomicp && TREE_CODE (type) == ARRAY_TYPE) error_at (loc, "%<_Atomic%>-qualified array type"); /* Warn about storage classes that are invalid for certain kinds of declarations (parameters, typenames, etc.). */ if (funcdef_flag && (threadp || storage_class == csc_auto || storage_class == csc_register || storage_class == csc_typedef)) { if (storage_class == csc_auto) pedwarn (loc, (current_scope == file_scope) ? 0 : OPT_Wpedantic, "function definition declared %<auto%>"); if (storage_class == csc_register) error_at (loc, "function definition declared %<register%>"); if (storage_class == csc_typedef) error_at (loc, "function definition declared %<typedef%>"); if (threadp) error_at (loc, "function definition declared %qs", declspecs->thread_gnu_p ? "__thread" : "_Thread_local"); threadp = false; if (storage_class == csc_auto || storage_class == csc_register || storage_class == csc_typedef) storage_class = csc_none; } else if (decl_context != NORMAL && (storage_class != csc_none || threadp)) { if (decl_context == PARM && storage_class == csc_register) ; else { switch (decl_context) { case FIELD: if (name) error_at (loc, "storage class specified for structure " "field %qE", name); else error_at (loc, "storage class specified for structure field"); break; case PARM: if (name) error_at (loc, "storage class specified for parameter %qE", name); else error_at (loc, "storage class specified for unnamed parameter"); break; default: error_at (loc, "storage class specified for typename"); break; } storage_class = csc_none; threadp = false; } } else if (storage_class == csc_extern && initialized && !funcdef_flag) { /* 'extern' with initialization is invalid if not at file scope. */ if (current_scope == file_scope) { /* It is fine to have 'extern const' when compiling at C and C++ intersection. 
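For illustration: at file scope C++ gives const objects internal linkage, so a header meant for both languages spells an exported constant as

       extern const int limit = 10;

   and the test below therefore suppresses the `initialized and declared extern' warning when -Wc++-compat is active and the object is const-qualified.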
*/ if (!(warn_cxx_compat && constp)) warning_at (loc, 0, "%qE initialized and declared %<extern%>", name); } else error_at (loc, "%qE has both %<extern%> and initializer", name); } else if (current_scope == file_scope) { if (storage_class == csc_auto) error_at (loc, "file-scope declaration of %qE specifies %<auto%>", name); if (pedantic && storage_class == csc_register) pedwarn (input_location, OPT_Wpedantic, "file-scope declaration of %qE specifies %<register%>", name); } else { if (storage_class == csc_extern && funcdef_flag) error_at (loc, "nested function %qE declared %<extern%>", name); else if (threadp && storage_class == csc_none) { error_at (loc, "function-scope %qE implicitly auto and declared " "%qs", name, declspecs->thread_gnu_p ? "__thread" : "_Thread_local"); threadp = false; } } /* Now figure out the structure of the declarator proper. Descend through it, creating more complex types, until we reach the declared identifier (or NULL_TREE, in an absolute declarator). At each stage we maintain an unqualified version of the type together with any qualifiers that should be applied to it with c_build_qualified_type; this way, array types including multidimensional array types are first built up in unqualified form and then the qualified form is created with TYPE_MAIN_VARIANT pointing to the unqualified form. */ while (declarator && declarator->kind != cdk_id) { if (type == error_mark_node) { declarator = declarator->declarator; continue; } /* Each level of DECLARATOR is either a cdk_array (for ...[..]), a cdk_pointer (for *...), a cdk_function (for ...(...)), a cdk_attrs (for nested attributes), or a cdk_id (for the name being declared or the place in an absolute declarator where the name was omitted). For the last case, we have just exited the loop. At this point, TYPE is the type of elements of an array, or for a function to return, or for a pointer to point to. After this sequence of ifs, TYPE is the type of the array or function or pointer, and DECLARATOR has had its outermost layer removed. */ if (array_ptr_quals != TYPE_UNQUALIFIED || array_ptr_attrs != NULL_TREE || array_parm_static) { /* Only the innermost declarator (making a parameter be of array type which is converted to pointer type) may have static or type qualifiers. */ error_at (loc, "static or type qualifiers in non-parameter array declarator"); array_ptr_quals = TYPE_UNQUALIFIED; array_ptr_attrs = NULL_TREE; array_parm_static = 0; } switch (declarator->kind) { case cdk_attrs: { /* A declarator with embedded attributes. */ tree attrs = declarator->u.attrs; const struct c_declarator *inner_decl; int attr_flags = 0; declarator = declarator->declarator; inner_decl = declarator; while (inner_decl->kind == cdk_attrs) inner_decl = inner_decl->declarator; if (inner_decl->kind == cdk_id) attr_flags |= (int) ATTR_FLAG_DECL_NEXT; else if (inner_decl->kind == cdk_function) attr_flags |= (int) ATTR_FLAG_FUNCTION_NEXT; else if (inner_decl->kind == cdk_array) attr_flags |= (int) ATTR_FLAG_ARRAY_NEXT; returned_attrs = decl_attributes (&type, chainon (returned_attrs, attrs), attr_flags); break; } case cdk_array: { tree itype = NULL_TREE; tree size = declarator->u.array.dimen; /* The index is a signed object `sizetype' bits wide. 
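Illustratively, for a declaration such as

       int a[n];

   the domain built below is [0, n-1]; the subtraction is carried out in this signed copy of sizetype so that a variable bound is computed in the proper mode.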
*/ tree index_type = c_common_signed_type (sizetype); array_ptr_quals = declarator->u.array.quals; array_ptr_attrs = declarator->u.array.attrs; array_parm_static = declarator->u.array.static_p; array_parm_vla_unspec_p = declarator->u.array.vla_unspec_p; declarator = declarator->declarator; /* Check for some types that there cannot be arrays of. */ if (VOID_TYPE_P (type)) { if (name) error_at (loc, "declaration of %qE as array of voids", name); else error_at (loc, "declaration of type name as array of voids"); type = error_mark_node; } if (TREE_CODE (type) == FUNCTION_TYPE) { if (name) error_at (loc, "declaration of %qE as array of functions", name); else error_at (loc, "declaration of type name as array of " "functions"); type = error_mark_node; } if (pedantic && !in_system_header_at (input_location) && flexible_array_type_p (type)) pedwarn (loc, OPT_Wpedantic, "invalid use of structure with flexible array member"); if (size == error_mark_node) type = error_mark_node; if (type == error_mark_node) continue; /* If size was specified, set ITYPE to a range-type for that size. Otherwise, ITYPE remains null. finish_decl may figure it out from an initial value. */ if (size) { bool size_maybe_const = true; bool size_int_const = (TREE_CODE (size) == INTEGER_CST && !TREE_OVERFLOW (size)); bool this_size_varies = false; /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */ STRIP_TYPE_NOPS (size); if (!INTEGRAL_TYPE_P (TREE_TYPE (size))) { if (name) error_at (loc, "size of array %qE has non-integer type", name); else error_at (loc, "size of unnamed array has non-integer type"); size = integer_one_node; } size = c_fully_fold (size, false, &size_maybe_const); if (pedantic && size_maybe_const && integer_zerop (size)) { if (name) pedwarn (loc, OPT_Wpedantic, "ISO C forbids zero-size array %qE", name); else pedwarn (loc, OPT_Wpedantic, "ISO C forbids zero-size array"); } if (TREE_CODE (size) == INTEGER_CST && size_maybe_const) { constant_expression_warning (size); if (tree_int_cst_sgn (size) < 0) { if (name) error_at (loc, "size of array %qE is negative", name); else error_at (loc, "size of unnamed array is negative"); size = integer_one_node; } /* Handle a size folded to an integer constant but not an integer constant expression. */ if (!size_int_const) { /* If this is a file scope declaration of an ordinary identifier, this is invalid code; diagnosing it here and not subsequently treating the type as variable-length avoids more confusing diagnostics later. */ if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope) pedwarn (input_location, 0, "variably modified %qE at file scope", name); else this_size_varies = size_varies = true; warn_variable_length_array (name, size); } } else if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope) { error_at (loc, "variably modified %qE at file scope", name); size = integer_one_node; } else { /* Make sure the array size remains visibly nonconstant even if it is (eg) a const variable with known value. */ this_size_varies = size_varies = true; warn_variable_length_array (name, size); if (flag_sanitize & SANITIZE_VLA && decl_context == NORMAL && do_ubsan_in_current_function ()) { /* Evaluate the array size only once. 
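E.g. for

       int a[n++];

   the increment must take effect exactly once even though, under -fsanitize=vla-bound, the size expression is both instrumented and used for layout; hence the c_save_expr wrapping below (a sketch of the motivation, not extra semantics).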
*/ size = c_save_expr (size); size = c_fully_fold (size, false, NULL); size = fold_build2 (COMPOUND_EXPR, TREE_TYPE (size), ubsan_instrument_vla (loc, size), size); } } if (integer_zerop (size) && !this_size_varies) { /* A zero-length array cannot be represented with an unsigned index type, which is what we'll get with build_index_type. Create an open-ended range instead. */ itype = build_range_type (sizetype, size, NULL_TREE); } else { /* Arrange for the SAVE_EXPR on the inside of the MINUS_EXPR, which allows the -1 to get folded with the +1 that happens when building TYPE_SIZE. */ if (size_varies) size = save_expr (size); if (this_size_varies && TREE_CODE (size) == INTEGER_CST) size = build2 (COMPOUND_EXPR, TREE_TYPE (size), integer_zero_node, size); /* Compute the maximum valid index, that is, size - 1. Do the calculation in index_type, so that if it is a variable the computations will be done in the proper mode. */ itype = fold_build2_loc (loc, MINUS_EXPR, index_type, convert (index_type, size), convert (index_type, size_one_node)); /* The above overflows when size does not fit in index_type. ??? While a size of INT_MAX+1 technically shouldn't cause an overflow (because we subtract 1), handling this case seems like an unnecessary complication. */ if (TREE_CODE (size) == INTEGER_CST && !int_fits_type_p (size, index_type)) { if (name) error_at (loc, "size of array %qE is too large", name); else error_at (loc, "size of unnamed array is too large"); type = error_mark_node; continue; } itype = build_index_type (itype); } if (this_size_varies) { if (*expr) *expr = build2 (COMPOUND_EXPR, TREE_TYPE (size), *expr, size); else *expr = size; *expr_const_operands &= size_maybe_const; } } else if (decl_context == FIELD) { bool flexible_array_member = false; if (array_parm_vla_unspec_p) /* Field names can in fact have function prototype scope so [*] is disallowed here through making the field variably modified, not through being something other than a declaration with function prototype scope. */ size_varies = true; else { const struct c_declarator *t = declarator; while (t->kind == cdk_attrs) t = t->declarator; flexible_array_member = (t->kind == cdk_id); } if (flexible_array_member && !in_system_header_at (input_location)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not " "support flexible array members"); /* ISO C99 Flexible array members are effectively identical to GCC's zero-length array extension. */ if (flexible_array_member || array_parm_vla_unspec_p) itype = build_range_type (sizetype, size_zero_node, NULL_TREE); } else if (decl_context == PARM) { if (array_parm_vla_unspec_p) { itype = build_range_type (sizetype, size_zero_node, NULL_TREE); size_varies = true; } } else if (decl_context == TYPENAME) { if (array_parm_vla_unspec_p) { /* C99 6.7.5.2p4 */ warning (0, "%<[*]%> not in a declaration"); /* We use this to avoid messing up with incomplete array types of the same type, that would otherwise be modified below. */ itype = build_range_type (sizetype, size_zero_node, NULL_TREE); size_varies = true; } } /* Complain about arrays of incomplete types. */ if (!COMPLETE_TYPE_P (type)) { error_at (loc, "array type has incomplete element type %qT", type); type = error_mark_node; } else /* When itype is NULL, a shared incomplete array type is returned for all array of a given type. Elsewhere we make sure we don't complete that type before copying it, but here we want to make sure we don't ever modify the shared type, so we gcc_assert (itype) below. 
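For example, the declarations

       extern int a[];
       extern int b[];

   both use the one shared incomplete `int[]' node, so completing or qualifying that node in place would silently change the other declaration as well.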
*/ { addr_space_t as = DECODE_QUAL_ADDR_SPACE (type_quals); if (!ADDR_SPACE_GENERIC_P (as) && as != TYPE_ADDR_SPACE (type)) type = build_qualified_type (type, ENCODE_QUAL_ADDR_SPACE (as)); type = build_array_type (type, itype); } if (type != error_mark_node) { if (size_varies) { /* It is ok to modify type here even if itype is NULL: if size_varies, we're in a multi-dimensional array and the inner type has variable size, so the enclosing shared array type must too. */ if (size && TREE_CODE (size) == INTEGER_CST) type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); C_TYPE_VARIABLE_SIZE (type) = 1; } /* The GCC extension for zero-length arrays differs from ISO flexible array members in that sizeof yields zero. */ if (size && integer_zerop (size)) { gcc_assert (itype); type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_SIZE (type) = bitsize_zero_node; TYPE_SIZE_UNIT (type) = size_zero_node; SET_TYPE_STRUCTURAL_EQUALITY (type); } if (array_parm_vla_unspec_p) { gcc_assert (itype); /* The type is complete. C99 6.7.5.2p4 */ type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_SIZE (type) = bitsize_zero_node; TYPE_SIZE_UNIT (type) = size_zero_node; SET_TYPE_STRUCTURAL_EQUALITY (type); } } if (decl_context != PARM && (array_ptr_quals != TYPE_UNQUALIFIED || array_ptr_attrs != NULL_TREE || array_parm_static)) { error_at (loc, "static or type qualifiers in non-parameter array declarator"); array_ptr_quals = TYPE_UNQUALIFIED; array_ptr_attrs = NULL_TREE; array_parm_static = 0; } orig_qual_indirect++; break; } case cdk_function: { /* Say it's a definition only for the declarator closest to the identifier, apart possibly from some attributes. */ bool really_funcdef = false; tree arg_types; orig_qual_type = NULL_TREE; if (funcdef_flag) { const struct c_declarator *t = declarator->declarator; while (t->kind == cdk_attrs) t = t->declarator; really_funcdef = (t->kind == cdk_id); } /* Declaring a function type. Make sure we have a valid type for the function to return. */ if (type == error_mark_node) continue; size_varies = false; /* Warn about some types functions can't return. */ if (TREE_CODE (type) == FUNCTION_TYPE) { if (name) error_at (loc, "%qE declared as function returning a " "function", name); else error_at (loc, "type name declared as function " "returning a function"); type = integer_type_node; } if (TREE_CODE (type) == ARRAY_TYPE) { if (name) error_at (loc, "%qE declared as function returning an array", name); else error_at (loc, "type name declared as function returning " "an array"); type = integer_type_node; } errmsg = targetm.invalid_return_type (type); if (errmsg) { error (errmsg); type = integer_type_node; } /* Construct the function type and go to the next inner layer of declarator. */ arg_info = declarator->u.arg_info; arg_types = grokparms (arg_info, really_funcdef); /* Type qualifiers before the return type of the function qualify the return type, not the function type. */ if (type_quals) { /* Type qualifiers on a function return type are normally permitted by the standard but have no effect, so give a warning at -Wignored-qualifiers. Qualifiers on a void return type are banned on function definitions in ISO C; GCC used to use them for noreturn functions. 
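Illustratively, `const int f (void);' only draws the -Wignored-qualifiers warning below, while a definition such as `volatile void g (void) { }' (the old noreturn idiom) gets the pedwarn about a qualified void return type.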
*/ if (VOID_TYPE_P (type) && really_funcdef) pedwarn (loc, 0, "function definition has qualified void return type"); else warning_at (loc, OPT_Wignored_qualifiers, "type qualifiers ignored on function return type"); type = c_build_qualified_type (type, type_quals); } type_quals = TYPE_UNQUALIFIED; type = build_function_type (type, arg_types); declarator = declarator->declarator; /* Set the TYPE_CONTEXTs for each tagged type which is local to the formal parameter list of this FUNCTION_TYPE to point to the FUNCTION_TYPE node itself. */ { c_arg_tag *tag; unsigned ix; FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag) TYPE_CONTEXT (tag->type) = type; } break; } case cdk_pointer: { /* Merge any constancy or volatility into the target type for the pointer. */ if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); orig_qual_type = NULL_TREE; size_varies = false; /* When the pointed-to type involves components of variable size, care must be taken to ensure that the size evaluation code is emitted early enough to dominate all the possible later uses and late enough for the variables on which it depends to have been assigned. This is expected to happen automatically when the pointed-to type has a name/declaration of its own, but special attention is required if the type is anonymous. We handle the NORMAL and FIELD contexts here by attaching an artificial TYPE_DECL to such pointed-to type. This forces the sizes evaluation at a safe point and ensures it is not deferred until e.g. within a deeper conditional context. We expect nothing to be needed here for PARM or TYPENAME. Pushing a TYPE_DECL at this point for TYPENAME would actually be incorrect, as we might be in the middle of an expression with side effects on the pointed-to type size "arguments" prior to the pointer declaration point and the fake TYPE_DECL in the enclosing context would force the size evaluation prior to the side effects. */ if (!TYPE_NAME (type) && (decl_context == NORMAL || decl_context == FIELD) && variably_modified_type_p (type, NULL_TREE)) { tree decl = build_decl (loc, TYPE_DECL, NULL_TREE, type); DECL_ARTIFICIAL (decl) = 1; pushdecl (decl); finish_decl (decl, loc, NULL_TREE, NULL_TREE, NULL_TREE); TYPE_NAME (type) = decl; } type = c_build_pointer_type (type); /* Process type qualifiers (such as const or volatile) that were given inside the `*'. */ type_quals = declarator->u.pointer_quals; declarator = declarator->declarator; break; } default: gcc_unreachable (); } } *decl_attrs = chainon (returned_attrs, *decl_attrs); /* Now TYPE has the actual type, apart from any qualifiers in TYPE_QUALS. */ /* Warn about address space used for things other than static memory or pointers. 
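Named address spaces are a GNU extension (for instance `__flash' on AVR; the concrete space is an assumption for illustration only); something like

       void f (void) { __flash char c; }

   cannot be honored for an automatic variable, which is what the `specified for auto variable' diagnostic below rejects.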
*/ address_space = DECODE_QUAL_ADDR_SPACE (type_quals); if (!ADDR_SPACE_GENERIC_P (address_space)) { if (decl_context == NORMAL) { switch (storage_class) { case csc_auto: error ("%qs combined with %<auto%> qualifier for %qE", c_addr_space_name (address_space), name); break; case csc_register: error ("%qs combined with %<register%> qualifier for %qE", c_addr_space_name (address_space), name); break; case csc_none: if (current_function_scope) { error ("%qs specified for auto variable %qE", c_addr_space_name (address_space), name); break; } break; case csc_static: case csc_extern: case csc_typedef: break; default: gcc_unreachable (); } } else if (decl_context == PARM && TREE_CODE (type) != ARRAY_TYPE) { if (name) error ("%qs specified for parameter %qE", c_addr_space_name (address_space), name); else error ("%qs specified for unnamed parameter", c_addr_space_name (address_space)); } else if (decl_context == FIELD) { if (name) error ("%qs specified for structure field %qE", c_addr_space_name (address_space), name); else error ("%qs specified for structure field", c_addr_space_name (address_space)); } } /* Check the type and width of a bit-field. */ if (bitfield) { check_bitfield_type_and_width (&type, width, name); /* C11 makes it implementation-defined (6.7.2.1#5) whether atomic types are permitted for bit-fields; we have no code to make bit-field accesses atomic, so disallow them. */ if (type_quals & TYPE_QUAL_ATOMIC) { if (name) error ("bit-field %qE has atomic type", name); else error ("bit-field has atomic type"); type_quals &= ~TYPE_QUAL_ATOMIC; } } /* Reject invalid uses of _Alignas. */ if (declspecs->alignas_p) { if (storage_class == csc_typedef) error_at (loc, "alignment specified for typedef %qE", name); else if (storage_class == csc_register) error_at (loc, "alignment specified for %<register%> object %qE", name); else if (decl_context == PARM) { if (name) error_at (loc, "alignment specified for parameter %qE", name); else error_at (loc, "alignment specified for unnamed parameter"); } else if (bitfield) { if (name) error_at (loc, "alignment specified for bit-field %qE", name); else error_at (loc, "alignment specified for unnamed bit-field"); } else if (TREE_CODE (type) == FUNCTION_TYPE) error_at (loc, "alignment specified for function %qE", name); else if (declspecs->align_log != -1) { alignas_align = 1U << declspecs->align_log; if (alignas_align < min_align_of_type (type)) { if (name) error_at (loc, "%<_Alignas%> specifiers cannot reduce " "alignment of %qE", name); else error_at (loc, "%<_Alignas%> specifiers cannot reduce " "alignment of unnamed field"); alignas_align = 0; } } } /* Did array size calculations overflow or does the array cover more than half of the address-space? */ if (TREE_CODE (type) == ARRAY_TYPE && COMPLETE_TYPE_P (type) && TREE_CODE (TYPE_SIZE_UNIT (type)) == INTEGER_CST && ! valid_constant_size_p (TYPE_SIZE_UNIT (type))) { if (name) error_at (loc, "size of array %qE is too large", name); else error_at (loc, "size of unnamed array is too large"); /* If we proceed with the array type as it is, we'll eventually crash in tree_to_[su]hwi(). */ type = error_mark_node; } /* If this is declaring a typedef name, return a TYPE_DECL. 
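A C++ compatibility subtlety handled below, for illustration: `typedef struct tag { int i; } tag;' is fine in both languages, but reusing one identifier as a typedef for a type different from its tag, as in

       struct t { int i; };
       typedef double t;

   is valid C yet ill-formed C++, which is what the -Wc++-compat warning catches.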
*/ if (storage_class == csc_typedef) { tree decl; if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); decl = build_decl (declarator->id_loc, TYPE_DECL, declarator->u.id, type); if (declspecs->explicit_signed_p) C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1; if (declspecs->inline_p) pedwarn (loc, 0, "typedef %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0, "typedef %q+D declared %<_Noreturn%>", decl); if (warn_cxx_compat && declarator->u.id != NULL_TREE) { struct c_binding *b = I_TAG_BINDING (declarator->u.id); if (b != NULL && b->decl != NULL_TREE && (B_IN_CURRENT_SCOPE (b) || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b))) && TYPE_MAIN_VARIANT (b->decl) != TYPE_MAIN_VARIANT (type)) { warning_at (declarator->id_loc, OPT_Wc___compat, ("using %qD as both a typedef and a tag is " "invalid in C++"), decl); if (b->locus != UNKNOWN_LOCATION) inform (b->locus, "originally defined here"); } } return decl; } /* If this is a type name (such as in a cast or sizeof), compute the type and return it now. */ if (decl_context == TYPENAME) { /* Note that the grammar rejects storage classes in typenames and fields. */ gcc_assert (storage_class == csc_none && !threadp && !declspecs->inline_p && !declspecs->noreturn_p); if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids const or volatile function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); return type; } if (pedantic && decl_context == FIELD && variably_modified_type_p (type, NULL_TREE)) { /* C99 6.7.2.1p8 */ pedwarn (loc, OPT_Wpedantic, "a member of a structure or union cannot " "have a variably modified type"); } /* Aside from typedefs and type names (handled above), `void' at top level (not within pointer) is allowed only in public variables. We don't complain about parms either, but that is because a better error message can be made later. */ if (VOID_TYPE_P (type) && decl_context != PARM && !((decl_context != FIELD && TREE_CODE (type) != FUNCTION_TYPE) && (storage_class == csc_extern || (current_scope == file_scope && !(storage_class == csc_static || storage_class == csc_register))))) { error_at (loc, "variable or field %qE declared void", name); type = integer_type_node; } /* Now create the decl, which may be a VAR_DECL, a PARM_DECL or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */ { tree decl; if (decl_context == PARM) { tree promoted_type; bool array_parameter_p = false; /* A parameter declared as an array of T is really a pointer to T. One declared as a function is really a pointer to a function. */ if (TREE_CODE (type) == ARRAY_TYPE) { /* Transfer const-ness of array into that of type pointed to. 
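That is, a parameter written `const int a[10]' arrives here as array-of-const-int and leaves as `const int *a'; qualifiers (and `static') written inside the brackets, as in `int a[const static 5]', instead land on the pointer itself via array_ptr_quals just below.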
*/ type = TREE_TYPE (type); if (orig_qual_type != NULL_TREE) { if (orig_qual_indirect == 0) orig_qual_type = TREE_TYPE (orig_qual_type); else orig_qual_indirect--; } if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); type = c_build_pointer_type (type); type_quals = array_ptr_quals; if (type_quals) type = c_build_qualified_type (type, type_quals); /* We don't yet implement attributes in this context. */ if (array_ptr_attrs != NULL_TREE) warning_at (loc, OPT_Wattributes, "attributes in parameter array declarator ignored"); size_varies = false; array_parameter_p = true; } else if (TREE_CODE (type) == FUNCTION_TYPE) { if (type_quals & TYPE_QUAL_ATOMIC) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals); type = c_build_pointer_type (type); type_quals = TYPE_UNQUALIFIED; } else if (type_quals) type = c_build_qualified_type (type, type_quals); decl = build_decl (declarator->id_loc, PARM_DECL, declarator->u.id, type); if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; C_ARRAY_PARAMETER (decl) = array_parameter_p; /* Compute the type actually passed in the parmlist, for the case where there is no prototype. (For example, shorts and chars are passed as ints.) When there is a prototype, this is overridden later. */ if (type == error_mark_node) promoted_type = type; else promoted_type = c_type_promotes_to (type); DECL_ARG_TYPE (decl) = promoted_type; if (declspecs->inline_p) pedwarn (loc, 0, "parameter %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0, "parameter %q+D declared %<_Noreturn%>", decl); } else if (decl_context == FIELD) { /* Note that the grammar rejects storage classes in typenames and fields. */ gcc_assert (storage_class == csc_none && !threadp && !declspecs->inline_p && !declspecs->noreturn_p); /* Structure field. It may not be a function. */ if (TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "field %qE declared as a function", name); type = build_pointer_type (type); } else if (TREE_CODE (type) != ERROR_MARK && !COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (type)) { if (name) error_at (loc, "field %qE has incomplete type", name); else error_at (loc, "unnamed field has incomplete type"); type = error_mark_node; } else if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == NULL_TREE) { /* We have a flexible array member through a typedef. Set suitable range. Whether this is a correct position for a flexible array member will be determined elsewhere. */ if (!in_system_header_at (input_location)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not " "support flexible array members"); type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_DOMAIN (type) = build_range_type (sizetype, size_zero_node, NULL_TREE); } type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); decl = build_decl (declarator->id_loc, FIELD_DECL, declarator->u.id, type); DECL_NONADDRESSABLE_P (decl) = bitfield; if (bitfield && !declarator->u.id) TREE_NO_WARNING (decl) = 1; if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; } else if (TREE_CODE (type) == FUNCTION_TYPE) { if (storage_class == csc_register || threadp) { error_at (loc, "invalid storage class for function %qE", name); } else if (current_scope != file_scope) { /* Function declaration not at file scope. 
Storage classes other than `extern' are not allowed, C99 6.7.1p5, and `extern' makes no difference. However, GCC allows 'auto', perhaps with 'inline', to support nested functions. */ if (storage_class == csc_auto) pedwarn (loc, OPT_Wpedantic, "invalid storage class for function %qE", name); else if (storage_class == csc_static) { error_at (loc, "invalid storage class for function %qE", name); if (funcdef_flag) storage_class = declspecs->storage_class = csc_none; else return 0; } } decl = build_decl (declarator->id_loc, FUNCTION_DECL, declarator->u.id, type); decl = build_decl_attribute_variant (decl, decl_attr); if (type_quals & TYPE_QUAL_ATOMIC) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && type_quals && !DECL_IN_SYSTEM_HEADER (decl)) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); /* Every function declaration is an external reference (DECL_EXTERNAL) except for those which are not at file scope and are explicitly declared "auto". This is forbidden by standard C (C99 6.7.1p5) and is interpreted by GCC to signify a forward declaration of a nested function. */ if (storage_class == csc_auto && current_scope != file_scope) DECL_EXTERNAL (decl) = 0; /* In C99, a function which is declared 'inline' with 'extern' is not an external reference (which is confusing). It means that the later definition of the function must be output in this file, C99 6.7.4p6. In GNU C89, a function declared 'extern inline' is an external reference. */ else if (declspecs->inline_p && storage_class != csc_static) DECL_EXTERNAL (decl) = ((storage_class == csc_extern) == flag_gnu89_inline); else DECL_EXTERNAL (decl) = !initialized; /* Record absence of global scope for `static' or `auto'. */ TREE_PUBLIC (decl) = !(storage_class == csc_static || storage_class == csc_auto); /* For a function definition, record the argument information block where store_parm_decls will look for it. */ if (funcdef_flag) current_function_arg_info = arg_info; if (declspecs->default_int_p) C_FUNCTION_IMPLICIT_INT (decl) = 1; /* Record presence of `inline' and `_Noreturn', if it is reasonable. */ if (flag_hosted && MAIN_NAME_P (declarator->u.id)) { if (declspecs->inline_p) pedwarn (loc, 0, "cannot inline function %<main%>"); if (declspecs->noreturn_p) pedwarn (loc, 0, "%<main%> declared %<_Noreturn%>"); } else { if (declspecs->inline_p) /* Record that the function is declared `inline'. */ DECL_DECLARED_INLINE_P (decl) = 1; if (declspecs->noreturn_p) { if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 does not support %<_Noreturn%>"); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 does not support %<_Noreturn%>"); TREE_THIS_VOLATILE (decl) = 1; } } } else { /* It's a variable. */ /* An uninitialized decl with `extern' is a reference. */ int extern_ref = !initialized && storage_class == csc_extern; type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); /* C99 6.2.2p7: It is invalid (compile-time undefined behavior) to create an 'extern' declaration for a variable if there is a global declaration that is 'static' and the global declaration is not visible. (If the static declaration _is_ currently visible, the 'extern' declaration is taken to refer to that decl.) 
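A sketch of the invalid case diagnosed below:

       static int n;
       void f (void)
       {
         int n;                    (hides the file-scope static)
         {
           extern int n;           (C99 6.2.2p7; error below)
         }
       }

   here the visible declaration is the block-scope `int n', not the file-scope static one, so global_decl differs from visible_decl and the error fires.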
*/ if (extern_ref && current_scope != file_scope) { tree global_decl = identifier_global_value (declarator->u.id); tree visible_decl = lookup_name (declarator->u.id); if (global_decl && global_decl != visible_decl && TREE_CODE (global_decl) == VAR_DECL && !TREE_PUBLIC (global_decl)) error_at (loc, "variable previously declared %<static%> " "redeclared %<extern%>"); } decl = build_decl (declarator->id_loc, VAR_DECL, declarator->u.id, type); if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; if (declspecs->inline_p) pedwarn (loc, 0, "variable %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0, "variable %q+D declared %<_Noreturn%>", decl); /* At file scope, an initialized extern declaration may follow a static declaration. In that case, DECL_EXTERNAL will be reset later in start_decl. */ DECL_EXTERNAL (decl) = (storage_class == csc_extern); /* At file scope, the presence of a `static' or `register' storage class specifier, or the absence of all storage class specifiers makes this declaration a definition (perhaps tentative). Also, the absence of `static' makes it public. */ if (current_scope == file_scope) { TREE_PUBLIC (decl) = storage_class != csc_static; TREE_STATIC (decl) = !extern_ref; } /* Not at file scope, only `static' makes a static definition. */ else { TREE_STATIC (decl) = (storage_class == csc_static); TREE_PUBLIC (decl) = extern_ref; } if (threadp) set_decl_tls_model (decl, decl_default_tls_model (decl)); } if ((storage_class == csc_extern || (storage_class == csc_none && TREE_CODE (type) == FUNCTION_TYPE && !funcdef_flag)) && variably_modified_type_p (type, NULL_TREE)) { /* C99 6.7.5.2p2 */ if (TREE_CODE (type) == FUNCTION_TYPE) error_at (loc, "non-nested function with variably modified type"); else error_at (loc, "object with variably modified type must have " "no linkage"); } /* Record `register' declaration for warnings on & and in case doing stupid register allocation. */ if (storage_class == csc_register) { C_DECL_REGISTER (decl) = 1; DECL_REGISTER (decl) = 1; } /* Record constancy and volatility. */ c_apply_type_quals_to_decl (type_quals, decl); /* Apply _Alignas specifiers. */ if (alignas_align) { DECL_ALIGN (decl) = alignas_align * BITS_PER_UNIT; DECL_USER_ALIGN (decl) = 1; } /* If a type has volatile components, it should be stored in memory. Otherwise, the fact that those components are volatile will be ignored, and would even crash the compiler. Of course, this only makes sense on VAR,PARM, and RESULT decl's. */ if (C_TYPE_FIELDS_VOLATILE (TREE_TYPE (decl)) && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == RESULT_DECL)) { /* It is not an error for a structure with volatile fields to be declared register, but reset DECL_REGISTER since it cannot actually go in a register. */ int was_reg = C_DECL_REGISTER (decl); C_DECL_REGISTER (decl) = 0; DECL_REGISTER (decl) = 0; c_mark_addressable (decl); C_DECL_REGISTER (decl) = was_reg; } /* This is the earliest point at which we might know the assembler name of a variable. Thus, if it's known before this, die horribly. 
*/ gcc_assert (!DECL_ASSEMBLER_NAME_SET_P (decl)); if (warn_cxx_compat && TREE_CODE (decl) == VAR_DECL && TREE_PUBLIC (decl) && TREE_STATIC (decl) && (TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (decl)) == UNION_TYPE || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE) && TYPE_NAME (TREE_TYPE (decl)) == NULL_TREE) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat, ("non-local variable %qD with anonymous type is " "questionable in C++"), decl); return decl; } } /* Decode the parameter-list info for a function type or function definition. The argument is the value returned by `get_parm_info' (or made in c-parse.c if there is an identifier list instead of a parameter decl list). These two functions are separate because when a function returns or receives functions then each is called multiple times but the order of calls is different. The last call to `grokparms' is always the one that contains the formal parameter names of a function definition. Return a list of arg types to use in the FUNCTION_TYPE for this function. FUNCDEF_FLAG is true for a function definition, false for a mere declaration. A nonempty identifier-list gets an error message when FUNCDEF_FLAG is false. */ static tree grokparms (struct c_arg_info *arg_info, bool funcdef_flag) { tree arg_types = arg_info->types; if (funcdef_flag && arg_info->had_vla_unspec) { /* A function definition isn't function prototype scope C99 6.2.1p4. */ /* C99 6.7.5.2p4 */ error ("%<[*]%> not allowed in other than function prototype scope"); } if (arg_types == 0 && !funcdef_flag && !in_system_header_at (input_location)) warning (OPT_Wstrict_prototypes, "function declaration isn%'t a prototype"); if (arg_types == error_mark_node) return 0; /* don't set TYPE_ARG_TYPES in this case */ else if (arg_types && TREE_CODE (TREE_VALUE (arg_types)) == IDENTIFIER_NODE) { if (!funcdef_flag) { pedwarn (input_location, 0, "parameter names (without types) in function declaration"); arg_info->parms = NULL_TREE; } else arg_info->parms = arg_info->types; arg_info->types = 0; return 0; } else { tree parm, type, typelt; unsigned int parmno; const char *errmsg; /* If there is a parameter of incomplete type in a definition, this is an error. In a declaration this is valid, and a struct or union type may be completed later, before any calls or definition of the function. In the case where the tag was first declared within the parameter list, a warning has already been given. If a parameter has void type, then however the function cannot be defined or called, so warn. 
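Concretely, assuming a never-completed `struct s;': the definition `void f (struct s arg) { }' must be rejected with the incomplete-type error, the same prototype as a mere declaration stays valid because the struct may still be completed before any call, and a named parameter of void type, as in `void g (void arg);', gets only the warning.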
*/ for (parm = arg_info->parms, typelt = arg_types, parmno = 1; parm; parm = DECL_CHAIN (parm), typelt = TREE_CHAIN (typelt), parmno++) { type = TREE_VALUE (typelt); if (type == error_mark_node) continue; if (!COMPLETE_TYPE_P (type)) { if (funcdef_flag) { if (DECL_NAME (parm)) error_at (input_location, "parameter %u (%q+D) has incomplete type", parmno, parm); else error_at (DECL_SOURCE_LOCATION (parm), "parameter %u has incomplete type", parmno); TREE_VALUE (typelt) = error_mark_node; TREE_TYPE (parm) = error_mark_node; arg_types = NULL_TREE; } else if (VOID_TYPE_P (type)) { if (DECL_NAME (parm)) warning_at (input_location, 0, "parameter %u (%q+D) has void type", parmno, parm); else warning_at (DECL_SOURCE_LOCATION (parm), 0, "parameter %u has void type", parmno); } } errmsg = targetm.invalid_parameter_type (type); if (errmsg) { error (errmsg); TREE_VALUE (typelt) = error_mark_node; TREE_TYPE (parm) = error_mark_node; arg_types = NULL_TREE; } if (DECL_NAME (parm) && TREE_USED (parm)) warn_if_shadowing (parm); } return arg_types; } } /* Allocate and initialize a c_arg_info structure from the parser's obstack. */ struct c_arg_info * build_arg_info (void) { struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info); ret->parms = NULL_TREE; ret->tags = NULL; ret->types = NULL_TREE; ret->others = NULL_TREE; ret->pending_sizes = NULL; ret->had_vla_unspec = 0; return ret; } /* Take apart the current scope and return a c_arg_info structure with info on a parameter list just parsed. This structure is later fed to 'grokparms' and 'store_parm_decls'. ELLIPSIS being true means the argument list ended in '...' so don't append a sentinel (void_list_node) to the end of the type-list. EXPR is NULL or an expression that needs to be evaluated for the side effects of array size expressions in the parameters. */ struct c_arg_info * get_parm_info (bool ellipsis, tree expr) { struct c_binding *b = current_scope->bindings; struct c_arg_info *arg_info = build_arg_info (); tree parms = 0; vec<c_arg_tag, va_gc> *tags = NULL; tree types = 0; tree others = 0; static bool explained_incomplete_types = false; bool gave_void_only_once_err = false; arg_info->had_vla_unspec = current_scope->had_vla_unspec; /* The bindings in this scope must not get put into a block. We will take care of deleting the binding nodes. */ current_scope->bindings = 0; /* This function is only called if there was *something* on the parameter list. */ gcc_assert (b); /* A parameter list consisting solely of 'void' indicates that the function takes no arguments. But if the 'void' is qualified (by 'const' or 'volatile'), or has a storage class specifier ('register'), then the behavior is undefined; issue an error. Typedefs for 'void' are OK (see DR#157). */ if (b->prev == 0 /* one binding */ && TREE_CODE (b->decl) == PARM_DECL /* which is a parameter */ && !DECL_NAME (b->decl) /* anonymous */ && VOID_TYPE_P (TREE_TYPE (b->decl))) /* of void type */ { if (TYPE_QUALS (TREE_TYPE (b->decl)) != TYPE_UNQUALIFIED || C_DECL_REGISTER (b->decl)) error ("%<void%> as only parameter may not be qualified"); /* There cannot be an ellipsis. */ if (ellipsis) error ("%<void%> must be the only parameter"); arg_info->types = void_list_node; return arg_info; } if (!ellipsis) types = void_list_node; /* Break up the bindings list into parms, tags, types, and others; apply sanity checks; purge the name-to-decl bindings. 
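Each binding seen here is one of: a PARM_DECL; a struct, union or enum tag declared inside the list, as in

       void f (struct s { int i; } arg);

   which is warned about below; an embedded CONST_DECL, TYPE_DECL or implicitly declared FUNCTION_DECL; or an ERROR_MARK left by an undeclared name.  The switch in the loop dispatches on TREE_CODE accordingly.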
*/ while (b) { tree decl = b->decl; tree type = TREE_TYPE (decl); c_arg_tag tag; const char *keyword; switch (TREE_CODE (decl)) { case PARM_DECL: if (b->id) { gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; } /* Check for forward decls that never got their actual decl. */ if (TREE_ASM_WRITTEN (decl)) error ("parameter %q+D has just a forward declaration", decl); /* Check for (..., void, ...) and issue an error. */ else if (VOID_TYPE_P (type) && !DECL_NAME (decl)) { if (!gave_void_only_once_err) { error ("%<void%> must be the only parameter"); gave_void_only_once_err = true; } } else { /* Valid parameter, add it to the list. */ DECL_CHAIN (decl) = parms; parms = decl; /* Since there is a prototype, args are passed in their declared types. The back end may override this later. */ DECL_ARG_TYPE (decl) = type; types = tree_cons (0, type, types); } break; case ENUMERAL_TYPE: keyword = "enum"; goto tag; case UNION_TYPE: keyword = "union"; goto tag; case RECORD_TYPE: keyword = "struct"; goto tag; tag: /* Types may not have tag-names, in which case the type appears in the bindings list with b->id NULL. */ if (b->id) { gcc_assert (I_TAG_BINDING (b->id) == b); I_TAG_BINDING (b->id) = b->shadowed; } /* Warn about any struct, union or enum tags defined in a parameter list. The scope of such types is limited to the parameter list, which is rarely if ever desirable (it's impossible to call such a function with type-correct arguments). An anonymous union parm type is meaningful as a GNU extension, so don't warn for that. */ if (TREE_CODE (decl) != UNION_TYPE || b->id != 0) { if (b->id) /* The %s will be one of 'struct', 'union', or 'enum'. */ warning (0, "%<%s %E%> declared inside parameter list", keyword, b->id); else /* The %s will be one of 'struct', 'union', or 'enum'. */ warning (0, "anonymous %s declared inside parameter list", keyword); if (!explained_incomplete_types) { warning (0, "its scope is only this definition or declaration," " which is probably not what you want"); explained_incomplete_types = true; } } tag.id = b->id; tag.type = decl; vec_safe_push (tags, tag); break; case CONST_DECL: case TYPE_DECL: case FUNCTION_DECL: /* CONST_DECLs appear here when we have an embedded enum, and TYPE_DECLs appear here when we have an embedded struct or union. No warnings for this - we already warned about the type itself. FUNCTION_DECLs appear when there is an implicit function declaration in the parameter list. */ /* When we reinsert this decl in the function body, we need to reconstruct whether it was marked as nested. */ gcc_assert (TREE_CODE (decl) == FUNCTION_DECL ? b->nested : !b->nested); DECL_CHAIN (decl) = others; others = decl; /* fall through */ case ERROR_MARK: /* error_mark_node appears here when we have an undeclared variable. Just throw it away. */ if (b->id) { gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; } break; /* Other things that might be encountered. */ case LABEL_DECL: case VAR_DECL: default: gcc_unreachable (); } b = free_binding_and_advance (b); } arg_info->parms = parms; arg_info->tags = tags; arg_info->types = types; arg_info->others = others; arg_info->pending_sizes = expr; return arg_info; } /* Get the struct, enum or union (CODE says which) with tag NAME. Define the tag as a forward-reference with location LOC if it is not defined. Return a c_typespec structure for the type specifier. 
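For illustration, a first mention such as

       struct foo *p;

   creates the forward reference (kind ctsk_tagfirstref); a later `struct foo { int i; };' then turns that node into the real type, and subsequent references get ctsk_tagref.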
*/ struct c_typespec parser_xref_tag (location_t loc, enum tree_code code, tree name) { struct c_typespec ret; tree ref; location_t refloc; ret.expr = NULL_TREE; ret.expr_const_operands = true; /* If a cross reference is requested, look up the type already defined for this tag and return it. */ ref = lookup_tag (code, name, 0, &refloc); /* If this is the right type of tag, return what we found. (This reference will be shadowed by shadow_tag later if appropriate.) If this is the wrong type of tag, do not return it. If it was the wrong type in the same scope, we will have had an error message already; if in a different scope and declaring a name, pending_xref_error will give an error message; but if in a different scope and not declaring a name, this tag should shadow the previous declaration of a different type of tag, and this would not work properly if we return the reference found. (For example, with "struct foo" in an outer scope, "union foo;" must shadow that tag with a new one of union type.) */ ret.kind = (ref ? ctsk_tagref : ctsk_tagfirstref); if (ref && TREE_CODE (ref) == code) { if (C_TYPE_DEFINED_IN_STRUCT (ref) && loc != UNKNOWN_LOCATION && warn_cxx_compat) { switch (code) { case ENUMERAL_TYPE: warning_at (loc, OPT_Wc___compat, ("enum type defined in struct or union " "is not visible in C++")); inform (refloc, "enum type defined here"); break; case RECORD_TYPE: warning_at (loc, OPT_Wc___compat, ("struct defined in struct or union " "is not visible in C++")); inform (refloc, "struct defined here"); break; case UNION_TYPE: warning_at (loc, OPT_Wc___compat, ("union defined in struct or union " "is not visible in C++")); inform (refloc, "union defined here"); break; default: gcc_unreachable(); } } ret.spec = ref; return ret; } /* If no such tag is yet defined, create a forward-reference node and record it as the "definition". When a real declaration of this type is found, the forward-reference will be altered into a real type. */ ref = make_node (code); if (code == ENUMERAL_TYPE) { /* Give the type a default layout like unsigned int to avoid crashing if it does not get defined. */ SET_TYPE_MODE (ref, TYPE_MODE (unsigned_type_node)); TYPE_ALIGN (ref) = TYPE_ALIGN (unsigned_type_node); TYPE_USER_ALIGN (ref) = 0; TYPE_UNSIGNED (ref) = 1; TYPE_PRECISION (ref) = TYPE_PRECISION (unsigned_type_node); TYPE_MIN_VALUE (ref) = TYPE_MIN_VALUE (unsigned_type_node); TYPE_MAX_VALUE (ref) = TYPE_MAX_VALUE (unsigned_type_node); } pushtag (loc, name, ref); ret.spec = ref; return ret; } /* Get the struct, enum or union (CODE says which) with tag NAME. Define the tag as a forward-reference if it is not defined. Return a tree for the type. */ tree xref_tag (enum tree_code code, tree name) { return parser_xref_tag (input_location, code, name).spec; } /* Make sure that the tag NAME is defined *in the current scope* at least as a forward reference. LOC is the location of the struct's definition. CODE says which kind of tag NAME ought to be. This stores the current value of the file static STRUCT_PARSE_INFO in *ENCLOSING_STRUCT_PARSE_INFO, and points STRUCT_PARSE_INFO at a new c_struct_parse_info structure. The old value of STRUCT_PARSE_INFO is restored in finish_struct. */ tree start_struct (location_t loc, enum tree_code code, tree name, struct c_struct_parse_info **enclosing_struct_parse_info) { /* If there is already a tag defined at this scope (as a forward reference), just return it. 
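   E.g. in the hypothetical sequence

     struct s;              creates the tag as a forward reference
     struct s { int i; };   start_struct finds that tag and completes it

   the second declaration reuses the node made for the first.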
*/ tree ref = NULL_TREE; location_t refloc = UNKNOWN_LOCATION; if (name != NULL_TREE) ref = lookup_tag (code, name, 1, &refloc); if (ref && TREE_CODE (ref) == code) { if (TYPE_SIZE (ref)) { if (code == UNION_TYPE) error_at (loc, "redefinition of %<union %E%>", name); else error_at (loc, "redefinition of %<struct %E%>", name); if (refloc != UNKNOWN_LOCATION) inform (refloc, "originally defined here"); /* Don't create structures using a name already in use. */ ref = NULL_TREE; } else if (C_TYPE_BEING_DEFINED (ref)) { if (code == UNION_TYPE) error_at (loc, "nested redefinition of %<union %E%>", name); else error_at (loc, "nested redefinition of %<struct %E%>", name); /* Don't bother to report "originally defined here" for a nested redefinition; the original definition should be obvious. */ /* Don't create structures that contain themselves. */ ref = NULL_TREE; } } /* Otherwise create a forward-reference just so the tag is in scope. */ if (ref == NULL_TREE || TREE_CODE (ref) != code) { ref = make_node (code); pushtag (loc, name, ref); } C_TYPE_BEING_DEFINED (ref) = 1; TYPE_PACKED (ref) = flag_pack_struct; *enclosing_struct_parse_info = struct_parse_info; struct_parse_info = XNEW (struct c_struct_parse_info); struct_parse_info->struct_types.create (0); struct_parse_info->fields.create (0); struct_parse_info->typedefs_seen.create (0); /* FIXME: This will issue a warning for a use of a type defined within a statement expr used within sizeof, et. al. This is not terribly serious as C++ doesn't permit statement exprs within sizeof anyhow. */ if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof)) warning_at (loc, OPT_Wc___compat, "defining type in %qs expression is invalid in C++", (in_sizeof ? "sizeof" : (in_typeof ? "typeof" : "alignof"))); return ref; } /* Process the specs, declarator and width (NULL if omitted) of a structure component, returning a FIELD_DECL node. WIDTH is non-NULL for bit-fields only, and is an INTEGER_CST node. DECL_ATTRS is as for grokdeclarator. LOC is the location of the structure component. This is done during the parsing of the struct declaration. The FIELD_DECL nodes are chained together and the lot of them are ultimately passed to `build_struct' to make the RECORD_TYPE node. */ tree grokfield (location_t loc, struct c_declarator *declarator, struct c_declspecs *declspecs, tree width, tree *decl_attrs) { tree value; if (declarator->kind == cdk_id && declarator->u.id == NULL_TREE && width == NULL_TREE) { /* This is an unnamed decl. If we have something of the form "union { list } ;" then this is the anonymous union extension. Similarly for struct. If this is something of the form "struct foo;", then If MS or Plan 9 extensions are enabled, this is handled as an anonymous struct. Otherwise this is a forward declaration of a structure tag. If this is something of the form "foo;" and foo is a TYPE_DECL, then If foo names a structure or union without a tag, then this is an anonymous struct (this is permitted by C11). If MS or Plan 9 extensions are enabled and foo names a structure, then again this is an anonymous struct. Otherwise this is an error. Oh what a horrid tangled web we weave. I wonder if MS consciously took this from Plan 9 or if it was an accident of implementation that took root before someone noticed the bug... 
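   To make the cases concrete (hypothetical examples): in C11

     struct outer { struct { int x; }; int y; };

   declares an anonymous struct whose member X is accessed as a member
   of OUTER, whereas giving the inner type a tag or a typedef name is
   accepted as an anonymous field only with -fms-extensions or
   -fplan9-extensions.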
*/ tree type = declspecs->type; bool type_ok = (TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE); bool ok = false; if (type_ok && (flag_ms_extensions || flag_plan9_extensions || !declspecs->typedef_p)) { if (flag_ms_extensions || flag_plan9_extensions) ok = true; else if (TYPE_NAME (type) == NULL) ok = true; else ok = false; } if (!ok) { pedwarn (loc, 0, "declaration does not declare anything"); return NULL_TREE; } if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 doesn%'t support unnamed structs/unions"); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 doesn%'t support unnamed structs/unions"); } value = grokdeclarator (declarator, declspecs, FIELD, false, width ? &width : NULL, decl_attrs, NULL, NULL, DEPRECATED_NORMAL); finish_decl (value, loc, NULL_TREE, NULL_TREE, NULL_TREE); DECL_INITIAL (value) = width; if (warn_cxx_compat && DECL_NAME (value) != NULL_TREE) { /* If we currently have a binding for this field, set the in_struct field in the binding, so that we warn about lookups which find it. */ struct c_binding *b = I_SYMBOL_BINDING (DECL_NAME (value)); if (b != NULL) { /* If the in_struct field is not yet set, push it on a list to be cleared when this struct is finished. */ if (!b->in_struct) { struct_parse_info->fields.safe_push (b); b->in_struct = 1; } } } return value; } /* Subroutine of detect_field_duplicates: return whether X and Y, which are both fields in the same struct, have duplicate field names. */ static bool is_duplicate_field (tree x, tree y) { if (DECL_NAME (x) != NULL_TREE && DECL_NAME (x) == DECL_NAME (y)) return true; /* When using -fplan9-extensions, an anonymous field whose name is a typedef can duplicate a field name. */ if (flag_plan9_extensions && (DECL_NAME (x) == NULL_TREE || DECL_NAME (y) == NULL_TREE)) { tree xt, xn, yt, yn; xt = TREE_TYPE (x); if (DECL_NAME (x) != NULL_TREE) xn = DECL_NAME (x); else if ((TREE_CODE (xt) == RECORD_TYPE || TREE_CODE (xt) == UNION_TYPE) && TYPE_NAME (xt) != NULL_TREE && TREE_CODE (TYPE_NAME (xt)) == TYPE_DECL) xn = DECL_NAME (TYPE_NAME (xt)); else xn = NULL_TREE; yt = TREE_TYPE (y); if (DECL_NAME (y) != NULL_TREE) yn = DECL_NAME (y); else if ((TREE_CODE (yt) == RECORD_TYPE || TREE_CODE (yt) == UNION_TYPE) && TYPE_NAME (yt) != NULL_TREE && TREE_CODE (TYPE_NAME (yt)) == TYPE_DECL) yn = DECL_NAME (TYPE_NAME (yt)); else yn = NULL_TREE; if (xn != NULL_TREE && xn == yn) return true; } return false; } /* Subroutine of detect_field_duplicates: add the fields of FIELDLIST to HTAB, giving errors for any duplicates. */ static void detect_field_duplicates_hash (tree fieldlist, hash_table<pointer_hash <tree_node> > *htab) { tree x, y; tree_node **slot; for (x = fieldlist; x ; x = DECL_CHAIN (x)) if ((y = DECL_NAME (x)) != 0) { slot = htab->find_slot (y, INSERT); if (*slot) { error ("duplicate member %q+D", x); DECL_NAME (x) = NULL_TREE; } *slot = y; } else if (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE) { detect_field_duplicates_hash (TYPE_FIELDS (TREE_TYPE (x)), htab); /* When using -fplan9-extensions, an anonymous field whose name is a typedef can duplicate a field name. */ if (flag_plan9_extensions && TYPE_NAME (TREE_TYPE (x)) != NULL_TREE && TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL) { tree xn = DECL_NAME (TYPE_NAME (TREE_TYPE (x))); slot = htab->find_slot (xn, INSERT); if (*slot) error ("duplicate member %q+D", TYPE_NAME (TREE_TYPE (x))); *slot = xn; } } } /* Generate an error for any duplicate field names in FIELDLIST. 
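   (E.g. the hypothetical

     struct s { int a; float a; };

   draws "duplicate member" on the second A.)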
Munge the list such that this does not present a problem later. */ static void detect_field_duplicates (tree fieldlist) { tree x, y; int timeout = 10; /* If the struct is the list of instance variables of an Objective-C class, then we need to check all the instance variables of superclasses when checking for duplicates (since you can't have an instance variable in a subclass with the same name as an instance variable in a superclass). We pass on this job to the Objective-C compiler. objc_detect_field_duplicates() will return false if we are not checking the list of instance variables and the C frontend should proceed with the standard field duplicate checks. If we are checking the list of instance variables, the ObjC frontend will do the check, emit the errors if needed, and then return true. */ if (c_dialect_objc ()) if (objc_detect_field_duplicates (false)) return; /* First, see if there are more than "a few" fields. This is trivially true if there are zero or one fields. */ if (!fieldlist || !DECL_CHAIN (fieldlist)) return; x = fieldlist; do { timeout--; if (DECL_NAME (x) == NULL_TREE && (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE)) timeout = 0; x = DECL_CHAIN (x); } while (timeout > 0 && x); /* If there were "few" fields and no anonymous structures or unions, avoid the overhead of allocating a hash table. Instead just do the nested traversal thing. */ if (timeout > 0) { for (x = DECL_CHAIN (fieldlist); x; x = DECL_CHAIN (x)) /* When using -fplan9-extensions, we can have duplicates between typedef names and fields. */ if (DECL_NAME (x) || (flag_plan9_extensions && DECL_NAME (x) == NULL_TREE && (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE) && TYPE_NAME (TREE_TYPE (x)) != NULL_TREE && TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL)) { for (y = fieldlist; y != x; y = TREE_CHAIN (y)) if (is_duplicate_field (y, x)) { error ("duplicate member %q+D", x); DECL_NAME (x) = NULL_TREE; } } } else { hash_table<pointer_hash <tree_node> > htab (37); detect_field_duplicates_hash (fieldlist, &htab); } } /* Finish up struct info used by -Wc++-compat. */ static void warn_cxx_compat_finish_struct (tree fieldlist) { unsigned int ix; tree x; struct c_binding *b; /* Set the C_TYPE_DEFINED_IN_STRUCT flag for each type defined in the current struct. We do this now at the end of the struct because the flag is used to issue visibility warnings, and we only want to issue those warnings if the type is referenced outside of the struct declaration. */ FOR_EACH_VEC_ELT (struct_parse_info->struct_types, ix, x) C_TYPE_DEFINED_IN_STRUCT (x) = 1; /* The TYPEDEFS_SEEN field of STRUCT_PARSE_INFO is a list of typedefs used when declaring fields in this struct. If the name of any of the fields is also a typedef name then the struct would not parse in C++, because the C++ lookup rules say that the typedef name would be looked up in the context of the struct, and would thus be the field rather than the typedef. */ if (!struct_parse_info->typedefs_seen.is_empty () && fieldlist != NULL_TREE) { /* Use a hash_set<tree> using the name of the typedef. We can use a hash_set<tree> because identifiers are interned. 
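   The situation being diagnosed looks like this hypothetical snippet:

     typedef int T;
     struct s { T T; };

   which is valid C, but in C++ the field T would be found instead of
   the typedef when the name is looked up inside the struct.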
*/ hash_set<tree> tset; FOR_EACH_VEC_ELT (struct_parse_info->typedefs_seen, ix, x) tset.add (DECL_NAME (x)); for (x = fieldlist; x != NULL_TREE; x = DECL_CHAIN (x)) { if (DECL_NAME (x) != NULL_TREE && tset.contains (DECL_NAME (x))) { warning_at (DECL_SOURCE_LOCATION (x), OPT_Wc___compat, ("using %qD as both field and typedef name is " "invalid in C++"), x); /* FIXME: It would be nice to report the location where the typedef name is used. */ } } } /* For each field which has a binding and which was not defined in an enclosing struct, clear the in_struct field. */ FOR_EACH_VEC_ELT (struct_parse_info->fields, ix, b) b->in_struct = 0; } /* Fill in the fields of a RECORD_TYPE or UNION_TYPE node, T. LOC is the location of the RECORD_TYPE or UNION_TYPE's definition. FIELDLIST is a chain of FIELD_DECL nodes for the fields. ATTRIBUTES are attributes to be applied to the structure. ENCLOSING_STRUCT_PARSE_INFO is the value of STRUCT_PARSE_INFO when the struct was started. */ tree finish_struct (location_t loc, tree t, tree fieldlist, tree attributes, struct c_struct_parse_info *enclosing_struct_parse_info) { tree x; bool toplevel = file_scope == current_scope; int saw_named_field; /* If this type was previously laid out as a forward reference, make sure we lay it out again. */ TYPE_SIZE (t) = 0; decl_attributes (&t, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE); if (pedantic) { for (x = fieldlist; x; x = DECL_CHAIN (x)) { if (DECL_NAME (x) != 0) break; if (flag_isoc11 && (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE)) break; } if (x == 0) { if (TREE_CODE (t) == UNION_TYPE) { if (fieldlist) pedwarn (loc, OPT_Wpedantic, "union has no named members"); else pedwarn (loc, OPT_Wpedantic, "union has no members"); } else { if (fieldlist) pedwarn (loc, OPT_Wpedantic, "struct has no named members"); else pedwarn (loc, OPT_Wpedantic, "struct has no members"); } } } /* Install struct as DECL_CONTEXT of each field decl. Also process specified field sizes, found in the DECL_INITIAL, storing 0 there after the type has been changed to precision equal to its width, rather than the precision of the specified standard type. (Correct layout requires the original type to have been preserved until now.) */ saw_named_field = 0; for (x = fieldlist; x; x = DECL_CHAIN (x)) { if (TREE_TYPE (x) == error_mark_node) continue; DECL_CONTEXT (x) = t; /* If any field is const, the structure type is pseudo-const. */ if (TREE_READONLY (x)) C_TYPE_FIELDS_READONLY (t) = 1; else { /* A field that is pseudo-const makes the structure likewise. */ tree t1 = strip_array_types (TREE_TYPE (x)); if ((TREE_CODE (t1) == RECORD_TYPE || TREE_CODE (t1) == UNION_TYPE) && C_TYPE_FIELDS_READONLY (t1)) C_TYPE_FIELDS_READONLY (t) = 1; } /* Any field that is volatile means variables of this type must be treated in some ways as volatile. */ if (TREE_THIS_VOLATILE (x)) C_TYPE_FIELDS_VOLATILE (t) = 1; /* Any field of nominal variable size implies structure is too. */ if (C_DECL_VARIABLE_SIZE (x)) C_TYPE_VARIABLE_SIZE (t) = 1; if (DECL_INITIAL (x)) { unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (x)); DECL_SIZE (x) = bitsize_int (width); DECL_BIT_FIELD (x) = 1; SET_DECL_C_BIT_FIELD (x); } if (TYPE_PACKED (t) && (DECL_BIT_FIELD (x) || TYPE_ALIGN (TREE_TYPE (x)) > BITS_PER_UNIT)) DECL_PACKED (x) = 1; /* Detect flexible array member in an invalid context. 
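   Hypothetical examples of what the checks below accept and reject:

     struct ok   { int n; int a[]; };   valid flexible array member
     union  u    { int a[]; };          error: in a union
     struct mid  { int a[]; int n; };   error: not at end of struct
     struct only { int a[]; };          error: otherwise empty struct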
*/ if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE && TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE && TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE) { if (TREE_CODE (t) == UNION_TYPE) { error_at (DECL_SOURCE_LOCATION (x), "flexible array member in union"); TREE_TYPE (x) = error_mark_node; } else if (DECL_CHAIN (x) != NULL_TREE) { error_at (DECL_SOURCE_LOCATION (x), "flexible array member not at end of struct"); TREE_TYPE (x) = error_mark_node; } else if (!saw_named_field) { error_at (DECL_SOURCE_LOCATION (x), "flexible array member in otherwise empty struct"); TREE_TYPE (x) = error_mark_node; } } if (pedantic && TREE_CODE (t) == RECORD_TYPE && flexible_array_type_p (TREE_TYPE (x))) pedwarn (DECL_SOURCE_LOCATION (x), OPT_Wpedantic, "invalid use of structure with flexible array member"); if (DECL_NAME (x) || TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE) saw_named_field = 1; } detect_field_duplicates (fieldlist); /* Now we have the nearly final fieldlist. Record it, then lay out the structure or union (including the fields). */ TYPE_FIELDS (t) = fieldlist; layout_type (t); if (TYPE_SIZE_UNIT (t) && TREE_CODE (TYPE_SIZE_UNIT (t)) == INTEGER_CST && !TREE_OVERFLOW (TYPE_SIZE_UNIT (t)) && !valid_constant_size_p (TYPE_SIZE_UNIT (t))) error ("type %qT is too large", t); /* Give bit-fields their proper types. */ { tree *fieldlistp = &fieldlist; while (*fieldlistp) if (TREE_CODE (*fieldlistp) == FIELD_DECL && DECL_INITIAL (*fieldlistp) && TREE_TYPE (*fieldlistp) != error_mark_node) { unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (*fieldlistp)); tree type = TREE_TYPE (*fieldlistp); if (width != TYPE_PRECISION (type)) { TREE_TYPE (*fieldlistp) = c_build_bitfield_integer_type (width, TYPE_UNSIGNED (type)); DECL_MODE (*fieldlistp) = TYPE_MODE (TREE_TYPE (*fieldlistp)); } DECL_INITIAL (*fieldlistp) = 0; } else fieldlistp = &DECL_CHAIN (*fieldlistp); } /* Now we have the truly final field list. Store it in this type and in the variants. */ TYPE_FIELDS (t) = fieldlist; /* If there are lots of fields, sort so we can look through them fast. We arbitrarily consider 16 or more elts to be "a lot". */ { int len = 0; for (x = fieldlist; x; x = DECL_CHAIN (x)) { if (len > 15 || DECL_NAME (x) == NULL) break; len += 1; } if (len > 15) { tree *field_array; struct lang_type *space; struct sorted_fields_type *space2; len += list_length (x); /* Use the same allocation policy here that make_node uses, to ensure that this lives as long as the rest of the struct decl. All decls in an inline function need to be saved. */ space = ggc_cleared_alloc<struct lang_type> (); space2 = (sorted_fields_type *) ggc_internal_alloc (sizeof (struct sorted_fields_type) + len * sizeof (tree)); len = 0; space->s = space2; field_array = &space2->elts[0]; for (x = fieldlist; x; x = DECL_CHAIN (x)) { field_array[len++] = x; /* If there is anonymous struct or union, break out of the loop. */ if (DECL_NAME (x) == NULL) break; } /* Found no anonymous struct/union. Add the TYPE_LANG_SPECIFIC. 
*/ if (x == NULL) { TYPE_LANG_SPECIFIC (t) = space; TYPE_LANG_SPECIFIC (t)->s->len = len; field_array = TYPE_LANG_SPECIFIC (t)->s->elts; qsort (field_array, len, sizeof (tree), field_decl_cmp); } } } for (x = TYPE_MAIN_VARIANT (t); x; x = TYPE_NEXT_VARIANT (x)) { TYPE_FIELDS (x) = TYPE_FIELDS (t); TYPE_LANG_SPECIFIC (x) = TYPE_LANG_SPECIFIC (t); C_TYPE_FIELDS_READONLY (x) = C_TYPE_FIELDS_READONLY (t); C_TYPE_FIELDS_VOLATILE (x) = C_TYPE_FIELDS_VOLATILE (t); C_TYPE_VARIABLE_SIZE (x) = C_TYPE_VARIABLE_SIZE (t); } /* If this was supposed to be a transparent union, but we can't make it one, warn and turn off the flag. */ if (TREE_CODE (t) == UNION_TYPE && TYPE_TRANSPARENT_AGGR (t) && (!TYPE_FIELDS (t) || TYPE_MODE (t) != DECL_MODE (TYPE_FIELDS (t)))) { TYPE_TRANSPARENT_AGGR (t) = 0; warning_at (loc, 0, "union cannot be made transparent"); } /* If this structure or union completes the type of any previous variable declaration, lay it out and output its rtl. */ for (x = C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t)); x; x = TREE_CHAIN (x)) { tree decl = TREE_VALUE (x); if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) layout_array_type (TREE_TYPE (decl)); if (TREE_CODE (decl) != TYPE_DECL) { layout_decl (decl, 0); if (c_dialect_objc ()) objc_check_decl (decl); rest_of_decl_compilation (decl, toplevel, 0); } } C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t)) = 0; /* Update type location to the one of the definition, instead of e.g. a forward declaration. */ if (TYPE_STUB_DECL (t)) DECL_SOURCE_LOCATION (TYPE_STUB_DECL (t)) = loc; /* Finish debugging output for this type. */ rest_of_type_compilation (t, toplevel); /* If we're inside a function proper, i.e. not file-scope and not still parsing parameters, then arrange for the size of a variable sized type to be bound now. */ if (building_stmt_list_p () && variably_modified_type_p (t, NULL_TREE)) add_stmt (build_stmt (loc, DECL_EXPR, build_decl (loc, TYPE_DECL, NULL, t))); if (warn_cxx_compat) warn_cxx_compat_finish_struct (fieldlist); struct_parse_info->struct_types.release (); struct_parse_info->fields.release (); struct_parse_info->typedefs_seen.release (); XDELETE (struct_parse_info); struct_parse_info = enclosing_struct_parse_info; /* If this struct is defined inside a struct, add it to struct_types. */ if (warn_cxx_compat && struct_parse_info != NULL && !in_sizeof && !in_typeof && !in_alignof) struct_parse_info->struct_types.safe_push (t); return t; } /* Lay out the type T, and its element type, and so on. */ static void layout_array_type (tree t) { if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) layout_array_type (TREE_TYPE (t)); layout_type (t); } /* Begin compiling the definition of an enumeration type. NAME is its name (or null if anonymous). LOC is the enum's location. Returns the type object, as yet incomplete. Also records info about it so that build_enumerator may be used to declare the individual values as they are read. */ tree start_enum (location_t loc, struct c_enum_contents *the_enum, tree name) { tree enumtype = NULL_TREE; location_t enumloc = UNKNOWN_LOCATION; /* If this is the real definition for a previous forward reference, fill in the contents in the same object that used to be the forward reference. 
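   E.g. in the hypothetical sequence (forward references to enum types
   are a GNU extension)

     enum e;            records E as a forward reference
     enum e { A, B };   start_enum completes that same node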
*/ if (name != NULL_TREE) enumtype = lookup_tag (ENUMERAL_TYPE, name, 1, &enumloc); if (enumtype == 0 || TREE_CODE (enumtype) != ENUMERAL_TYPE) { enumtype = make_node (ENUMERAL_TYPE); pushtag (loc, name, enumtype); } if (C_TYPE_BEING_DEFINED (enumtype)) error_at (loc, "nested redefinition of %<enum %E%>", name); C_TYPE_BEING_DEFINED (enumtype) = 1; if (TYPE_VALUES (enumtype) != 0) { /* This enum is a named one that has been declared already. */ error_at (loc, "redeclaration of %<enum %E%>", name); if (enumloc != UNKNOWN_LOCATION) inform (enumloc, "originally defined here"); /* Completely replace its old definition. The old enumerators remain defined, however. */ TYPE_VALUES (enumtype) = 0; } the_enum->enum_next_value = integer_zero_node; the_enum->enum_overflow = 0; if (flag_short_enums) TYPE_PACKED (enumtype) = 1; /* FIXME: This will issue a warning for a use of a type defined within sizeof in a statement expr. This is not terribly serious as C++ doesn't permit statement exprs within sizeof anyhow. */ if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof)) warning_at (loc, OPT_Wc___compat, "defining type in %qs expression is invalid in C++", (in_sizeof ? "sizeof" : (in_typeof ? "typeof" : "alignof"))); return enumtype; } /* After processing and defining all the values of an enumeration type, install their decls in the enumeration type and finish it off. ENUMTYPE is the type object, VALUES a list of decl-value pairs, and ATTRIBUTES are the specified attributes. Returns ENUMTYPE. */ tree finish_enum (tree enumtype, tree values, tree attributes) { tree pair, tem; tree minnode = 0, maxnode = 0; int precision; signop sign; bool toplevel = (file_scope == current_scope); struct lang_type *lt; decl_attributes (&enumtype, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE); /* Calculate the maximum value of any enumerator in this type. */ if (values == error_mark_node) minnode = maxnode = integer_zero_node; else { minnode = maxnode = TREE_VALUE (values); for (pair = TREE_CHAIN (values); pair; pair = TREE_CHAIN (pair)) { tree value = TREE_VALUE (pair); if (tree_int_cst_lt (maxnode, value)) maxnode = value; if (tree_int_cst_lt (value, minnode)) minnode = value; } } /* Construct the final type of this enumeration. It is the same as one of the integral types - the narrowest one that fits, except that normally we only go as narrow as int - and signed iff any of the values are negative. */ sign = (tree_int_cst_sgn (minnode) >= 0) ? UNSIGNED : SIGNED; precision = MAX (tree_int_cst_min_precision (minnode, sign), tree_int_cst_min_precision (maxnode, sign)); /* If the precision of the type was specified with an attribute and it was too small, give an error. Otherwise, use it. */ if (TYPE_PRECISION (enumtype)) { if (precision > TYPE_PRECISION (enumtype)) { TYPE_PRECISION (enumtype) = 0; error ("specified mode too small for enumeral values"); } else precision = TYPE_PRECISION (enumtype); } if (TYPE_PACKED (enumtype) || precision > TYPE_PRECISION (integer_type_node) || TYPE_PRECISION (enumtype)) { tem = c_common_type_for_size (precision, sign == UNSIGNED ? 1 : 0); if (tem == NULL) { warning (0, "enumeration values exceed range of largest integer"); tem = long_long_integer_type_node; } } else tem = sign == UNSIGNED ? 
unsigned_type_node : integer_type_node;

  TYPE_MIN_VALUE (enumtype) = TYPE_MIN_VALUE (tem);
  TYPE_MAX_VALUE (enumtype) = TYPE_MAX_VALUE (tem);
  TYPE_UNSIGNED (enumtype) = TYPE_UNSIGNED (tem);
  TYPE_SIZE (enumtype) = 0;
  TYPE_PRECISION (enumtype) = TYPE_PRECISION (tem);

  layout_type (enumtype);

  if (values != error_mark_node)
    {
      /* Change the type of the enumerators to be the enum type.  We
         need to do this irrespective of the size of the enum, for
         proper type checking.  Replace the DECL_INITIALs of the
         enumerators, and the value slots of the list, with copies
         that have the enum type; they cannot be modified in place
         because they may be shared (e.g. integer_zero_node).
         Finally, change the purpose slots to point to the names of
         the decls.  */
      for (pair = values; pair; pair = TREE_CHAIN (pair))
        {
          tree enu = TREE_PURPOSE (pair);
          tree ini = DECL_INITIAL (enu);

          TREE_TYPE (enu) = enumtype;

          /* The ISO C Standard mandates enumerators to have type int,
             even though the underlying type of an enum type is
             unspecified.  However, GCC allows enumerators of any
             integer type as an extension.  build_enumerator()
             converts any enumerators that fit in an int to type int,
             to avoid promotions to unsigned types when comparing
             integers with enumerators that fit in the int range.
             When -pedantic is given, build_enumerator() would have
             already warned about those that don't fit.  Here we
             convert the rest to the enumerator type.  */
          if (TREE_TYPE (ini) != integer_type_node)
            ini = convert (enumtype, ini);

          DECL_INITIAL (enu) = ini;
          TREE_PURPOSE (pair) = DECL_NAME (enu);
          TREE_VALUE (pair) = ini;
        }

      TYPE_VALUES (enumtype) = values;
    }

  /* Record the min/max values so that we can warn about bit-field
     enumerations that are too small for the values.  */
  lt = ggc_cleared_alloc<struct lang_type> ();
  lt->enum_min = minnode;
  lt->enum_max = maxnode;
  TYPE_LANG_SPECIFIC (enumtype) = lt;

  /* Fix up all variant types of this enum type.  */
  for (tem = TYPE_MAIN_VARIANT (enumtype); tem; tem = TYPE_NEXT_VARIANT (tem))
    {
      if (tem == enumtype)
        continue;
      TYPE_VALUES (tem) = TYPE_VALUES (enumtype);
      TYPE_MIN_VALUE (tem) = TYPE_MIN_VALUE (enumtype);
      TYPE_MAX_VALUE (tem) = TYPE_MAX_VALUE (enumtype);
      TYPE_SIZE (tem) = TYPE_SIZE (enumtype);
      TYPE_SIZE_UNIT (tem) = TYPE_SIZE_UNIT (enumtype);
      SET_TYPE_MODE (tem, TYPE_MODE (enumtype));
      TYPE_PRECISION (tem) = TYPE_PRECISION (enumtype);
      TYPE_ALIGN (tem) = TYPE_ALIGN (enumtype);
      TYPE_USER_ALIGN (tem) = TYPE_USER_ALIGN (enumtype);
      TYPE_UNSIGNED (tem) = TYPE_UNSIGNED (enumtype);
      TYPE_LANG_SPECIFIC (tem) = TYPE_LANG_SPECIFIC (enumtype);
    }

  /* Finish debugging output for this type.  */
  rest_of_type_compilation (enumtype, toplevel);

  /* If this enum is defined inside a struct, add it to
     struct_types.  */
  if (warn_cxx_compat
      && struct_parse_info != NULL
      && !in_sizeof && !in_typeof && !in_alignof)
    struct_parse_info->struct_types.safe_push (enumtype);

  return enumtype;
}

/* Build and install a CONST_DECL for one value of the current
   enumeration type (one that was begun with start_enum).
   DECL_LOC is the location of the enumerator.
   LOC is the location of the '=' operator if any, DECL_LOC otherwise.

   Return a tree-list containing the CONST_DECL and its value.
   Assignment of sequential values by default is handled here.  */

tree
build_enumerator (location_t decl_loc, location_t loc,
                  struct c_enum_contents *the_enum, tree name, tree value)
{
  tree decl, type;

  /* Validate and default VALUE.  */

  if (value != 0)
    {
      /* Don't issue more errors for error_mark_node (i.e. an
         undeclared identifier) - just ignore the value expression.
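         E.g. for the hypothetical 'enum e { A = undeclared };' the
         parser has already diagnosed UNDECLARED, so VALUE arrives here
         as error_mark_node and is quietly dropped.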
*/
      if (value == error_mark_node)
        value = 0;
      else if (!INTEGRAL_TYPE_P (TREE_TYPE (value)))
        {
          error_at (loc, "enumerator value for %qE is not an integer constant",
                    name);
          value = 0;
        }
      else
        {
          if (TREE_CODE (value) != INTEGER_CST)
            {
              value = c_fully_fold (value, false, NULL);
              if (TREE_CODE (value) == INTEGER_CST)
                pedwarn (loc, OPT_Wpedantic,
                         "enumerator value for %qE is not an integer "
                         "constant expression", name);
            }
          if (TREE_CODE (value) != INTEGER_CST)
            {
              error ("enumerator value for %qE is not an integer constant",
                     name);
              value = 0;
            }
          else
            {
              value = default_conversion (value);
              constant_expression_warning (value);
            }
        }
    }

  /* Default based on previous value.  */
  /* It should no longer be possible to have NON_LVALUE_EXPR
     in the default.  */
  if (value == 0)
    {
      value = the_enum->enum_next_value;
      if (the_enum->enum_overflow)
        error_at (loc, "overflow in enumeration values");
    }
  /* Even though the underlying type of an enum is unspecified, the
     type of enumeration constants is explicitly defined as int
     (6.4.4.3/2 in the C99 Standard).  GCC allows any integer type as
     an extension.  */
  else if (!int_fits_type_p (value, integer_type_node))
    pedwarn (loc, OPT_Wpedantic,
             "ISO C restricts enumerator values to range of %<int%>");

  /* The ISO C Standard mandates enumerators to have type int, even
     though the underlying type of an enum type is unspecified.
     However, GCC allows enumerators of any integer type as an
     extension.  Here we convert any enumerators that fit in an int
     to type int, to avoid promotions to unsigned types when comparing
     integers with enumerators that fit in the int range.  When
     -pedantic is given, we would have already warned about those that
     don't fit.  We have to do this here rather than in finish_enum
     because this value may be used to define more enumerators.  */
  if (int_fits_type_p (value, integer_type_node))
    value = convert (integer_type_node, value);

  /* Set basis for default for next value.  */
  the_enum->enum_next_value
    = build_binary_op (EXPR_LOC_OR_LOC (value, input_location),
                       PLUS_EXPR, value, integer_one_node, 0);
  the_enum->enum_overflow = tree_int_cst_lt (the_enum->enum_next_value, value);

  /* Now create a declaration for the enum value name.  */

  type = TREE_TYPE (value);
  type = c_common_type_for_size (MAX (TYPE_PRECISION (type),
                                      TYPE_PRECISION (integer_type_node)),
                                 (TYPE_PRECISION (type)
                                  >= TYPE_PRECISION (integer_type_node)
                                  && TYPE_UNSIGNED (type)));

  decl = build_decl (decl_loc, CONST_DECL, name, type);
  DECL_INITIAL (decl) = convert (type, value);
  pushdecl (decl);

  return tree_cons (decl, value, NULL_TREE);
}

/* Create the FUNCTION_DECL for a function definition.
   DECLSPECS, DECLARATOR and ATTRIBUTES are the parts of
   the declaration; they describe the function's name and the type it returns,
   but twisted together in a fashion that parallels the syntax of C.

   This function creates a binding context for the function body
   as well as setting up the FUNCTION_DECL in current_function_decl.

   Returns 1 on success.  If the DECLARATOR is not suitable for a function
   (it defines a datum instead), we return 0, which tells
   yyparse to report a parse error.  */

int
start_function (struct c_declspecs *declspecs, struct c_declarator *declarator,
                tree attributes)
{
  tree decl1, old_decl;
  tree restype, resdecl;
  location_t loc;

  current_function_returns_value = 0;  /* Assume, until we see it does.
*/ current_function_returns_null = 0; current_function_returns_abnormally = 0; warn_about_return_type = 0; c_switch_stack = NULL; /* Indicate no valid break/continue context by setting these variables to some non-null, non-label value. We'll notice and emit the proper error message in c_finish_bc_stmt. */ c_break_label = c_cont_label = size_zero_node; decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, true, NULL, &attributes, NULL, NULL, DEPRECATED_NORMAL); /* If the declarator is not suitable for a function definition, cause a syntax error. */ if (decl1 == 0 || TREE_CODE (decl1) != FUNCTION_DECL) return 0; loc = DECL_SOURCE_LOCATION (decl1); c_decl_attributes (&decl1, attributes, 0); if (DECL_DECLARED_INLINE_P (decl1) && DECL_UNINLINABLE (decl1) && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl1))) warning_at (loc, OPT_Wattributes, "inline function %qD given attribute noinline", decl1); /* Handle gnu_inline attribute. */ if (declspecs->inline_p && !flag_gnu89_inline && TREE_CODE (decl1) == FUNCTION_DECL && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl1)) || current_function_decl)) { if (declspecs->storage_class != csc_static) DECL_EXTERNAL (decl1) = !DECL_EXTERNAL (decl1); } announce_function (decl1); if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (TREE_TYPE (decl1)))) { error_at (loc, "return type is an incomplete type"); /* Make it return void instead. */ TREE_TYPE (decl1) = build_function_type (void_type_node, TYPE_ARG_TYPES (TREE_TYPE (decl1))); } if (warn_about_return_type) warn_defaults_to (loc, flag_isoc99 ? OPT_Wimplicit_int : (warn_return_type ? OPT_Wreturn_type : OPT_Wimplicit_int), "return type defaults to %<int%>"); /* Make the init_value nonzero so pushdecl knows this is not tentative. error_mark_node is replaced below (in pop_scope) with the BLOCK. */ DECL_INITIAL (decl1) = error_mark_node; /* A nested function is not global. */ if (current_function_decl != 0) TREE_PUBLIC (decl1) = 0; /* If this definition isn't a prototype and we had a prototype declaration before, copy the arg type info from that prototype. */ old_decl = lookup_name_in_scope (DECL_NAME (decl1), current_scope); if (old_decl && TREE_CODE (old_decl) != FUNCTION_DECL) old_decl = 0; current_function_prototype_locus = UNKNOWN_LOCATION; current_function_prototype_built_in = false; current_function_prototype_arg_types = NULL_TREE; if (!prototype_p (TREE_TYPE (decl1))) { if (old_decl != 0 && TREE_CODE (TREE_TYPE (old_decl)) == FUNCTION_TYPE && comptypes (TREE_TYPE (TREE_TYPE (decl1)), TREE_TYPE (TREE_TYPE (old_decl)))) { TREE_TYPE (decl1) = composite_type (TREE_TYPE (old_decl), TREE_TYPE (decl1)); current_function_prototype_locus = DECL_SOURCE_LOCATION (old_decl); current_function_prototype_built_in = C_DECL_BUILTIN_PROTOTYPE (old_decl); current_function_prototype_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl1)); } if (TREE_PUBLIC (decl1)) { /* If there is an external prototype declaration of this function, record its location but do not copy information to this decl. This may be an invisible declaration (built-in or in a scope which has finished) or simply have more refined argument types than any declaration found above. */ struct c_binding *b; for (b = I_SYMBOL_BINDING (DECL_NAME (decl1)); b; b = b->shadowed) if (B_IN_SCOPE (b, external_scope)) break; if (b) { tree ext_decl, ext_type; ext_decl = b->decl; ext_type = b->u.type ? 
b->u.type : TREE_TYPE (ext_decl); if (TREE_CODE (ext_type) == FUNCTION_TYPE && comptypes (TREE_TYPE (TREE_TYPE (decl1)), TREE_TYPE (ext_type))) { current_function_prototype_locus = DECL_SOURCE_LOCATION (ext_decl); current_function_prototype_built_in = C_DECL_BUILTIN_PROTOTYPE (ext_decl); current_function_prototype_arg_types = TYPE_ARG_TYPES (ext_type); } } } } /* Optionally warn of old-fashioned def with no previous prototype. */ if (warn_strict_prototypes && old_decl != error_mark_node && !prototype_p (TREE_TYPE (decl1)) && C_DECL_ISNT_PROTOTYPE (old_decl)) warning_at (loc, OPT_Wstrict_prototypes, "function declaration isn%'t a prototype"); /* Optionally warn of any global def with no previous prototype. */ else if (warn_missing_prototypes && old_decl != error_mark_node && TREE_PUBLIC (decl1) && !MAIN_NAME_P (DECL_NAME (decl1)) && C_DECL_ISNT_PROTOTYPE (old_decl) && !DECL_DECLARED_INLINE_P (decl1)) warning_at (loc, OPT_Wmissing_prototypes, "no previous prototype for %qD", decl1); /* Optionally warn of any def with no previous prototype if the function has already been used. */ else if (warn_missing_prototypes && old_decl != 0 && old_decl != error_mark_node && TREE_USED (old_decl) && !prototype_p (TREE_TYPE (old_decl))) warning_at (loc, OPT_Wmissing_prototypes, "%qD was used with no prototype before its definition", decl1); /* Optionally warn of any global def with no previous declaration. */ else if (warn_missing_declarations && TREE_PUBLIC (decl1) && old_decl == 0 && !MAIN_NAME_P (DECL_NAME (decl1)) && !DECL_DECLARED_INLINE_P (decl1)) warning_at (loc, OPT_Wmissing_declarations, "no previous declaration for %qD", decl1); /* Optionally warn of any def with no previous declaration if the function has already been used. */ else if (warn_missing_declarations && old_decl != 0 && old_decl != error_mark_node && TREE_USED (old_decl) && C_DECL_IMPLICIT (old_decl)) warning_at (loc, OPT_Wmissing_declarations, "%qD was used with no declaration before its definition", decl1); /* This function exists in static storage. (This does not mean `static' in the C sense!) */ TREE_STATIC (decl1) = 1; /* This is the earliest point at which we might know the assembler name of the function. Thus, if it's set before this, die horribly. */ gcc_assert (!DECL_ASSEMBLER_NAME_SET_P (decl1)); /* If #pragma weak was used, mark the decl weak now. */ if (current_scope == file_scope) maybe_apply_pragma_weak (decl1); /* Warn for unlikely, improbable, or stupid declarations of `main'. */ if (warn_main && MAIN_NAME_P (DECL_NAME (decl1))) { if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl1))) != integer_type_node) pedwarn (loc, OPT_Wmain, "return type of %qD is not %<int%>", decl1); else if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (decl1)))) pedwarn (loc, OPT_Wmain, "%<_Atomic%>-qualified return type of %qD", decl1); check_main_parameter_types (decl1); if (!TREE_PUBLIC (decl1)) pedwarn (loc, OPT_Wmain, "%qD is normally a non-static function", decl1); } /* Record the decl so that the function name is defined. If we already have a decl for this name, and it is a FUNCTION_DECL, use the old decl. */ current_function_decl = pushdecl (decl1); push_scope (); declare_parm_level (); restype = TREE_TYPE (TREE_TYPE (current_function_decl)); resdecl = build_decl (loc, RESULT_DECL, NULL_TREE, restype); DECL_ARTIFICIAL (resdecl) = 1; DECL_IGNORED_P (resdecl) = 1; DECL_RESULT (current_function_decl) = resdecl; start_fname_decls (); return 1; } /* Subroutine of store_parm_decls which handles new-style function definitions (prototype format). 
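   (A new-style definition is e.g. the hypothetical

     int f (int a, int b) { return a + b; }

   as opposed to the old-style

     int f (a, b) int a, b; { return a + b; }

   which is handled by store_parm_decls_oldstyle below.)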
The parms already have decls, so we need only record them as in effect and complain if any redundant old-style parm decls were written. */ static void store_parm_decls_newstyle (tree fndecl, const struct c_arg_info *arg_info) { tree decl; c_arg_tag *tag; unsigned ix; if (current_scope->bindings) { error_at (DECL_SOURCE_LOCATION (fndecl), "old-style parameter declarations in prototyped " "function definition"); /* Get rid of the old-style declarations. */ pop_scope (); push_scope (); } /* Don't issue this warning for nested functions, and don't issue this warning if we got here because ARG_INFO_TYPES was error_mark_node (this happens when a function definition has just an ellipsis in its parameter list). */ else if (!in_system_header_at (input_location) && !current_function_scope && arg_info->types != error_mark_node) warning_at (DECL_SOURCE_LOCATION (fndecl), OPT_Wtraditional, "traditional C rejects ISO C style function definitions"); /* Now make all the parameter declarations visible in the function body. We can bypass most of the grunt work of pushdecl. */ for (decl = arg_info->parms; decl; decl = DECL_CHAIN (decl)) { DECL_CONTEXT (decl) = current_function_decl; if (DECL_NAME (decl)) { bind (DECL_NAME (decl), decl, current_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); if (!TREE_USED (decl)) warn_if_shadowing (decl); } else error_at (DECL_SOURCE_LOCATION (decl), "parameter name omitted"); } /* Record the parameter list in the function declaration. */ DECL_ARGUMENTS (fndecl) = arg_info->parms; /* Now make all the ancillary declarations visible, likewise. */ for (decl = arg_info->others; decl; decl = DECL_CHAIN (decl)) { DECL_CONTEXT (decl) = current_function_decl; if (DECL_NAME (decl)) bind (DECL_NAME (decl), decl, current_scope, /*invisible=*/false, /*nested=*/(TREE_CODE (decl) == FUNCTION_DECL), UNKNOWN_LOCATION); } /* And all the tag declarations. */ FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag) if (tag->id) bind (tag->id, tag->type, current_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); } /* Subroutine of store_parm_decls which handles old-style function definitions (separate parameter list and declarations). */ static void store_parm_decls_oldstyle (tree fndecl, const struct c_arg_info *arg_info) { struct c_binding *b; tree parm, decl, last; tree parmids = arg_info->parms; hash_set<tree> seen_args; if (!in_system_header_at (input_location)) warning_at (DECL_SOURCE_LOCATION (fndecl), OPT_Wold_style_definition, "old-style function definition"); /* Match each formal parameter name with its declaration. Save each decl in the appropriate TREE_PURPOSE slot of the parmids chain. */ for (parm = parmids; parm; parm = TREE_CHAIN (parm)) { if (TREE_VALUE (parm) == 0) { error_at (DECL_SOURCE_LOCATION (fndecl), "parameter name missing from parameter list"); TREE_PURPOSE (parm) = 0; continue; } b = I_SYMBOL_BINDING (TREE_VALUE (parm)); if (b && B_IN_CURRENT_SCOPE (b)) { decl = b->decl; /* Skip erroneous parameters. */ if (decl == error_mark_node) continue; /* If we got something other than a PARM_DECL it is an error. */ if (TREE_CODE (decl) != PARM_DECL) error_at (DECL_SOURCE_LOCATION (decl), "%qD declared as a non-parameter", decl); /* If the declaration is already marked, we have a duplicate name. Complain and ignore the duplicate. 
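	    E.g. the hypothetical old-style definition

	      int f (x, x) int x; { return x; }

	    names X twice; the second occurrence is diagnosed here.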
*/ else if (seen_args.contains (decl)) { error_at (DECL_SOURCE_LOCATION (decl), "multiple parameters named %qD", decl); TREE_PURPOSE (parm) = 0; continue; } /* If the declaration says "void", complain and turn it into an int. */ else if (VOID_TYPE_P (TREE_TYPE (decl))) { error_at (DECL_SOURCE_LOCATION (decl), "parameter %qD declared with void type", decl); TREE_TYPE (decl) = integer_type_node; DECL_ARG_TYPE (decl) = integer_type_node; layout_decl (decl, 0); } warn_if_shadowing (decl); } /* If no declaration found, default to int. */ else { /* FIXME diagnostics: This should be the location of the argument, not the FNDECL. E.g., for an old-style declaration int f10(v) { blah; } We should use the location of the V, not the F10. Unfortunately, the V is an IDENTIFIER_NODE which has no location. In the future we need locations for c_arg_info entries. See gcc.dg/Wshadow-3.c for an example of this problem. */ decl = build_decl (DECL_SOURCE_LOCATION (fndecl), PARM_DECL, TREE_VALUE (parm), integer_type_node); DECL_ARG_TYPE (decl) = TREE_TYPE (decl); pushdecl (decl); warn_if_shadowing (decl); if (flag_isoc99) pedwarn (DECL_SOURCE_LOCATION (decl), OPT_Wimplicit_int, "type of %qD defaults to %<int%>", decl); else warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wmissing_parameter_type, "type of %qD defaults to %<int%>", decl); } TREE_PURPOSE (parm) = decl; seen_args.add (decl); } /* Now examine the parms chain for incomplete declarations and declarations with no corresponding names. */ for (b = current_scope->bindings; b; b = b->prev) { parm = b->decl; if (TREE_CODE (parm) != PARM_DECL) continue; if (TREE_TYPE (parm) != error_mark_node && !COMPLETE_TYPE_P (TREE_TYPE (parm))) { error_at (DECL_SOURCE_LOCATION (parm), "parameter %qD has incomplete type", parm); TREE_TYPE (parm) = error_mark_node; } if (!seen_args.contains (parm)) { error_at (DECL_SOURCE_LOCATION (parm), "declaration for parameter %qD but no such parameter", parm); /* Pretend the parameter was not missing. This gets us to a standard state and minimizes further error messages. */ parmids = chainon (parmids, tree_cons (parm, 0, 0)); } } /* Chain the declarations together in the order of the list of names. Store that chain in the function decl, replacing the list of names. Update the current scope to match. */ DECL_ARGUMENTS (fndecl) = 0; for (parm = parmids; parm; parm = TREE_CHAIN (parm)) if (TREE_PURPOSE (parm)) break; if (parm && TREE_PURPOSE (parm)) { last = TREE_PURPOSE (parm); DECL_ARGUMENTS (fndecl) = last; for (parm = TREE_CHAIN (parm); parm; parm = TREE_CHAIN (parm)) if (TREE_PURPOSE (parm)) { DECL_CHAIN (last) = TREE_PURPOSE (parm); last = TREE_PURPOSE (parm); } DECL_CHAIN (last) = 0; } /* If there was a previous prototype, set the DECL_ARG_TYPE of each argument according to the type previously specified, and report any mismatches. 
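     E.g. the hypothetical

       int g (long);
       int g (x) int x; { return x; }

     draws "argument 'x' doesn't match prototype" from the code below.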
*/ if (current_function_prototype_arg_types) { tree type; for (parm = DECL_ARGUMENTS (fndecl), type = current_function_prototype_arg_types; parm || (type && TREE_VALUE (type) != error_mark_node && (TYPE_MAIN_VARIANT (TREE_VALUE (type)) != void_type_node)); parm = DECL_CHAIN (parm), type = TREE_CHAIN (type)) { if (parm == 0 || type == 0 || TYPE_MAIN_VARIANT (TREE_VALUE (type)) == void_type_node) { if (current_function_prototype_built_in) warning_at (DECL_SOURCE_LOCATION (fndecl), 0, "number of arguments doesn%'t match " "built-in prototype"); else { /* FIXME diagnostics: This should be the location of FNDECL, but there is bug when a prototype is declared inside function context, but defined outside of it (e.g., gcc.dg/pr15698-2.c). In which case FNDECL gets the location of the prototype, not the definition. */ error_at (input_location, "number of arguments doesn%'t match prototype"); error_at (current_function_prototype_locus, "prototype declaration"); } break; } /* Type for passing arg must be consistent with that declared for the arg. ISO C says we take the unqualified type for parameters declared with qualified type. */ if (TREE_TYPE (parm) != error_mark_node && TREE_TYPE (type) != error_mark_node && ((TYPE_ATOMIC (DECL_ARG_TYPE (parm)) != TYPE_ATOMIC (TREE_VALUE (type))) || !comptypes (TYPE_MAIN_VARIANT (DECL_ARG_TYPE (parm)), TYPE_MAIN_VARIANT (TREE_VALUE (type))))) { if ((TYPE_ATOMIC (DECL_ARG_TYPE (parm)) == TYPE_ATOMIC (TREE_VALUE (type))) && (TYPE_MAIN_VARIANT (TREE_TYPE (parm)) == TYPE_MAIN_VARIANT (TREE_VALUE (type)))) { /* Adjust argument to match prototype. E.g. a previous `int foo(float);' prototype causes `int foo(x) float x; {...}' to be treated like `int foo(float x) {...}'. This is particularly useful for argument types like uid_t. */ DECL_ARG_TYPE (parm) = TREE_TYPE (parm); if (targetm.calls.promote_prototypes (TREE_TYPE (current_function_decl)) && INTEGRAL_TYPE_P (TREE_TYPE (parm)) && TYPE_PRECISION (TREE_TYPE (parm)) < TYPE_PRECISION (integer_type_node)) DECL_ARG_TYPE (parm) = c_type_promotes_to (TREE_TYPE (parm)); /* ??? Is it possible to get here with a built-in prototype or will it always have been diagnosed as conflicting with an old-style definition and discarded? */ if (current_function_prototype_built_in) warning_at (DECL_SOURCE_LOCATION (parm), OPT_Wpedantic, "promoted argument %qD " "doesn%'t match built-in prototype", parm); else { pedwarn (DECL_SOURCE_LOCATION (parm), OPT_Wpedantic, "promoted argument %qD " "doesn%'t match prototype", parm); pedwarn (current_function_prototype_locus, OPT_Wpedantic, "prototype declaration"); } } else { if (current_function_prototype_built_in) warning_at (DECL_SOURCE_LOCATION (parm), 0, "argument %qD doesn%'t match " "built-in prototype", parm); else { error_at (DECL_SOURCE_LOCATION (parm), "argument %qD doesn%'t match prototype", parm); error_at (current_function_prototype_locus, "prototype declaration"); } } } } TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = 0; } /* Otherwise, create a prototype that would match. 
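     E.g. for the hypothetical old-style definition

       int h (c) char c; { return c; }

     the recorded argument type is the DECL_ARG_TYPE of C - INT under
     the default argument promotions - rather than CHAR.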
*/ else { tree actual = 0, last = 0, type; for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm)) { type = tree_cons (NULL_TREE, DECL_ARG_TYPE (parm), NULL_TREE); if (last) TREE_CHAIN (last) = type; else actual = type; last = type; } type = tree_cons (NULL_TREE, void_type_node, NULL_TREE); if (last) TREE_CHAIN (last) = type; else actual = type; /* We are going to assign a new value for the TYPE_ACTUAL_ARG_TYPES of the type of this function, but we need to avoid having this affect the types of other similarly-typed functions, so we must first force the generation of an identical (but separate) type node for the relevant function type. The new node we create will be a variant of the main variant of the original function type. */ TREE_TYPE (fndecl) = build_variant_type_copy (TREE_TYPE (fndecl)); TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = actual; } } /* Store parameter declarations passed in ARG_INFO into the current function declaration. */ void store_parm_decls_from (struct c_arg_info *arg_info) { current_function_arg_info = arg_info; store_parm_decls (); } /* Store the parameter declarations into the current function declaration. This is called after parsing the parameter declarations, before digesting the body of the function. For an old-style definition, construct a prototype out of the old-style parameter declarations and inject it into the function's type. */ void store_parm_decls (void) { tree fndecl = current_function_decl; bool proto; /* The argument information block for FNDECL. */ struct c_arg_info *arg_info = current_function_arg_info; current_function_arg_info = 0; /* True if this definition is written with a prototype. Note: despite C99 6.7.5.3p14, we can *not* treat an empty argument list in a function definition as equivalent to (void) -- an empty argument list specifies the function has no parameters, but only (void) sets up a prototype for future calls. */ proto = arg_info->types != 0; if (proto) store_parm_decls_newstyle (fndecl, arg_info); else store_parm_decls_oldstyle (fndecl, arg_info); /* The next call to push_scope will be a function body. */ next_is_function_body = true; /* Write a record describing this function definition to the prototypes file (if requested). */ gen_aux_info_record (fndecl, 1, 0, proto); /* Initialize the RTL code for the function. */ allocate_struct_function (fndecl, false); if (warn_unused_local_typedefs) cfun->language = ggc_cleared_alloc<language_function> (); /* Begin the statement tree for this function. */ DECL_SAVED_TREE (fndecl) = push_stmt_list (); /* ??? Insert the contents of the pending sizes list into the function to be evaluated. The only reason left to have this is void foo(int n, int array[n++]) because we throw away the array type in favor of a pointer type, and thus won't naturally see the SAVE_EXPR containing the increment. All other pending sizes would be handled by gimplify_parameters. */ if (arg_info->pending_sizes) add_stmt (arg_info->pending_sizes); } /* Store PARM_DECLs in PARMS into scope temporarily. Used for c_finish_omp_declare_simd for function prototypes. No diagnostics should be done. */ void temp_store_parm_decls (tree fndecl, tree parms) { push_scope (); for (tree p = parms; p; p = DECL_CHAIN (p)) { DECL_CONTEXT (p) = fndecl; if (DECL_NAME (p)) bind (DECL_NAME (p), p, current_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); } } /* Undo what temp_store_parm_decls did. 
*/ void temp_pop_parm_decls (void) { /* Clear all bindings in this temporary scope, so that pop_scope doesn't create a BLOCK. */ struct c_binding *b = current_scope->bindings; current_scope->bindings = NULL; for (; b; b = free_binding_and_advance (b)) { gcc_assert (TREE_CODE (b->decl) == PARM_DECL || b->decl == error_mark_node); gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; if (b->shadowed && b->shadowed->u.type) TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type; } pop_scope (); } /* Finish up a function declaration and compile that function all the way to assembler language output. Then free the storage for the function definition. This is called after parsing the body of the function definition. */ void finish_function (void) { tree fndecl = current_function_decl; if (c_dialect_objc ()) objc_finish_function (); if (TREE_CODE (fndecl) == FUNCTION_DECL && targetm.calls.promote_prototypes (TREE_TYPE (fndecl))) { tree args = DECL_ARGUMENTS (fndecl); for (; args; args = DECL_CHAIN (args)) { tree type = TREE_TYPE (args); if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)) DECL_ARG_TYPE (args) = c_type_promotes_to (type); } } if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node) BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl; /* Must mark the RESULT_DECL as being in this function. */ if (DECL_RESULT (fndecl) && DECL_RESULT (fndecl) != error_mark_node) DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl; if (MAIN_NAME_P (DECL_NAME (fndecl)) && flag_hosted && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (fndecl))) == integer_type_node && flag_isoc99) { /* Hack. We don't want the middle-end to warn that this return is unreachable, so we mark its location as special. Using UNKNOWN_LOCATION has the problem that it gets clobbered in annotate_one_with_locus. A cleaner solution might be to ensure ! should_carry_locus_p (stmt), but that needs a flag. */ c_finish_return (BUILTINS_LOCATION, integer_zero_node, NULL_TREE); } /* Tie off the statement tree for this function. */ DECL_SAVED_TREE (fndecl) = pop_stmt_list (DECL_SAVED_TREE (fndecl)); /* If the function has _Cilk_spawn in front of a function call inside it i.e. it is a spawning function, then add the appropriate Cilk plus functions inside. */ if (fn_contains_cilk_spawn_p (cfun)) cfun->cilk_frame_decl = insert_cilk_frame (fndecl); finish_fname_decls (); /* Complain if there's just no return statement. */ if (warn_return_type && TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE && !current_function_returns_value && !current_function_returns_null /* Don't complain if we are no-return. */ && !current_function_returns_abnormally /* Don't complain if we are declared noreturn. */ && !TREE_THIS_VOLATILE (fndecl) /* Don't warn for main(). */ && !MAIN_NAME_P (DECL_NAME (fndecl)) /* Or if they didn't actually specify a return type. */ && !C_FUNCTION_IMPLICIT_INT (fndecl) /* Normally, with -Wreturn-type, flow will complain, but we might optimize out static functions. */ && !TREE_PUBLIC (fndecl)) { warning (OPT_Wreturn_type, "no return statement in function returning non-void"); TREE_NO_WARNING (fndecl) = 1; } /* Complain about parameters that are only set, but never otherwise used. 
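   E.g. -Wunused-but-set-parameter flags P in the hypothetical

     void f (int p) { p = 1; }

   because P is assigned but its value is never read.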
*/ if (warn_unused_but_set_parameter) { tree decl; for (decl = DECL_ARGUMENTS (fndecl); decl; decl = DECL_CHAIN (decl)) if (TREE_USED (decl) && TREE_CODE (decl) == PARM_DECL && !DECL_READ_P (decl) && DECL_NAME (decl) && !DECL_ARTIFICIAL (decl) && !TREE_NO_WARNING (decl)) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wunused_but_set_parameter, "parameter %qD set but not used", decl); } /* Complain about locally defined typedefs that are not used in this function. */ maybe_warn_unused_local_typedefs (); /* Store the end of the function, so that we get good line number info for the epilogue. */ cfun->function_end_locus = input_location; /* Finalize the ELF visibility for the function. */ c_determine_visibility (fndecl); /* For GNU C extern inline functions disregard inline limits. */ if (DECL_EXTERNAL (fndecl) && DECL_DECLARED_INLINE_P (fndecl)) DECL_DISREGARD_INLINE_LIMITS (fndecl) = 1; /* Genericize before inlining. Delay genericizing nested functions until their parent function is genericized. Since finalizing requires GENERIC, delay that as well. */ if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node && !undef_nested_function) { if (!decl_function_context (fndecl)) { invoke_plugin_callbacks (PLUGIN_PRE_GENERICIZE, fndecl); c_genericize (fndecl); /* ??? Objc emits functions after finalizing the compilation unit. This should be cleaned up later and this conditional removed. */ if (symtab->global_info_ready) { cgraph_node::add_new_function (fndecl, false); return; } cgraph_node::finalize_function (fndecl, false); } else { /* Register this function with cgraph just far enough to get it added to our parent's nested function list. Handy, since the C front end doesn't have such a list. */ (void) cgraph_node::get_create (fndecl); } } if (!decl_function_context (fndecl)) undef_nested_function = false; if (cfun->language != NULL) { ggc_free (cfun->language); cfun->language = NULL; } /* We're leaving the context of this function, so zap cfun. It's still in DECL_STRUCT_FUNCTION, and we'll restore it in tree_rest_of_compilation. */ set_cfun (NULL); current_function_decl = NULL; } /* Check the declarations given in a for-loop for satisfying the C99 constraints. If exactly one such decl is found, return it. LOC is the location of the opening parenthesis of the for loop. The last parameter allows you to control the "for loop initial declarations are only allowed in C99 mode". Normally, you should pass flag_isoc99 as that parameter. But in some cases (Objective-C foreach loop, for example) we want to run the checks in this function even if not in C99 mode, so we allow the caller to turn off the error about not being in C99 mode. */ tree check_for_loop_decls (location_t loc, bool turn_off_iso_c99_error) { struct c_binding *b; tree one_decl = NULL_TREE; int n_decls = 0; if (!turn_off_iso_c99_error) { static bool hint = true; /* If we get here, declarations have been used in a for loop without the C99 for loop scope. This doesn't make much sense, so don't allow it. */ error_at (loc, "%<for%> loop initial declarations " "are only allowed in C99 or C11 mode"); if (hint) { inform (loc, "use option -std=c99, -std=gnu99, -std=c11 or -std=gnu11 " "to compile your code"); hint = false; } return NULL_TREE; } /* C99 subclause 6.8.5 paragraph 3: [#3] The declaration part of a for statement shall only declare identifiers for objects having storage class auto or register. 
It isn't clear whether, in this sentence, "identifiers" binds to "shall only declare" or to "objects" - that is, whether all identifiers declared must be identifiers for objects, or whether the restriction only applies to those that are. (A question on this in comp.std.c in November 2000 received no answer.) We implement the strictest interpretation, to avoid creating an extension which later causes problems. */ for (b = current_scope->bindings; b; b = b->prev) { tree id = b->id; tree decl = b->decl; if (!id) continue; switch (TREE_CODE (decl)) { case VAR_DECL: { location_t decl_loc = DECL_SOURCE_LOCATION (decl); if (TREE_STATIC (decl)) error_at (decl_loc, "declaration of static variable %qD in %<for%> loop " "initial declaration", decl); else if (DECL_EXTERNAL (decl)) error_at (decl_loc, "declaration of %<extern%> variable %qD in %<for%> loop " "initial declaration", decl); } break; case RECORD_TYPE: error_at (loc, "%<struct %E%> declared in %<for%> loop initial " "declaration", id); break; case UNION_TYPE: error_at (loc, "%<union %E%> declared in %<for%> loop initial declaration", id); break; case ENUMERAL_TYPE: error_at (loc, "%<enum %E%> declared in %<for%> loop " "initial declaration", id); break; default: error_at (loc, "declaration of non-variable " "%qD in %<for%> loop initial declaration", decl); } n_decls++; one_decl = decl; } return n_decls == 1 ? one_decl : NULL_TREE; } /* Save and reinitialize the variables used during compilation of a C function. */ void c_push_function_context (void) { struct language_function *p = cfun->language; /* cfun->language might have been already allocated by the use of -Wunused-local-typedefs. In that case, just re-use it. */ if (p == NULL) cfun->language = p = ggc_cleared_alloc<language_function> (); p->base.x_stmt_tree = c_stmt_tree; c_stmt_tree.x_cur_stmt_list = vec_safe_copy (c_stmt_tree.x_cur_stmt_list); p->x_break_label = c_break_label; p->x_cont_label = c_cont_label; p->x_switch_stack = c_switch_stack; p->arg_info = current_function_arg_info; p->returns_value = current_function_returns_value; p->returns_null = current_function_returns_null; p->returns_abnormally = current_function_returns_abnormally; p->warn_about_return_type = warn_about_return_type; push_function_context (); } /* Restore the variables used during compilation of a C function. */ void c_pop_function_context (void) { struct language_function *p; pop_function_context (); p = cfun->language; /* When -Wunused-local-typedefs is in effect, cfun->languages is used to store data throughout the life time of the current cfun, So don't deallocate it. */ if (!warn_unused_local_typedefs) cfun->language = NULL; if (DECL_STRUCT_FUNCTION (current_function_decl) == 0 && DECL_SAVED_TREE (current_function_decl) == NULL_TREE) { /* Stop pointing to the local nodes about to be freed. */ /* But DECL_INITIAL must remain nonzero so we know this was an actual function definition. 
*/ DECL_INITIAL (current_function_decl) = error_mark_node; DECL_ARGUMENTS (current_function_decl) = 0; } c_stmt_tree = p->base.x_stmt_tree; p->base.x_stmt_tree.x_cur_stmt_list = NULL; c_break_label = p->x_break_label; c_cont_label = p->x_cont_label; c_switch_stack = p->x_switch_stack; current_function_arg_info = p->arg_info; current_function_returns_value = p->returns_value; current_function_returns_null = p->returns_null; current_function_returns_abnormally = p->returns_abnormally; warn_about_return_type = p->warn_about_return_type; } /* The functions below are required for functionality of doing function at once processing in the C front end. Currently these functions are not called from anywhere in the C front end, but as these changes continue, that will change. */ /* Returns the stmt_tree (if any) to which statements are currently being added. If there is no active statement-tree, NULL is returned. */ stmt_tree current_stmt_tree (void) { return &c_stmt_tree; } /* Return the global value of T as a symbol. */ tree identifier_global_value (tree t) { struct c_binding *b; for (b = I_SYMBOL_BINDING (t); b; b = b->shadowed) if (B_IN_FILE_SCOPE (b) || B_IN_EXTERNAL_SCOPE (b)) return b->decl; return 0; } /* In C, the only C-linkage public declaration is at file scope. */ tree c_linkage_bindings (tree name) { return identifier_global_value (name); } /* Record a builtin type for C. If NAME is non-NULL, it is the name used; otherwise the name is found in ridpointers from RID_INDEX. */ void record_builtin_type (enum rid rid_index, const char *name, tree type) { tree id, decl; if (name == 0) id = ridpointers[(int) rid_index]; else id = get_identifier (name); decl = build_decl (UNKNOWN_LOCATION, TYPE_DECL, id, type); pushdecl (decl); if (debug_hooks->type_decl) debug_hooks->type_decl (decl, false); } /* Build the void_list_node (void_type_node having been created). */ tree build_void_list_node (void) { tree t = build_tree_list (NULL_TREE, void_type_node); return t; } /* Return a c_parm structure with the given SPECS, ATTRS and DECLARATOR. */ struct c_parm * build_c_parm (struct c_declspecs *specs, tree attrs, struct c_declarator *declarator) { struct c_parm *ret = XOBNEW (&parser_obstack, struct c_parm); ret->specs = specs; ret->attrs = attrs; ret->declarator = declarator; return ret; } /* Return a declarator with nested attributes. TARGET is the inner declarator to which these attributes apply. ATTRS are the attributes. */ struct c_declarator * build_attrs_declarator (tree attrs, struct c_declarator *target) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_attrs; ret->declarator = target; ret->u.attrs = attrs; return ret; } /* Return a declarator for a function with arguments specified by ARGS and return type specified by TARGET. */ struct c_declarator * build_function_declarator (struct c_arg_info *args, struct c_declarator *target) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_function; ret->declarator = target; ret->u.arg_info = args; return ret; } /* Return a declarator for the identifier IDENT (which may be NULL_TREE for an abstract declarator). */ struct c_declarator * build_id_declarator (tree ident) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_id; ret->declarator = 0; ret->u.id = ident; /* Default value - may get reset to a more precise location. 
*/ ret->id_loc = input_location; return ret; } /* Return something to represent absolute declarators containing a *. TARGET is the absolute declarator that the * contains. TYPE_QUALS_ATTRS is a structure for type qualifiers and attributes to apply to the pointer type. */ struct c_declarator * make_pointer_declarator (struct c_declspecs *type_quals_attrs, struct c_declarator *target) { tree attrs; int quals = 0; struct c_declarator *itarget = target; struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); if (type_quals_attrs) { attrs = type_quals_attrs->attrs; quals = quals_from_declspecs (type_quals_attrs); if (attrs != NULL_TREE) itarget = build_attrs_declarator (attrs, target); } ret->kind = cdk_pointer; ret->declarator = itarget; ret->u.pointer_quals = quals; return ret; } /* Return a pointer to a structure for an empty list of declaration specifiers. */ struct c_declspecs * build_null_declspecs (void) { struct c_declspecs *ret = XOBNEW (&parser_obstack, struct c_declspecs); memset (&ret->locations, 0, cdw_number_of_elements); ret->type = 0; ret->expr = 0; ret->decl_attr = 0; ret->attrs = 0; ret->align_log = -1; ret->typespec_word = cts_none; ret->storage_class = csc_none; ret->expr_const_operands = true; ret->declspecs_seen_p = false; ret->typespec_kind = ctsk_none; ret->non_sc_seen_p = false; ret->typedef_p = false; ret->explicit_signed_p = false; ret->deprecated_p = false; ret->default_int_p = false; ret->long_p = false; ret->long_long_p = false; ret->short_p = false; ret->signed_p = false; ret->unsigned_p = false; ret->complex_p = false; ret->inline_p = false; ret->noreturn_p = false; ret->thread_p = false; ret->thread_gnu_p = false; ret->const_p = false; ret->volatile_p = false; ret->atomic_p = false; ret->restrict_p = false; ret->saturating_p = false; ret->alignas_p = false; ret->address_space = ADDR_SPACE_GENERIC; return ret; } /* Add the address space ADDRSPACE to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_addrspace (source_location location, struct c_declspecs *specs, addr_space_t as) { specs->non_sc_seen_p = true; specs->declspecs_seen_p = true; if (!ADDR_SPACE_GENERIC_P (specs->address_space) && specs->address_space != as) error ("incompatible address space qualifiers %qs and %qs", c_addr_space_name (as), c_addr_space_name (specs->address_space)); else { specs->address_space = as; specs->locations[cdw_address_space] = location; } return specs; } /* Add the type qualifier QUAL to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_qual (source_location loc, struct c_declspecs *specs, tree qual) { enum rid i; bool dupe = false; specs->non_sc_seen_p = true; specs->declspecs_seen_p = true; gcc_assert (TREE_CODE (qual) == IDENTIFIER_NODE && C_IS_RESERVED_WORD (qual)); i = C_RID_CODE (qual); switch (i) { case RID_CONST: dupe = specs->const_p; specs->const_p = true; specs->locations[cdw_const] = loc; break; case RID_VOLATILE: dupe = specs->volatile_p; specs->volatile_p = true; specs->locations[cdw_volatile] = loc; break; case RID_RESTRICT: dupe = specs->restrict_p; specs->restrict_p = true; specs->locations[cdw_restrict] = loc; break; case RID_ATOMIC: dupe = specs->atomic_p; specs->atomic_p = true; break; default: gcc_unreachable (); } if (dupe) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %qE", qual); return specs; } /* Add the type specifier TYPE to the declaration specifiers SPECS, returning SPECS. 
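
   As an illustration (not from the original sources), the checks below
   accept or reject keyword combinations such as:

     long long int x;         accepted (pedwarned in C90)
     _Complex long double z;  accepted
     long float f;            rejected: both "long" and "float"
     short char c;            rejected: both "short" and "char"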
*/ struct c_declspecs * declspecs_add_type (location_t loc, struct c_declspecs *specs, struct c_typespec spec) { tree type = spec.spec; specs->non_sc_seen_p = true; specs->declspecs_seen_p = true; specs->typespec_kind = spec.kind; if (TREE_DEPRECATED (type)) specs->deprecated_p = true; /* Handle type specifier keywords. */ if (TREE_CODE (type) == IDENTIFIER_NODE && C_IS_RESERVED_WORD (type) && C_RID_CODE (type) != RID_CXX_COMPAT_WARN) { enum rid i = C_RID_CODE (type); if (specs->type) { error_at (loc, "two or more data types in declaration specifiers"); return specs; } if ((int) i <= (int) RID_LAST_MODIFIER) { /* "long", "short", "signed", "unsigned", "_Complex" or "_Sat". */ bool dupe = false; switch (i) { case RID_LONG: if (specs->long_long_p) { error_at (loc, "%<long long long%> is too long for GCC"); break; } if (specs->long_p) { if (specs->typespec_word == cts_double) { error_at (loc, ("both %<long long%> and %<double%> in " "declaration specifiers")); break; } pedwarn_c90 (loc, OPT_Wlong_long, "ISO C90 does not support %<long long%>"); specs->long_long_p = 1; specs->locations[cdw_long_long] = loc; break; } if (specs->short_p) error_at (loc, ("both %<long%> and %<short%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<long%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<long%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_int_n) error_at (loc, ("both %<long%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<long%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_char) error_at (loc, ("both %<long%> and %<char%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<long%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<long%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<long%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<long%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->long_p = true; specs->locations[cdw_long] = loc; } break; case RID_SHORT: dupe = specs->short_p; if (specs->long_p) error_at (loc, ("both %<long%> and %<short%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<short%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<short%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_int_n) error_at (loc, ("both %<short%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<short%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_char) error_at (loc, ("both %<short%> and %<char%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<short%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<short%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == 
cts_dfloat32) error_at (loc, ("both %<short%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<short%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<short%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->short_p = true; specs->locations[cdw_short] = loc; } break; case RID_SIGNED: dupe = specs->signed_p; if (specs->unsigned_p) error_at (loc, ("both %<signed%> and %<unsigned%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<signed%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<signed%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<signed%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<signed%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<signed%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<signed%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<signed%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<signed%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->signed_p = true; specs->locations[cdw_signed] = loc; } break; case RID_UNSIGNED: dupe = specs->unsigned_p; if (specs->signed_p) error_at (loc, ("both %<signed%> and %<unsigned%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<unsigned%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<unsigned%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<unsigned%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<unsigned%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<unsigned%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<unsigned%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<unsigned%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<unsigned%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->unsigned_p = true; specs->locations[cdw_unsigned] = loc; } break; case RID_COMPLEX: dupe = specs->complex_p; if (!in_system_header_at (loc)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support complex types"); if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<complex%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<complex%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<complex%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both 
%<complex%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<complex%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<complex%> and %<_Decimal128%> in " "declaration specifiers")); else if (specs->typespec_word == cts_fract) error_at (loc, ("both %<complex%> and %<_Fract%> in " "declaration specifiers")); else if (specs->typespec_word == cts_accum) error_at (loc, ("both %<complex%> and %<_Accum%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<complex%> and %<_Sat%> in " "declaration specifiers")); else { specs->complex_p = true; specs->locations[cdw_complex] = loc; } break; case RID_SAT: dupe = specs->saturating_p; pedwarn (loc, OPT_Wpedantic, "ISO C does not support saturating types"); if (specs->typespec_word == cts_int_n) { error_at (loc, ("both %<_Sat%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); } else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<_Sat%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<_Sat%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<_Sat%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_char) error_at (loc, ("both %<_Sat%> and %<char%> in " "declaration specifiers")); else if (specs->typespec_word == cts_int) error_at (loc, ("both %<_Sat%> and %<int%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<_Sat%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<_Sat%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<_Sat%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<_Sat%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<_Sat%> and %<_Decimal128%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<_Sat%> and %<complex%> in " "declaration specifiers")); else { specs->saturating_p = true; specs->locations[cdw_saturating] = loc; } break; default: gcc_unreachable (); } if (dupe) error_at (loc, "duplicate %qE", type); return specs; } else { /* "void", "_Bool", "char", "int", "float", "double", "_Decimal32", "__intN", "_Decimal64", "_Decimal128", "_Fract", "_Accum" or "__auto_type". 
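
   For example (an illustrative note, not from the original sources),
   "int float x;" reaches the check below with typespec_word already
   set for "int", so "float" draws the "two or more data types in
   declaration specifiers" error.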
*/ if (specs->typespec_word != cts_none) { error_at (loc, "two or more data types in declaration specifiers"); return specs; } switch (i) { case RID_AUTO_TYPE: if (specs->long_p) error_at (loc, ("both %<long%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<__auto_type%> in " "declaration specifiers")); else { specs->typespec_word = cts_auto_type; specs->locations[cdw_typespec] = loc; } return specs; case RID_INT_N_0: case RID_INT_N_1: case RID_INT_N_2: case RID_INT_N_3: specs->int_n_idx = i - RID_INT_N_0; if (!in_system_header_at (input_location)) pedwarn (loc, OPT_Wpedantic, "ISO C does not support %<__int%d%> types", int_n_data[specs->int_n_idx].bitsize); if (specs->long_p) error_at (loc, ("both %<__int%d%> and %<long%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->short_p) error_at (loc, ("both %<__int%d%> and %<short%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (! int_n_enabled_p [specs->int_n_idx]) error_at (loc, "%<__int%d%> is not supported on this target", int_n_data[specs->int_n_idx].bitsize); else { specs->typespec_word = cts_int_n; specs->locations[cdw_typespec] = loc; } return specs; case RID_VOID: if (specs->long_p) error_at (loc, ("both %<long%> and %<void%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<void%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<void%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<void%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<void%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<void%> in " "declaration specifiers")); else { specs->typespec_word = cts_void; specs->locations[cdw_typespec] = loc; } return specs; case RID_BOOL: if (!in_system_header_at (loc)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support boolean types"); if (specs->long_p) error_at (loc, ("both %<long%> and %<_Bool%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<_Bool%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<_Bool%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<_Bool%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<_Bool%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<_Bool%> in " "declaration specifiers")); else { specs->typespec_word = cts_bool; specs->locations[cdw_typespec] = loc; } return specs; case RID_CHAR: if (specs->long_p) error_at (loc, ("both %<long%> and %<char%> in " "declaration 
specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<char%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<char%> in " "declaration specifiers")); else { specs->typespec_word = cts_char; specs->locations[cdw_typespec] = loc; } return specs; case RID_INT: if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<int%> in " "declaration specifiers")); else { specs->typespec_word = cts_int; specs->locations[cdw_typespec] = loc; } return specs; case RID_FLOAT: if (specs->long_p) error_at (loc, ("both %<long%> and %<float%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<float%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<float%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<float%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<float%> in " "declaration specifiers")); else { specs->typespec_word = cts_float; specs->locations[cdw_typespec] = loc; } return specs; case RID_DOUBLE: if (specs->long_long_p) error_at (loc, ("both %<long long%> and %<double%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<double%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<double%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<double%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<double%> in " "declaration specifiers")); else { specs->typespec_word = cts_double; specs->locations[cdw_typespec] = loc; } return specs; case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: { const char *str; if (i == RID_DFLOAT32) str = "_Decimal32"; else if (i == RID_DFLOAT64) str = "_Decimal64"; else str = "_Decimal128"; if (specs->long_long_p) error_at (loc, ("both %<long long%> and %<%s%> in " "declaration specifiers"), str); if (specs->long_p) error_at (loc, ("both %<long%> and %<%s%> in " "declaration specifiers"), str); else if (specs->short_p) error_at (loc, ("both %<short%> and %<%s%> in " "declaration specifiers"), str); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<%s%> in " "declaration specifiers"), str); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<%s%> in " "declaration specifiers"), str); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<%s%> in " "declaration specifiers"), str); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<%s%> in " "declaration specifiers"), str); else if (i == RID_DFLOAT32) specs->typespec_word = cts_dfloat32; else if (i == RID_DFLOAT64) specs->typespec_word = cts_dfloat64; else specs->typespec_word = cts_dfloat128; specs->locations[cdw_typespec] = loc; } if (!targetm.decimal_float_supported_p ()) error_at (loc, ("decimal floating point not supported " "for this target")); pedwarn (loc, OPT_Wpedantic, "ISO C does not support decimal floating point"); return specs; case RID_FRACT: case RID_ACCUM: { const char *str; if (i == RID_FRACT) str = "_Fract"; else str = "_Accum"; if (specs->complex_p) error_at (loc, ("both %<complex%> and %<%s%> in " "declaration specifiers"), str); else if (i == RID_FRACT) specs->typespec_word = cts_fract; else specs->typespec_word = cts_accum; specs->locations[cdw_typespec] = loc; } if 
(!targetm.fixed_point_supported_p ()) error_at (loc, "fixed-point types not supported for this target"); pedwarn (loc, OPT_Wpedantic, "ISO C does not support fixed-point types"); return specs; default: /* ObjC reserved word "id", handled below. */ break; } } } /* Now we have a typedef (a TYPE_DECL node), an identifier (some form of ObjC type, cases such as "int" and "long" being handled above), a TYPE (struct, union, enum and typeof specifiers) or an ERROR_MARK. In none of these cases may there have previously been any type specifiers. */ if (specs->type || specs->typespec_word != cts_none || specs->long_p || specs->short_p || specs->signed_p || specs->unsigned_p || specs->complex_p) error_at (loc, "two or more data types in declaration specifiers"); else if (TREE_CODE (type) == TYPE_DECL) { if (TREE_TYPE (type) == error_mark_node) ; /* Allow the type to default to int to avoid cascading errors. */ else { specs->type = TREE_TYPE (type); specs->decl_attr = DECL_ATTRIBUTES (type); specs->typedef_p = true; specs->explicit_signed_p = C_TYPEDEF_EXPLICITLY_SIGNED (type); specs->locations[cdw_typedef] = loc; /* If this typedef name is defined in a struct, then a C++ lookup would return a different value. */ if (warn_cxx_compat && I_SYMBOL_BINDING (DECL_NAME (type))->in_struct) warning_at (loc, OPT_Wc___compat, "C++ lookup of %qD would return a field, not a type", type); /* If we are parsing a struct, record that a struct field used a typedef. */ if (warn_cxx_compat && struct_parse_info != NULL) struct_parse_info->typedefs_seen.safe_push (type); } } else if (TREE_CODE (type) == IDENTIFIER_NODE) { tree t = lookup_name (type); if (!t || TREE_CODE (t) != TYPE_DECL) error_at (loc, "%qE fails to be a typedef or built in type", type); else if (TREE_TYPE (t) == error_mark_node) ; else { specs->type = TREE_TYPE (t); specs->locations[cdw_typespec] = loc; } } else { if (TREE_CODE (type) != ERROR_MARK && spec.kind == ctsk_typeof) { specs->typedef_p = true; specs->locations[cdw_typedef] = loc; if (spec.expr) { if (specs->expr) specs->expr = build2 (COMPOUND_EXPR, TREE_TYPE (spec.expr), specs->expr, spec.expr); else specs->expr = spec.expr; specs->expr_const_operands &= spec.expr_const_operands; } } specs->type = type; } return specs; } /* Add the storage class specifier or function specifier SCSPEC to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_scspec (source_location loc, struct c_declspecs *specs, tree scspec) { enum rid i; enum c_storage_class n = csc_none; bool dupe = false; specs->declspecs_seen_p = true; gcc_assert (TREE_CODE (scspec) == IDENTIFIER_NODE && C_IS_RESERVED_WORD (scspec)); i = C_RID_CODE (scspec); if (specs->non_sc_seen_p) warning (OPT_Wold_style_declaration, "%qE is not at beginning of declaration", scspec); switch (i) { case RID_INLINE: /* C99 permits duplicate inline. Although of doubtful utility, it seems simplest to permit it in gnu89 mode as well, as there is also little utility in maintaining this as a difference between gnu89 and C99 inline. */ dupe = false; specs->inline_p = true; specs->locations[cdw_inline] = loc; break; case RID_NORETURN: /* Duplicate _Noreturn is permitted. 
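
   For example (an illustrative note, not from the original sources),
   "_Noreturn _Noreturn void f (void);" is accepted silently, matching
   C11's rule that a function specifier may appear more than once.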
*/ dupe = false; specs->noreturn_p = true; specs->locations[cdw_noreturn] = loc; break; case RID_THREAD: dupe = specs->thread_p; if (specs->storage_class == csc_auto) error ("%qE used with %<auto%>", scspec); else if (specs->storage_class == csc_register) error ("%qE used with %<register%>", scspec); else if (specs->storage_class == csc_typedef) error ("%qE used with %<typedef%>", scspec); else { specs->thread_p = true; specs->thread_gnu_p = (strcmp (IDENTIFIER_POINTER (scspec), "__thread") == 0); /* A diagnostic is not required for the use of this identifier in the implementation namespace; only diagnose it for the C11 spelling because of existing code using the other spelling. */ if (!specs->thread_gnu_p) { if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 does not support %qE", scspec); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 does not support %qE", scspec); } specs->locations[cdw_thread] = loc; } break; case RID_AUTO: n = csc_auto; break; case RID_EXTERN: n = csc_extern; /* Diagnose "__thread extern". */ if (specs->thread_p && specs->thread_gnu_p) error ("%<__thread%> before %<extern%>"); break; case RID_REGISTER: n = csc_register; break; case RID_STATIC: n = csc_static; /* Diagnose "__thread static". */ if (specs->thread_p && specs->thread_gnu_p) error ("%<__thread%> before %<static%>"); break; case RID_TYPEDEF: n = csc_typedef; break; default: gcc_unreachable (); } if (n != csc_none && n == specs->storage_class) dupe = true; if (dupe) { if (i == RID_THREAD) error ("duplicate %<_Thread_local%> or %<__thread%>"); else error ("duplicate %qE", scspec); } if (n != csc_none) { if (specs->storage_class != csc_none && n != specs->storage_class) { error ("multiple storage classes in declaration specifiers"); } else { specs->storage_class = n; specs->locations[cdw_storage_class] = loc; if (n != csc_extern && n != csc_static && specs->thread_p) { error ("%qs used with %qE", specs->thread_gnu_p ? "__thread" : "_Thread_local", scspec); specs->thread_p = false; } } } return specs; } /* Add the attributes ATTRS to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_attrs (source_location loc, struct c_declspecs *specs, tree attrs) { specs->attrs = chainon (attrs, specs->attrs); specs->locations[cdw_attributes] = loc; specs->declspecs_seen_p = true; return specs; } /* Add an _Alignas specifier (expression ALIGN, or type whose alignment is ALIGN) to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_alignas (source_location loc, struct c_declspecs *specs, tree align) { int align_log; specs->alignas_p = true; specs->locations[cdw_alignas] = loc; if (align == error_mark_node) return specs; align_log = check_user_alignment (align, true); if (align_log > specs->align_log) specs->align_log = align_log; return specs; } /* Combine "long", "short", "signed", "unsigned" and "_Complex" type specifiers with any other type specifier to determine the resulting type. This is where ISO C checks on complex types are made, since "_Complex long" is a prefix of the valid ISO C type "_Complex long double". */ struct c_declspecs * finish_declspecs (struct c_declspecs *specs) { /* If a type was specified as a whole, we have no modifiers and are done. */ if (specs->type != NULL_TREE) { gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); /* Set a dummy type. 
*/ if (TREE_CODE (specs->type) == ERROR_MARK) specs->type = integer_type_node; return specs; } /* If none of "void", "_Bool", "char", "int", "float" or "double" has been specified, treat it as "int" unless "_Complex" is present and there are no other specifiers. If we just have "_Complex", it is equivalent to "_Complex double", but e.g. "_Complex short" is equivalent to "_Complex short int". */ if (specs->typespec_word == cts_none) { if (specs->saturating_p) { error_at (specs->locations[cdw_saturating], "%<_Sat%> is used without %<_Fract%> or %<_Accum%>"); if (!targetm.fixed_point_supported_p ()) error_at (specs->locations[cdw_saturating], "fixed-point types not supported for this target"); specs->typespec_word = cts_fract; } else if (specs->long_p || specs->short_p || specs->signed_p || specs->unsigned_p) { specs->typespec_word = cts_int; } else if (specs->complex_p) { specs->typespec_word = cts_double; pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support plain %<complex%> meaning " "%<double complex%>"); } else { specs->typespec_word = cts_int; specs->default_int_p = true; /* We don't diagnose this here because grokdeclarator will give more specific diagnostics according to whether it is a function definition. */ } } /* If "signed" was specified, record this to distinguish "int" and "signed int" in the case of a bit-field with -funsigned-bitfields. */ specs->explicit_signed_p = specs->signed_p; /* Now compute the actual type. */ switch (specs->typespec_word) { case cts_auto_type: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); /* Type to be filled in later. */ break; case cts_void: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); specs->type = void_type_node; break; case cts_bool: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); specs->type = boolean_type_node; break; case cts_char: gcc_assert (!specs->long_p && !specs->short_p); gcc_assert (!(specs->signed_p && specs->unsigned_p)); if (specs->signed_p) specs->type = signed_char_type_node; else if (specs->unsigned_p) specs->type = unsigned_char_type_node; else specs->type = char_type_node; if (specs->complex_p) { pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support complex integer types"); specs->type = build_complex_type (specs->type); } break; case cts_int_n: gcc_assert (!specs->long_p && !specs->short_p && !specs->long_long_p); gcc_assert (!(specs->signed_p && specs->unsigned_p)); specs->type = (specs->unsigned_p ? int_n_trees[specs->int_n_idx].unsigned_type : int_n_trees[specs->int_n_idx].signed_type); if (specs->complex_p) { pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support complex integer types"); specs->type = build_complex_type (specs->type); } break; case cts_int: gcc_assert (!(specs->long_p && specs->short_p)); gcc_assert (!(specs->signed_p && specs->unsigned_p)); if (specs->long_long_p) specs->type = (specs->unsigned_p ? long_long_unsigned_type_node : long_long_integer_type_node); else if (specs->long_p) specs->type = (specs->unsigned_p ? long_unsigned_type_node : long_integer_type_node); else if (specs->short_p) specs->type = (specs->unsigned_p ? short_unsigned_type_node : short_integer_type_node); else specs->type = (specs->unsigned_p ? 
unsigned_type_node : integer_type_node); if (specs->complex_p) { pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support complex integer types"); specs->type = build_complex_type (specs->type); } break; case cts_float: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p); specs->type = (specs->complex_p ? complex_float_type_node : float_type_node); break; case cts_double: gcc_assert (!specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p); if (specs->long_p) { specs->type = (specs->complex_p ? complex_long_double_type_node : long_double_type_node); } else { specs->type = (specs->complex_p ? complex_double_type_node : double_type_node); } break; case cts_dfloat32: case cts_dfloat64: case cts_dfloat128: gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); if (specs->typespec_word == cts_dfloat32) specs->type = dfloat32_type_node; else if (specs->typespec_word == cts_dfloat64) specs->type = dfloat64_type_node; else specs->type = dfloat128_type_node; break; case cts_fract: gcc_assert (!specs->complex_p); if (!targetm.fixed_point_supported_p ()) specs->type = integer_type_node; else if (specs->saturating_p) { if (specs->long_long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_long_fract_type_node : sat_long_long_fract_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_fract_type_node : sat_long_fract_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? sat_unsigned_short_fract_type_node : sat_short_fract_type_node; else specs->type = specs->unsigned_p ? sat_unsigned_fract_type_node : sat_fract_type_node; } else { if (specs->long_long_p) specs->type = specs->unsigned_p ? unsigned_long_long_fract_type_node : long_long_fract_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? unsigned_long_fract_type_node : long_fract_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? unsigned_short_fract_type_node : short_fract_type_node; else specs->type = specs->unsigned_p ? unsigned_fract_type_node : fract_type_node; } break; case cts_accum: gcc_assert (!specs->complex_p); if (!targetm.fixed_point_supported_p ()) specs->type = integer_type_node; else if (specs->saturating_p) { if (specs->long_long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_long_accum_type_node : sat_long_long_accum_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_accum_type_node : sat_long_accum_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? sat_unsigned_short_accum_type_node : sat_short_accum_type_node; else specs->type = specs->unsigned_p ? sat_unsigned_accum_type_node : sat_accum_type_node; } else { if (specs->long_long_p) specs->type = specs->unsigned_p ? unsigned_long_long_accum_type_node : long_long_accum_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? unsigned_long_accum_type_node : long_accum_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? unsigned_short_accum_type_node : short_accum_type_node; else specs->type = specs->unsigned_p ? unsigned_accum_type_node : accum_type_node; } break; default: gcc_unreachable (); } return specs; } /* A subroutine of c_write_global_declarations. Perform final processing on one file scope's declarations (or the external scope's declarations), GLOBALS. 
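
   For example (an illustrative note, not from the original sources),
   the used-but-undefined check below fires for:

     static int f (void);
     int g (void) { return f (); }

   which draws "'f' used but never defined" at the end of the
   translation unit.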
*/ static void c_write_global_declarations_1 (tree globals) { tree decl; bool reconsider; /* Process the decls in the order they were written. */ for (decl = globals; decl; decl = DECL_CHAIN (decl)) { /* Check for used but undefined static functions using the C standard's definition of "used", and set TREE_NO_WARNING so that check_global_declarations doesn't repeat the check. */ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl) == 0 && DECL_EXTERNAL (decl) && !TREE_PUBLIC (decl) && C_DECL_USED (decl)) { pedwarn (input_location, 0, "%q+F used but never defined", decl); TREE_NO_WARNING (decl) = 1; } wrapup_global_declaration_1 (decl); } do { reconsider = false; for (decl = globals; decl; decl = DECL_CHAIN (decl)) reconsider |= wrapup_global_declaration_2 (decl); } while (reconsider); for (decl = globals; decl; decl = DECL_CHAIN (decl)) check_global_declaration_1 (decl); } /* A subroutine of c_write_global_declarations Emit debug information for each of the declarations in GLOBALS. */ static void c_write_global_declarations_2 (tree globals) { tree decl; for (decl = globals; decl ; decl = DECL_CHAIN (decl)) debug_hooks->global_decl (decl); } /* Callback to collect a source_ref from a DECL. */ static void collect_source_ref_cb (tree decl) { if (!DECL_IS_BUILTIN (decl)) collect_source_ref (LOCATION_FILE (decl_sloc (decl, false))); } /* Preserve the external declarations scope across a garbage collect. */ static GTY(()) tree ext_block; /* Collect all references relevant to SOURCE_FILE. */ static void collect_all_refs (const char *source_file) { tree t; unsigned i; FOR_EACH_VEC_ELT (*all_translation_units, i, t) collect_ada_nodes (BLOCK_VARS (DECL_INITIAL (t)), source_file); collect_ada_nodes (BLOCK_VARS (ext_block), source_file); } /* Iterate over all global declarations and call CALLBACK. */ static void for_each_global_decl (void (*callback) (tree decl)) { tree t; tree decls; tree decl; unsigned i; FOR_EACH_VEC_ELT (*all_translation_units, i, t) { decls = DECL_INITIAL (t); for (decl = BLOCK_VARS (decls); decl; decl = TREE_CHAIN (decl)) callback (decl); } for (decl = BLOCK_VARS (ext_block); decl; decl = TREE_CHAIN (decl)) callback (decl); } void c_write_global_declarations (void) { tree t; unsigned i; /* We don't want to do this if generating a PCH. */ if (pch_file) return; timevar_start (TV_PHASE_DEFERRED); /* Do the Objective-C stuff. This is where all the Objective-C module stuff gets generated (symtab, class/protocol/selector lists etc). */ if (c_dialect_objc ()) objc_write_global_declarations (); /* Close the external scope. */ ext_block = pop_scope (); external_scope = 0; gcc_assert (!current_scope); /* Handle -fdump-ada-spec[-slim]. */ if (flag_dump_ada_spec || flag_dump_ada_spec_slim) { /* Build a table of files to generate specs for */ if (flag_dump_ada_spec_slim) collect_source_ref (main_input_filename); else for_each_global_decl (collect_source_ref_cb); dump_ada_specs (collect_all_refs, NULL); } if (ext_block) { tree tmp = BLOCK_VARS (ext_block); int flags; FILE * stream = dump_begin (TDI_tu, &flags); if (stream && tmp) { dump_node (tmp, flags & ~TDF_SLIM, stream); dump_end (TDI_tu, stream); } } /* Process all file scopes in this compilation, and the external_scope, through wrapup_global_declarations and check_global_declarations. 
*/ FOR_EACH_VEC_ELT (*all_translation_units, i, t) c_write_global_declarations_1 (BLOCK_VARS (DECL_INITIAL (t))); c_write_global_declarations_1 (BLOCK_VARS (ext_block)); timevar_stop (TV_PHASE_DEFERRED); timevar_start (TV_PHASE_OPT_GEN); /* We're done parsing; proceed to optimize and emit assembly. FIXME: shouldn't be the front end's responsibility to call this. */ symtab->finalize_compilation_unit (); timevar_stop (TV_PHASE_OPT_GEN); timevar_start (TV_PHASE_DBGINFO); /* After cgraph has had a chance to emit everything that's going to be emitted, output debug information for globals. */ if (!seen_error ()) { timevar_push (TV_SYMOUT); FOR_EACH_VEC_ELT (*all_translation_units, i, t) c_write_global_declarations_2 (BLOCK_VARS (DECL_INITIAL (t))); c_write_global_declarations_2 (BLOCK_VARS (ext_block)); timevar_pop (TV_SYMOUT); } ext_block = NULL; timevar_stop (TV_PHASE_DBGINFO); } /* Register reserved keyword WORD as qualifier for address space AS. */ void c_register_addr_space (const char *word, addr_space_t as) { int rid = RID_FIRST_ADDR_SPACE + as; tree id; /* Address space qualifiers are only supported in C with GNU extensions enabled. */ if (c_dialect_objc () || flag_no_asm) return; id = get_identifier (word); C_SET_RID_CODE (id, rid); C_IS_RESERVED_WORD (id) = 1; ridpointers [rid] = id; } /* Return identifier to look up for omp declare reduction. */ tree c_omp_reduction_id (enum tree_code reduction_code, tree reduction_id) { const char *p = NULL; switch (reduction_code) { case PLUS_EXPR: p = "+"; break; case MULT_EXPR: p = "*"; break; case MINUS_EXPR: p = "-"; break; case BIT_AND_EXPR: p = "&"; break; case BIT_XOR_EXPR: p = "^"; break; case BIT_IOR_EXPR: p = "|"; break; case TRUTH_ANDIF_EXPR: p = "&&"; break; case TRUTH_ORIF_EXPR: p = "||"; break; case MIN_EXPR: p = "min"; break; case MAX_EXPR: p = "max"; break; default: break; } if (p == NULL) { if (TREE_CODE (reduction_id) != IDENTIFIER_NODE) return error_mark_node; p = IDENTIFIER_POINTER (reduction_id); } const char prefix[] = "omp declare reduction "; size_t lenp = sizeof (prefix); size_t len = strlen (p); char *name = XALLOCAVEC (char, lenp + len); memcpy (name, prefix, lenp - 1); memcpy (name + lenp - 1, p, len + 1); return get_identifier (name); } /* Lookup REDUCTION_ID in the current scope, or create an artificial VAR_DECL, bind it into the current scope and return it. */ tree c_omp_reduction_decl (tree reduction_id) { struct c_binding *b = I_SYMBOL_BINDING (reduction_id); if (b != NULL && B_IN_CURRENT_SCOPE (b)) return b->decl; tree decl = build_decl (BUILTINS_LOCATION, VAR_DECL, reduction_id, integer_type_node); DECL_ARTIFICIAL (decl) = 1; DECL_EXTERNAL (decl) = 1; TREE_STATIC (decl) = 1; TREE_PUBLIC (decl) = 0; bind (reduction_id, decl, current_scope, true, false, BUILTINS_LOCATION); return decl; } /* Lookup REDUCTION_ID in the first scope where it has entry for TYPE. */ tree c_omp_reduction_lookup (tree reduction_id, tree type) { struct c_binding *b = I_SYMBOL_BINDING (reduction_id); while (b) { tree t; for (t = DECL_INITIAL (b->decl); t; t = TREE_CHAIN (t)) if (comptypes (TREE_PURPOSE (t), type)) return TREE_VALUE (t); b = b->shadowed; } return error_mark_node; } /* Helper function called via walk_tree, to diagnose invalid #pragma omp declare reduction combiners or initializers. 
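
   For example (an illustrative note, not from the original sources),
   given a file-scope variable C, the combiner in

     #pragma omp declare reduction (foo : int : omp_out += omp_in * c)

   refers to C, which is neither omp_out nor omp_in, so the walk below
   reports it.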
*/

tree
c_check_omp_declare_reduction_r (tree *tp, int *, void *data)
{
  tree *vars = (tree *) data;
  if (SSA_VAR_P (*tp)
      && !DECL_ARTIFICIAL (*tp)
      && *tp != vars[0]
      && *tp != vars[1])
    {
      location_t loc = DECL_SOURCE_LOCATION (vars[0]);
      if (strcmp (IDENTIFIER_POINTER (DECL_NAME (vars[0])), "omp_out") == 0)
        error_at (loc, "%<#pragma omp declare reduction%> combiner refers to "
                       "variable %qD which is not %<omp_out%> nor %<omp_in%>",
                  *tp);
      else
        error_at (loc, "%<#pragma omp declare reduction%> initializer refers "
                       "to variable %qD which is not %<omp_priv%> nor "
                       "%<omp_orig%>", *tp);
      return *tp;
    }
  return NULL_TREE;
}

#include "gt-c-c-decl.h"
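
/* An illustrative sketch, not part of the original file: the name
   mangling done by c_omp_reduction_id above, recast with plain C
   strings instead of IDENTIFIER_NODEs.  The helper name
   omp_reduction_name is hypothetical.  */
#if 0
#include <stdio.h>

static void
omp_reduction_name (const char *op, char *buf, size_t bufsize)
{
  /* Mirrors the "omp declare reduction " prefix concatenation.  */
  snprintf (buf, bufsize, "omp declare reduction %s", op);
}

int
main (void)
{
  char buf[64];
  omp_reduction_name ("+", buf, sizeof buf);
  printf ("%s\n", buf);  /* prints: omp declare reduction +  */
  return 0;
}
#endif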
/* Process declarations and symbol lookup for C front end. Also constructs types; the standard scalar types at initialization, and structure, union, array and enum types when they are declared. */ /* ??? not all decl nodes are given the most useful possible line numbers. For example, the CONST_DECLs for enum values. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "input.h" #include "tm.h" #include "intl.h" #include "hash-set.h" #include "vec.h" #include "symtab.h" #include "input.h" #include "alias.h" #include "double-int.h" #include "machmode.h" #include "inchash.h" #include "tree.h" #include "fold-const.h" #include "print-tree.h" #include "stor-layout.h" #include "varasm.h" #include "attribs.h" #include "stringpool.h" #include "tree-inline.h" #include "flags.h" #include "hashtab.h" #include "hash-set.h" #include "vec.h" #include "machmode.h" #include "hard-reg-set.h" #include "function.h" #include "c-tree.h" #include "toplev.h" #include "tm_p.h" #include "cpplib.h" #include "target.h" #include "debug.h" #include "opts.h" #include "timevar.h" #include "c-family/c-common.h" #include "c-family/c-objc.h" #include "c-family/c-pragma.h" #include "c-family/c-ubsan.h" #include "c-lang.h" #include "langhooks.h" #include "tree-iterator.h" #include "diagnostic-core.h" #include "dumpfile.h" #include "hash-map.h" #include "is-a.h" #include "plugin-api.h" #include "ipa-ref.h" #include "cgraph.h" #include "hash-table.h" #include "langhooks-def.h" #include "plugin.h" #include "c-family/c-ada-spec.h" #include "cilk.h" #include "builtins.h" /* In grokdeclarator, distinguish syntactic contexts of declarators. */ enum decl_context { NORMAL, /* Ordinary declaration */ FUNCDEF, /* Function definition */ PARM, /* Declaration of parm before function body */ FIELD, /* Declaration inside struct or union */ TYPENAME}; /* Typename (inside cast or sizeof) */ /* States indicating how grokdeclarator() should handle declspecs marked with __attribute__((deprecated)). An object declared as __attribute__((deprecated)) suppresses warnings of uses of other deprecated items. */ enum deprecated_states { DEPRECATED_NORMAL, DEPRECATED_SUPPRESS }; /* Nonzero if we have seen an invalid cross reference to a struct, union, or enum, but not yet printed the message. */ tree pending_invalid_xref; /* File and line to appear in the eventual error message. */ location_t pending_invalid_xref_location; /* The file and line that the prototype came from if this is an old-style definition; used for diagnostics in store_parm_decls_oldstyle. */ static location_t current_function_prototype_locus; /* Whether this prototype was built-in. */ static bool current_function_prototype_built_in; /* The argument type information of this prototype. */ static tree current_function_prototype_arg_types; /* The argument information structure for the function currently being defined. */ static struct c_arg_info *current_function_arg_info; /* The obstack on which parser and related data structures, which are not live beyond their top-level declaration or definition, are allocated. */ struct obstack parser_obstack; /* The current statement tree. */ static GTY(()) struct stmt_tree_s c_stmt_tree; /* State saving variables. */ tree c_break_label; tree c_cont_label; /* A list of decls to be made automatically visible in each file scope. */ static GTY(()) tree visible_builtins; /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. 
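
   For example (an illustrative note, not from the original sources),
   "return 1;" in a function body sets this flag, while a plain
   "return;" sets current_function_returns_null instead.
*/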
*/ int current_function_returns_value; /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ int current_function_returns_null; /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. */ int current_function_returns_abnormally; /* Set to nonzero by `grokdeclarator' for a function whose return type is defaulted, if warnings for this are desired. */ static int warn_about_return_type; /* Nonzero when the current toplevel function contains a declaration of a nested function which is never defined. */ static bool undef_nested_function; /* If non-zero, implicit "omp declare target" attribute is added into the attribute lists. */ int current_omp_declare_target_attribute; /* Each c_binding structure describes one binding of an identifier to a decl. All the decls in a scope - irrespective of namespace - are chained together by the ->prev field, which (as the name implies) runs in reverse order. All the decls in a given namespace bound to a given identifier are chained by the ->shadowed field, which runs from inner to outer scopes. The ->decl field usually points to a DECL node, but there are two exceptions. In the namespace of type tags, the bound entity is a RECORD_TYPE, UNION_TYPE, or ENUMERAL_TYPE node. If an undeclared identifier is encountered, it is bound to error_mark_node to suppress further errors about that identifier in the current function. The ->u.type field stores the type of the declaration in this scope; if NULL, the type is the type of the ->decl field. This is only of relevance for objects with external or internal linkage which may be redeclared in inner scopes, forming composite types that only persist for the duration of those scopes. In the external scope, this stores the composite of all the types declared for this object, visible or not. The ->inner_comp field (used only at file scope) stores whether an incomplete array type at file scope was completed at an inner scope to an array size other than 1. The ->u.label field is used for labels. It points to a structure which stores additional information used for warnings. The depth field is copied from the scope structure that holds this decl. It is used to preserve the proper ordering of the ->shadowed field (see bind()) and also for a handful of special-case checks. Finally, the invisible bit is true for a decl which should be ignored for purposes of normal name lookup, and the nested bit is true for a decl that's been bound a second time in an inner scope; in all such cases, the binding in the outer scope will have its invisible bit true. 
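
   As an illustration (not from the original comment), in

     int i;
     void f (void)
     {
       int i;
       ...
     }

   the block-scope binding of "i" has its ->shadowed field pointing at
   the file-scope binding and a larger ->depth; popping the inner scope
   restores the outer binding in I_SYMBOL_BINDING.
*/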
*/ struct GTY((chain_next ("%h.prev"))) c_binding { union GTY(()) { /* first so GTY desc can use decl */ tree GTY((tag ("0"))) type; /* the type in this scope */ struct c_label_vars * GTY((tag ("1"))) label; /* for warnings */ } GTY((desc ("TREE_CODE (%0.decl) == LABEL_DECL"))) u; tree decl; /* the decl bound */ tree id; /* the identifier it's bound to */ struct c_binding *prev; /* the previous decl in this scope */ struct c_binding *shadowed; /* the innermost decl shadowed by this one */ unsigned int depth : 28; /* depth of this scope */ BOOL_BITFIELD invisible : 1; /* normal lookup should ignore this binding */ BOOL_BITFIELD nested : 1; /* do not set DECL_CONTEXT when popping */ BOOL_BITFIELD inner_comp : 1; /* incomplete array completed in inner scope */ BOOL_BITFIELD in_struct : 1; /* currently defined as struct field */ location_t locus; /* location for nested bindings */ }; #define B_IN_SCOPE(b1, b2) ((b1)->depth == (b2)->depth) #define B_IN_CURRENT_SCOPE(b) ((b)->depth == current_scope->depth) #define B_IN_FILE_SCOPE(b) ((b)->depth == 1 /*file_scope->depth*/) #define B_IN_EXTERNAL_SCOPE(b) ((b)->depth == 0 /*external_scope->depth*/) /* Each C symbol points to three linked lists of c_binding structures. These describe the values of the identifier in the three different namespaces defined by the language. */ struct GTY(()) lang_identifier { struct c_common_identifier common_id; struct c_binding *symbol_binding; /* vars, funcs, constants, typedefs */ struct c_binding *tag_binding; /* struct/union/enum tags */ struct c_binding *label_binding; /* labels */ }; /* Validate c-lang.c's assumptions. */ extern char C_SIZEOF_STRUCT_LANG_IDENTIFIER_isnt_accurate [(sizeof(struct lang_identifier) == C_SIZEOF_STRUCT_LANG_IDENTIFIER) ? 1 : -1]; /* The binding oracle; see c-tree.h. */ void (*c_binding_oracle) (enum c_oracle_request, tree identifier); /* This flag is set on an identifier if we have previously asked the binding oracle for this identifier's symbol binding. */ #define I_SYMBOL_CHECKED(node) \ (TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (node))) static inline struct c_binding* * i_symbol_binding (tree node) { struct lang_identifier *lid = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node); if (lid->symbol_binding == NULL && c_binding_oracle != NULL && !I_SYMBOL_CHECKED (node)) { /* Set the "checked" flag first, to avoid infinite recursion when the binding oracle calls back into gcc. */ I_SYMBOL_CHECKED (node) = 1; c_binding_oracle (C_ORACLE_SYMBOL, node); } return &lid->symbol_binding; } #define I_SYMBOL_BINDING(node) (*i_symbol_binding (node)) #define I_SYMBOL_DECL(node) \ (I_SYMBOL_BINDING(node) ? I_SYMBOL_BINDING(node)->decl : 0) /* This flag is set on an identifier if we have previously asked the binding oracle for this identifier's tag binding. */ #define I_TAG_CHECKED(node) \ (TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (node))) static inline struct c_binding ** i_tag_binding (tree node) { struct lang_identifier *lid = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node); if (lid->tag_binding == NULL && c_binding_oracle != NULL && !I_TAG_CHECKED (node)) { /* Set the "checked" flag first, to avoid infinite recursion when the binding oracle calls back into gcc. */ I_TAG_CHECKED (node) = 1; c_binding_oracle (C_ORACLE_TAG, node); } return &lid->tag_binding; } #define I_TAG_BINDING(node) (*i_tag_binding (node)) #define I_TAG_DECL(node) \ (I_TAG_BINDING(node) ? 
I_TAG_BINDING(node)->decl : 0) /* This flag is set on an identifier if we have previously asked the binding oracle for this identifier's label binding. */ #define I_LABEL_CHECKED(node) \ (TREE_LANG_FLAG_6 (IDENTIFIER_NODE_CHECK (node))) static inline struct c_binding ** i_label_binding (tree node) { struct lang_identifier *lid = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node); if (lid->label_binding == NULL && c_binding_oracle != NULL && !I_LABEL_CHECKED (node)) { /* Set the "checked" flag first, to avoid infinite recursion when the binding oracle calls back into gcc. */ I_LABEL_CHECKED (node) = 1; c_binding_oracle (C_ORACLE_LABEL, node); } return &lid->label_binding; } #define I_LABEL_BINDING(node) (*i_label_binding (node)) #define I_LABEL_DECL(node) \ (I_LABEL_BINDING(node) ? I_LABEL_BINDING(node)->decl : 0) /* The resulting tree type. */ union GTY((desc ("TREE_CODE (&%h.generic) == IDENTIFIER_NODE"), chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node { union tree_node GTY ((tag ("0"), desc ("tree_node_structure (&%h)"))) generic; struct lang_identifier GTY ((tag ("1"))) identifier; }; /* Track bindings and other things that matter for goto warnings. For efficiency, we do not gather all the decls at the point of definition. Instead, we point into the bindings structure. As scopes are popped, we update these structures and gather the decls that matter at that time. */ struct GTY(()) c_spot_bindings { /* The currently open scope which holds bindings defined when the label was defined or the goto statement was found. */ struct c_scope *scope; /* The bindings in the scope field which were defined at the point of the label or goto. This lets us look at older or newer bindings in the scope, as appropriate. */ struct c_binding *bindings_in_scope; /* The number of statement expressions that have started since this label or goto statement was defined. This is zero if we are at the same statement expression level. It is positive if we are in a statement expression started since this spot. It is negative if this spot was in a statement expression and we have left it. */ int stmt_exprs; /* Whether we started in a statement expression but are no longer in it. This is set to true if stmt_exprs ever goes negative. */ bool left_stmt_expr; }; /* This structure is used to keep track of bindings seen when a goto statement is defined. This is only used if we see the goto statement before we see the label. */ struct GTY(()) c_goto_bindings { /* The location of the goto statement. */ location_t loc; /* The bindings of the goto statement. */ struct c_spot_bindings goto_bindings; }; typedef struct c_goto_bindings *c_goto_bindings_p; /* The additional information we keep track of for a label binding. These fields are updated as scopes are popped. */ struct GTY(()) c_label_vars { /* The shadowed c_label_vars, when one label shadows another (which can only happen using a __label__ declaration). */ struct c_label_vars *shadowed; /* The bindings when the label was defined. */ struct c_spot_bindings label_bindings; /* A list of decls that we care about: decls about which we should warn if a goto branches to this label from later in the function. Decls are added to this list as scopes are popped. We only add the decls that matter. */ vec<tree, va_gc> *decls_in_scope; /* A list of goto statements to this label. This is only used for goto statements seen before the label was defined, so that we can issue appropriate warnings for them. 
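   A minimal hypothetical case that exercises this list:

     void g (int flag)
     {
       if (flag)
         goto out;        // goto seen before 'out' is defined; queued
       int x = flag + 1;  //  here so the jump over this initialization
      out:                //  can be diagnosed once the label appears
       (void) x;
     }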
*/ vec<c_goto_bindings_p, va_gc> *gotos; }; /* Each c_scope structure describes the complete contents of one scope. Four scopes are distinguished specially: the innermost or current scope, the innermost function scope, the file scope (always the second to outermost) and the outermost or external scope. Most declarations are recorded in the current scope. All normal label declarations are recorded in the innermost function scope, as are bindings of undeclared identifiers to error_mark_node. (GCC permits nested functions as an extension, hence the 'innermost' qualifier.) Explicitly declared labels (using the __label__ extension) appear in the current scope. Being in the file scope (current_scope == file_scope) causes special behavior in several places below. Also, under some conditions the Objective-C front end records declarations in the file scope even though that isn't the current scope. All declarations with external linkage are recorded in the external scope, even if they aren't visible there; this models the fact that such declarations are visible to the entire program, and (with a bit of cleverness, see pushdecl) allows diagnosis of some violations of C99 6.2.2p7 and 6.2.7p2: If, within the same translation unit, the same identifier appears with both internal and external linkage, the behavior is undefined. All declarations that refer to the same object or function shall have compatible type; otherwise, the behavior is undefined. Initially only the built-in declarations, which describe compiler intrinsic functions plus a subset of the standard library, are in this scope. The order of the blocks list matters, and it is frequently appended to. To avoid having to walk all the way to the end of the list on each insertion, or reverse the list later, we maintain a pointer to the last list entry. (FIXME: It should be feasible to use a reversed list here.) The bindings list is strictly in reverse order of declarations; pop_scope relies on this. */ struct GTY((chain_next ("%h.outer"))) c_scope { /* The scope containing this one. */ struct c_scope *outer; /* The next outermost function scope. */ struct c_scope *outer_function; /* All bindings in this scope. */ struct c_binding *bindings; /* For each scope (except the global one), a chain of BLOCK nodes for all the scopes that were entered and exited one level down. */ tree blocks; tree blocks_last; /* The depth of this scope. Used to keep the ->shadowed chain of bindings sorted innermost to outermost. */ unsigned int depth : 28; /* True if we are currently filling this scope with parameter declarations. */ BOOL_BITFIELD parm_flag : 1; /* True if we saw [*] in this scope. Used to give an error message if it appears in a function definition. */ BOOL_BITFIELD had_vla_unspec : 1; /* True if we already complained about forward parameter decls in this scope. This prevents double warnings on foo (int a; int b; ...) */ BOOL_BITFIELD warned_forward_parm_decls : 1; /* True if this is the outermost block scope of a function body. This scope contains the parameters, the local variables declared in the outermost block, and all the labels (except those in nested functions, or declared at block scope with __label__). */ BOOL_BITFIELD function_body : 1; /* True means make a BLOCK for this scope no matter what. */ BOOL_BITFIELD keep : 1; /* True means that an unsuffixed float constant is _Decimal64. */ BOOL_BITFIELD float_const_decimal64 : 1; /* True if this scope has any label bindings.
This is used to speed up searching for labels when popping scopes, particularly since labels are normally only found at function scope. */ BOOL_BITFIELD has_label_bindings : 1; /* True if we should issue a warning if a goto statement crosses any of the bindings. We still need to check the list of bindings to find the specific ones we need to warn about. This is true if decl_jump_unsafe would return true for any of the bindings. This is used to avoid looping over all the bindings unnecessarily. */ BOOL_BITFIELD has_jump_unsafe_decl : 1; }; /* The scope currently in effect. */ static GTY(()) struct c_scope *current_scope; /* The innermost function scope. Ordinary (not explicitly declared) labels, bindings to error_mark_node, and the lazily-created bindings of __func__ and its friends get this scope. */ static GTY(()) struct c_scope *current_function_scope; /* The C file scope. This is reset for each input translation unit. */ static GTY(()) struct c_scope *file_scope; /* The outermost scope. This is used for all declarations with external linkage, and only these, hence the name. */ static GTY(()) struct c_scope *external_scope; /* A chain of c_scope structures awaiting reuse. */ static GTY((deletable)) struct c_scope *scope_freelist; /* A chain of c_binding structures awaiting reuse. */ static GTY((deletable)) struct c_binding *binding_freelist; /* Append VAR to LIST in scope SCOPE. */ #define SCOPE_LIST_APPEND(scope, list, decl) do { \ struct c_scope *s_ = (scope); \ tree d_ = (decl); \ if (s_->list##_last) \ BLOCK_CHAIN (s_->list##_last) = d_; \ else \ s_->list = d_; \ s_->list##_last = d_; \ } while (0) /* Concatenate FROM in scope FSCOPE onto TO in scope TSCOPE. */ #define SCOPE_LIST_CONCAT(tscope, to, fscope, from) do { \ struct c_scope *t_ = (tscope); \ struct c_scope *f_ = (fscope); \ if (t_->to##_last) \ BLOCK_CHAIN (t_->to##_last) = f_->from; \ else \ t_->to = f_->from; \ t_->to##_last = f_->from##_last; \ } while (0) /* A c_inline_static structure stores details of a static identifier referenced in a definition of a function that may be an inline definition if no subsequent declaration of that function uses "extern" or does not use "inline". */ struct GTY((chain_next ("%h.next"))) c_inline_static { /* The location for a diagnostic. */ location_t location; /* The function that may be an inline definition. */ tree function; /* The object or function referenced. */ tree static_decl; /* What sort of reference this is. */ enum c_inline_static_type type; /* The next such structure or NULL. */ struct c_inline_static *next; }; /* List of static identifiers used or referenced in functions that may be inline definitions. */ static GTY(()) struct c_inline_static *c_inline_statics; /* True means unconditionally make a BLOCK for the next scope pushed. */ static bool keep_next_level_flag; /* True means the next call to push_scope will be the outermost scope of a function body, so do not push a new scope, merely cease expecting parameter decls. */ static bool next_is_function_body; /* A vector of pointers to c_binding structures. */ typedef struct c_binding *c_binding_ptr; /* Information that we keep for a struct or union while it is being parsed. */ struct c_struct_parse_info { /* If warn_cxx_compat, a list of types defined within this struct. */ vec<tree> struct_types; /* If warn_cxx_compat, a list of field names which have bindings, and which are defined in this struct, but which are not defined in any enclosing struct. This is used to clear the in_struct field of the c_bindings structure. 
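   For instance (hypothetical, and only relevant with -Wc++-compat):

     typedef int t;
     struct s { t t; };   // the field name 't' shadows the typedef

   While 'struct s' is parsed, the in_struct flag is set on the binding
   of 't'; recording the binding here lets the code that finishes the
   struct clear the flag again.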
*/ vec<c_binding_ptr> fields; /* If warn_cxx_compat, a list of typedef names used when defining fields in this struct. */ vec<tree> typedefs_seen; }; /* Information for the struct or union currently being parsed, or NULL if not parsing a struct or union. */ static struct c_struct_parse_info *struct_parse_info; /* Forward declarations. */ static tree lookup_name_in_scope (tree, struct c_scope *); static tree c_make_fname_decl (location_t, tree, int); static tree grokdeclarator (const struct c_declarator *, struct c_declspecs *, enum decl_context, bool, tree *, tree *, tree *, bool *, enum deprecated_states); static tree grokparms (struct c_arg_info *, bool); static void layout_array_type (tree); static void warn_defaults_to (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); /* T is a statement. Add it to the statement-tree. This is the C/ObjC version--C++ has a slightly different version of this function. */ tree add_stmt (tree t) { enum tree_code code = TREE_CODE (t); if (CAN_HAVE_LOCATION_P (t) && code != LABEL_EXPR) { if (!EXPR_HAS_LOCATION (t)) SET_EXPR_LOCATION (t, input_location); } if (code == LABEL_EXPR || code == CASE_LABEL_EXPR) STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1; /* Add T to the statement-tree. Non-side-effect statements need to be recorded during statement expressions. */ if (!building_stmt_list_p ()) push_stmt_list (); append_to_statement_list_force (t, &cur_stmt_list); return t; } /* Build a pointer type using the default pointer mode. */ static tree c_build_pointer_type (tree to_type) { addr_space_t as = to_type == error_mark_node? ADDR_SPACE_GENERIC : TYPE_ADDR_SPACE (to_type); machine_mode pointer_mode; if (as != ADDR_SPACE_GENERIC || c_default_pointer_mode == VOIDmode) pointer_mode = targetm.addr_space.pointer_mode (as); else pointer_mode = c_default_pointer_mode; return build_pointer_type_for_mode (to_type, pointer_mode, false); } /* Return true if we will want to say something if a goto statement crosses DECL. */ static bool decl_jump_unsafe (tree decl) { if (error_operand_p (decl)) return false; /* Always warn about crossing variably modified types. */ if ((TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == TYPE_DECL) && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE)) return true; /* Otherwise, only warn if -Wgoto-misses-init and this is an initialized automatic decl. */ if (warn_jump_misses_init && TREE_CODE (decl) == VAR_DECL && !TREE_STATIC (decl) && DECL_INITIAL (decl) != NULL_TREE) return true; return false; } void c_print_identifier (FILE *file, tree node, int indent) { void (*save) (enum c_oracle_request, tree identifier); /* Temporarily hide any binding oracle. Without this, calls to debug_tree from the debugger will end up calling into the oracle, making for a confusing debug session. As the oracle isn't needed here for normal operation, it's simplest to suppress it. */ save = c_binding_oracle; c_binding_oracle = NULL; print_node (file, "symbol", I_SYMBOL_DECL (node), indent + 4); print_node (file, "tag", I_TAG_DECL (node), indent + 4); print_node (file, "label", I_LABEL_DECL (node), indent + 4); if (C_IS_RESERVED_WORD (node) && C_RID_CODE (node) != RID_CXX_COMPAT_WARN) { tree rid = ridpointers[C_RID_CODE (node)]; indent_to (file, indent + 4); fprintf (file, "rid " HOST_PTR_PRINTF " \"%s\"", (void *) rid, IDENTIFIER_POINTER (rid)); } c_binding_oracle = save; } /* Establish a binding between NAME, an IDENTIFIER_NODE, and DECL, which may be any of several kinds of DECL or TYPE or error_mark_node, in the scope SCOPE. 
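   A schematic call (the flag values vary by caller; pushdecl and
   pushtag are the main entry points that funnel into this routine):

     bind (DECL_NAME (decl), decl, current_scope,
           false,                          // invisible: normal lookup
           false,                          // nested: home scope binding
           DECL_SOURCE_LOCATION (decl));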
*/ static void bind (tree name, tree decl, struct c_scope *scope, bool invisible, bool nested, location_t locus) { struct c_binding *b, **here; if (binding_freelist) { b = binding_freelist; binding_freelist = b->prev; } else b = ggc_alloc<c_binding> (); b->shadowed = 0; b->decl = decl; b->id = name; b->depth = scope->depth; b->invisible = invisible; b->nested = nested; b->inner_comp = 0; b->in_struct = 0; b->locus = locus; b->u.type = NULL; b->prev = scope->bindings; scope->bindings = b; if (decl_jump_unsafe (decl)) scope->has_jump_unsafe_decl = 1; if (!name) return; switch (TREE_CODE (decl)) { case LABEL_DECL: here = &I_LABEL_BINDING (name); break; case ENUMERAL_TYPE: case UNION_TYPE: case RECORD_TYPE: here = &I_TAG_BINDING (name); break; case VAR_DECL: case FUNCTION_DECL: case TYPE_DECL: case CONST_DECL: case PARM_DECL: case ERROR_MARK: here = &I_SYMBOL_BINDING (name); break; default: gcc_unreachable (); } /* Locate the appropriate place in the chain of shadowed decls to insert this binding. Normally, scope == current_scope and this does nothing. */ while (*here && (*here)->depth > scope->depth) here = &(*here)->shadowed; b->shadowed = *here; *here = b; } /* Clear the binding structure B, stick it on the binding_freelist, and return the former value of b->prev. This is used by pop_scope and get_parm_info to iterate destructively over all the bindings from a given scope. */ static struct c_binding * free_binding_and_advance (struct c_binding *b) { struct c_binding *prev = b->prev; memset (b, 0, sizeof (struct c_binding)); b->prev = binding_freelist; binding_freelist = b; return prev; } /* Bind a label. Like bind, but skip fields which aren't used for labels, and add the LABEL_VARS value. */ static void bind_label (tree name, tree label, struct c_scope *scope, struct c_label_vars *label_vars) { struct c_binding *b; bind (name, label, scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); scope->has_label_bindings = true; b = scope->bindings; gcc_assert (b->decl == label); label_vars->shadowed = b->u.label; b->u.label = label_vars; } /* Hook called at end of compilation to assume 1 elt for a file-scope tentative array defn that wasn't complete before. */ void c_finish_incomplete_decl (tree decl) { if (TREE_CODE (decl) == VAR_DECL) { tree type = TREE_TYPE (decl); if (type != error_mark_node && TREE_CODE (type) == ARRAY_TYPE && !DECL_EXTERNAL (decl) && TYPE_DOMAIN (type) == 0) { warning_at (DECL_SOURCE_LOCATION (decl), 0, "array %q+D assumed to have one element", decl); complete_array_type (&TREE_TYPE (decl), NULL_TREE, true); relayout_decl (decl); } } } /* Record that inline function FUNC contains a reference (location LOC) to static DECL (file-scope or function-local according to TYPE). */ void record_inline_static (location_t loc, tree func, tree decl, enum c_inline_static_type type) { c_inline_static *csi = ggc_alloc<c_inline_static> (); csi->location = loc; csi->function = func; csi->static_decl = decl; csi->type = type; csi->next = c_inline_statics; c_inline_statics = csi; } /* Check for references to static declarations in inline functions at the end of the translation unit and diagnose them if the functions are still inline definitions. 
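   A hypothetical translation unit that would be diagnosed:

     static int counter;          // internal linkage
     inline int next (void)       // C99 inline definition, if no other
     {                            //  declaration drops 'inline' or
       return counter++;          //  adds 'extern'
     }

   The reference to 'counter' is recorded as csi_internal; if 'next' is
   still an inline definition at the end of the translation unit, a
   pedwarn is issued.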
*/ static void check_inline_statics (void) { struct c_inline_static *csi; for (csi = c_inline_statics; csi; csi = csi->next) { if (DECL_EXTERNAL (csi->function)) switch (csi->type) { case csi_internal: pedwarn (csi->location, 0, "%qD is static but used in inline function %qD " "which is not static", csi->static_decl, csi->function); break; case csi_modifiable: pedwarn (csi->location, 0, "%q+D is static but declared in inline function %qD " "which is not static", csi->static_decl, csi->function); break; default: gcc_unreachable (); } } c_inline_statics = NULL; } /* Fill in a c_spot_bindings structure. If DEFINING is true, set it for the current state, otherwise set it to uninitialized. */ static void set_spot_bindings (struct c_spot_bindings *p, bool defining) { if (defining) { p->scope = current_scope; p->bindings_in_scope = current_scope->bindings; } else { p->scope = NULL; p->bindings_in_scope = NULL; } p->stmt_exprs = 0; p->left_stmt_expr = false; } /* Update spot bindings P as we pop out of SCOPE. Return true if we should push decls for a label. */ static bool update_spot_bindings (struct c_scope *scope, struct c_spot_bindings *p) { if (p->scope != scope) { /* This label or goto is defined in some other scope, or it is a label which is not yet defined. There is nothing to update. */ return false; } /* Adjust the spot bindings to refer to the bindings already defined in the enclosing scope. */ p->scope = scope->outer; p->bindings_in_scope = p->scope->bindings; return true; } /* The Objective-C front-end often needs to determine the current scope. */ void * objc_get_current_scope (void) { return current_scope; } /* The following function is used only by Objective-C. It needs to live here because it accesses the innards of c_scope. */ void objc_mark_locals_volatile (void *enclosing_blk) { struct c_scope *scope; struct c_binding *b; for (scope = current_scope; scope && scope != enclosing_blk; scope = scope->outer) { for (b = scope->bindings; b; b = b->prev) objc_volatilize_decl (b->decl); /* Do not climb up past the current function. */ if (scope->function_body) break; } } /* Return true if we are in the global binding level. */ bool global_bindings_p (void) { return current_scope == file_scope; } void keep_next_level (void) { keep_next_level_flag = true; } /* Set the flag for the FLOAT_CONST_DECIMAL64 pragma being ON. */ void set_float_const_decimal64 (void) { current_scope->float_const_decimal64 = true; } /* Clear the flag for the FLOAT_CONST_DECIMAL64 pragma. */ void clear_float_const_decimal64 (void) { current_scope->float_const_decimal64 = false; } /* Return nonzero if an unsuffixed float constant is _Decimal64. */ bool float_const_decimal64_p (void) { return current_scope->float_const_decimal64; } /* Identify this scope as currently being filled with parameters. */ void declare_parm_level (void) { current_scope->parm_flag = true; } void push_scope (void) { if (next_is_function_body) { /* This is the transition from the parameters to the top level of the function body. These are the same scope (C99 6.2.1p4,6) so we do not push another scope structure. next_is_function_body is set only by store_parm_decls, which in turn is called when and only when we are about to encounter the opening curly brace for the function body. The outermost block of a function always gets a BLOCK node, because the debugging output routines expect that each function has at least one BLOCK. 
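   Concretely, for a hypothetical definition

     int h (int n)     // the parameter scope...
     {
       return n;       // ...is also the outermost block scope, so the
     }                 //  brace does not push a second c_scope

   a single scope covers both the parameters and the outer block.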
*/ current_scope->parm_flag = false; current_scope->function_body = true; current_scope->keep = true; current_scope->outer_function = current_function_scope; current_function_scope = current_scope; keep_next_level_flag = false; next_is_function_body = false; /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes. */ if (current_scope->outer) current_scope->float_const_decimal64 = current_scope->outer->float_const_decimal64; else current_scope->float_const_decimal64 = false; } else { struct c_scope *scope; if (scope_freelist) { scope = scope_freelist; scope_freelist = scope->outer; } else scope = ggc_cleared_alloc<c_scope> (); /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes. */ if (current_scope) scope->float_const_decimal64 = current_scope->float_const_decimal64; else scope->float_const_decimal64 = false; scope->keep = keep_next_level_flag; scope->outer = current_scope; scope->depth = current_scope ? (current_scope->depth + 1) : 0; /* Check for scope depth overflow. Unlikely (2^28 == 268,435,456) but possible. */ if (current_scope && scope->depth == 0) { scope->depth--; sorry ("GCC supports only %u nested scopes", scope->depth); } current_scope = scope; keep_next_level_flag = false; } } /* This is called when we are leaving SCOPE. For each label defined in SCOPE, add any appropriate decls to its decls_in_scope fields. These are the decls whose initialization will be skipped by a goto later in the function. */ static void update_label_decls (struct c_scope *scope) { struct c_scope *s; s = scope; while (s != NULL) { if (s->has_label_bindings) { struct c_binding *b; for (b = s->bindings; b != NULL; b = b->prev) { struct c_label_vars *label_vars; struct c_binding *b1; bool hjud; unsigned int ix; struct c_goto_bindings *g; if (TREE_CODE (b->decl) != LABEL_DECL) continue; label_vars = b->u.label; b1 = label_vars->label_bindings.bindings_in_scope; if (label_vars->label_bindings.scope == NULL) hjud = false; else hjud = label_vars->label_bindings.scope->has_jump_unsafe_decl; if (update_spot_bindings (scope, &label_vars->label_bindings)) { /* This label is defined in this scope. */ if (hjud) { for (; b1 != NULL; b1 = b1->prev) { /* A goto from later in the function to this label will never see the initialization of B1, if any. Save it to issue a warning if needed. */ if (decl_jump_unsafe (b1->decl)) vec_safe_push(label_vars->decls_in_scope, b1->decl); } } } /* Update the bindings of any goto statements associated with this label. */ FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) update_spot_bindings (scope, &g->goto_bindings); } } /* Don't search beyond the current function. */ if (s == current_function_scope) break; s = s->outer; } } /* Set the TYPE_CONTEXT of all of TYPE's variants to CONTEXT. */ static void set_type_context (tree type, tree context) { for (type = TYPE_MAIN_VARIANT (type); type; type = TYPE_NEXT_VARIANT (type)) TYPE_CONTEXT (type) = context; } /* Exit a scope. Restore the state of the identifier-decl mappings that were in effect when this scope was entered. Return a BLOCK node containing all the DECLs in this scope that are of interest to debug info generation. */ tree pop_scope (void) { struct c_scope *scope = current_scope; tree block, context, p; struct c_binding *b; bool functionbody = scope->function_body; bool keep = functionbody || scope->keep || scope->bindings; update_label_decls (scope); /* If appropriate, create a BLOCK to record the decls for the life of this function. 
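   ('keep' is computed above as true for a function body, for a scope
   explicitly retained via keep_next_level, or for any scope that made
   bindings.  Schematically, a hypothetical body

     { int a; { int b; } }

   produces an outer BLOCK whose BLOCK_SUBBLOCKS chain carries the inner
   BLOCK created for the scope of 'b'.)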
*/ block = 0; if (keep) { block = make_node (BLOCK); BLOCK_SUBBLOCKS (block) = scope->blocks; TREE_USED (block) = 1; /* In each subblock, record that this is its superior. */ for (p = scope->blocks; p; p = BLOCK_CHAIN (p)) BLOCK_SUPERCONTEXT (p) = block; BLOCK_VARS (block) = 0; } /* The TYPE_CONTEXTs for all of the tagged types belonging to this scope must be set so that they point to the appropriate construct, i.e. either to the current FUNCTION_DECL node, or else to the BLOCK node we just constructed. Note that for tagged types whose scope is just the formal parameter list for some function type specification, we can't properly set their TYPE_CONTEXTs here, because we don't have a pointer to the appropriate FUNCTION_TYPE node readily available to us. For those cases, the TYPE_CONTEXTs of the relevant tagged type nodes get set in `grokdeclarator' as soon as we have created the FUNCTION_TYPE node which will represent the "scope" for these "parameter list local" tagged types. */ if (scope->function_body) context = current_function_decl; else if (scope == file_scope) { tree file_decl = build_translation_unit_decl (NULL_TREE); context = file_decl; debug_hooks->register_main_translation_unit (file_decl); } else context = block; /* Clear all bindings in this scope. */ for (b = scope->bindings; b; b = free_binding_and_advance (b)) { p = b->decl; switch (TREE_CODE (p)) { case LABEL_DECL: /* Warnings for unused labels, errors for undefined labels. */ if (TREE_USED (p) && !DECL_INITIAL (p)) { error ("label %q+D used but not defined", p); DECL_INITIAL (p) = error_mark_node; } else warn_for_unused_label (p); /* Labels go in BLOCK_VARS. */ DECL_CHAIN (p) = BLOCK_VARS (block); BLOCK_VARS (block) = p; gcc_assert (I_LABEL_BINDING (b->id) == b); I_LABEL_BINDING (b->id) = b->shadowed; /* Also pop back to the shadowed label_vars. */ release_tree_vector (b->u.label->decls_in_scope); b->u.label = b->u.label->shadowed; break; case ENUMERAL_TYPE: case UNION_TYPE: case RECORD_TYPE: set_type_context (p, context); /* Types may not have tag-names, in which case the type appears in the bindings list with b->id NULL. */ if (b->id) { gcc_assert (I_TAG_BINDING (b->id) == b); I_TAG_BINDING (b->id) = b->shadowed; } break; case FUNCTION_DECL: /* Propagate TREE_ADDRESSABLE from nested functions to their containing functions. */ if (!TREE_ASM_WRITTEN (p) && DECL_INITIAL (p) != 0 && TREE_ADDRESSABLE (p) && DECL_ABSTRACT_ORIGIN (p) != 0 && DECL_ABSTRACT_ORIGIN (p) != p) TREE_ADDRESSABLE (DECL_ABSTRACT_ORIGIN (p)) = 1; if (!DECL_EXTERNAL (p) && !DECL_INITIAL (p) && scope != file_scope && scope != external_scope) { error ("nested function %q+D declared but never defined", p); undef_nested_function = true; } else if (DECL_DECLARED_INLINE_P (p) && TREE_PUBLIC (p) && !DECL_INITIAL (p)) { /* C99 6.7.4p6: "a function with external linkage... declared with an inline function specifier ... shall also be defined in the same translation unit." */ if (!flag_gnu89_inline && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (p)) && scope != external_scope) pedwarn (input_location, 0, "inline function %q+D declared but never defined", p); DECL_EXTERNAL (p) = 1; } goto common_symbol; case VAR_DECL: /* Warnings for unused variables. 
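   Both forms, shown hypothetically:

     void u (void)
     {
       int dead;       // -Wunused-variable: never used at all
       int set = 1;    // TREE_USED but !DECL_READ_P:
       set = 2;        //  -Wunused-but-set-variable
     }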
*/ if ((!TREE_USED (p) || !DECL_READ_P (p)) && !TREE_NO_WARNING (p) && !DECL_IN_SYSTEM_HEADER (p) && DECL_NAME (p) && !DECL_ARTIFICIAL (p) && scope != file_scope && scope != external_scope) { if (!TREE_USED (p)) warning (OPT_Wunused_variable, "unused variable %q+D", p); else if (DECL_CONTEXT (p) == current_function_decl) warning_at (DECL_SOURCE_LOCATION (p), OPT_Wunused_but_set_variable, "variable %qD set but not used", p); } if (b->inner_comp) { error ("type of array %q+D completed incompatibly with" " implicit initialization", p); } /* Fall through. */ case TYPE_DECL: case CONST_DECL: common_symbol: /* All of these go in BLOCK_VARS, but only if this is the binding in the home scope. */ if (!b->nested) { DECL_CHAIN (p) = BLOCK_VARS (block); BLOCK_VARS (block) = p; } else if (VAR_OR_FUNCTION_DECL_P (p) && scope != file_scope) { /* For block local externs add a special DECL_EXTERNAL decl for debug info generation. */ tree extp = copy_node (p); DECL_EXTERNAL (extp) = 1; TREE_STATIC (extp) = 0; TREE_PUBLIC (extp) = 1; DECL_INITIAL (extp) = NULL_TREE; DECL_LANG_SPECIFIC (extp) = NULL; DECL_CONTEXT (extp) = current_function_decl; if (TREE_CODE (p) == FUNCTION_DECL) { DECL_RESULT (extp) = NULL_TREE; DECL_SAVED_TREE (extp) = NULL_TREE; DECL_STRUCT_FUNCTION (extp) = NULL; } if (b->locus != UNKNOWN_LOCATION) DECL_SOURCE_LOCATION (extp) = b->locus; DECL_CHAIN (extp) = BLOCK_VARS (block); BLOCK_VARS (block) = extp; } /* If this is the file scope set DECL_CONTEXT of each decl to the TRANSLATION_UNIT_DECL. This makes same_translation_unit_p work. */ if (scope == file_scope) { DECL_CONTEXT (p) = context; if (TREE_CODE (p) == TYPE_DECL && TREE_TYPE (p) != error_mark_node) set_type_context (TREE_TYPE (p), context); } /* Fall through. */ /* Parameters go in DECL_ARGUMENTS, not BLOCK_VARS, and have already been put there by store_parm_decls. Unused- parameter warnings are handled by function.c. error_mark_node obviously does not go in BLOCK_VARS and does not get unused-variable warnings. */ case PARM_DECL: case ERROR_MARK: /* It is possible for a decl not to have a name. We get here with b->id NULL in this case. */ if (b->id) { gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; if (b->shadowed && b->shadowed->u.type) TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type; } break; default: gcc_unreachable (); } } /* Dispose of the block that we just made inside some higher level. */ if ((scope->function_body || scope == file_scope) && context) { DECL_INITIAL (context) = block; BLOCK_SUPERCONTEXT (block) = context; } else if (scope->outer) { if (block) SCOPE_LIST_APPEND (scope->outer, blocks, block); /* If we did not make a block for the scope just exited, any blocks made for inner scopes must be carried forward so they will later become subblocks of something else. */ else if (scope->blocks) SCOPE_LIST_CONCAT (scope->outer, blocks, scope, blocks); } /* Pop the current scope, and free the structure for reuse. 
*/ current_scope = scope->outer; if (scope->function_body) current_function_scope = scope->outer_function; memset (scope, 0, sizeof (struct c_scope)); scope->outer = scope_freelist; scope_freelist = scope; return block; } void push_file_scope (void) { tree decl; if (file_scope) return; push_scope (); file_scope = current_scope; start_fname_decls (); for (decl = visible_builtins; decl; decl = DECL_CHAIN (decl)) bind (DECL_NAME (decl), decl, file_scope, /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl)); } void pop_file_scope (void) { /* In case there were missing closebraces, get us back to the global binding level. */ while (current_scope != file_scope) pop_scope (); /* __FUNCTION__ is defined at file scope (""). This call may not be necessary as my tests indicate it still works without it. */ finish_fname_decls (); check_inline_statics (); /* This is the point to write out a PCH if we're doing that. In that case we do not want to do anything else. */ if (pch_file) { c_common_write_pch (); return; } /* Pop off the file scope and close this translation unit. */ pop_scope (); file_scope = 0; maybe_apply_pending_pragma_weaks (); } /* Adjust the bindings for the start of a statement expression. */ void c_bindings_start_stmt_expr (struct c_spot_bindings* switch_bindings) { struct c_scope *scope; for (scope = current_scope; scope != NULL; scope = scope->outer) { struct c_binding *b; if (!scope->has_label_bindings) continue; for (b = scope->bindings; b != NULL; b = b->prev) { struct c_label_vars *label_vars; unsigned int ix; struct c_goto_bindings *g; if (TREE_CODE (b->decl) != LABEL_DECL) continue; label_vars = b->u.label; ++label_vars->label_bindings.stmt_exprs; FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) ++g->goto_bindings.stmt_exprs; } } if (switch_bindings != NULL) ++switch_bindings->stmt_exprs; } /* Adjust the bindings for the end of a statement expression. */ void c_bindings_end_stmt_expr (struct c_spot_bindings *switch_bindings) { struct c_scope *scope; for (scope = current_scope; scope != NULL; scope = scope->outer) { struct c_binding *b; if (!scope->has_label_bindings) continue; for (b = scope->bindings; b != NULL; b = b->prev) { struct c_label_vars *label_vars; unsigned int ix; struct c_goto_bindings *g; if (TREE_CODE (b->decl) != LABEL_DECL) continue; label_vars = b->u.label; --label_vars->label_bindings.stmt_exprs; if (label_vars->label_bindings.stmt_exprs < 0) { label_vars->label_bindings.left_stmt_expr = true; label_vars->label_bindings.stmt_exprs = 0; } FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) { --g->goto_bindings.stmt_exprs; if (g->goto_bindings.stmt_exprs < 0) { g->goto_bindings.left_stmt_expr = true; g->goto_bindings.stmt_exprs = 0; } } } } if (switch_bindings != NULL) { --switch_bindings->stmt_exprs; gcc_assert (switch_bindings->stmt_exprs >= 0); } } /* Push a definition or a declaration of struct, union or enum tag "name". "type" should be the type node. We assume that the tag "name" is not already defined, and has a location of LOC. Note that the definition may really be just a forward reference. In that case, the TYPE_SIZE will be zero. */ static void pushtag (location_t loc, tree name, tree type) { /* Record the identifier as the type's name if it has none. */ if (name && !TYPE_NAME (type)) TYPE_NAME (type) = name; bind (name, type, current_scope, /*invisible=*/false, /*nested=*/false, loc); /* Create a fake NULL-named TYPE_DECL node whose TREE_TYPE will be the tagged type we just added to the current scope. 
This fake NULL-named TYPE_DECL node helps dwarfout.c to know when it needs to output a representation of a tagged type, and it also gives us a convenient place to record the "scope start" address for the tagged type. */ TYPE_STUB_DECL (type) = pushdecl (build_decl (loc, TYPE_DECL, NULL_TREE, type)); /* An approximation for now, so we can tell this is a function-scope tag. This will be updated in pop_scope. */ TYPE_CONTEXT (type) = DECL_CONTEXT (TYPE_STUB_DECL (type)); if (warn_cxx_compat && name != NULL_TREE) { struct c_binding *b = I_SYMBOL_BINDING (name); if (b != NULL && b->decl != NULL_TREE && TREE_CODE (b->decl) == TYPE_DECL && (B_IN_CURRENT_SCOPE (b) || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b))) && (TYPE_MAIN_VARIANT (TREE_TYPE (b->decl)) != TYPE_MAIN_VARIANT (type))) { warning_at (loc, OPT_Wc___compat, ("using %qD as both a typedef and a tag is " "invalid in C++"), b->decl); if (b->locus != UNKNOWN_LOCATION) inform (b->locus, "originally defined here"); } } } /* An exported interface to pushtag. This is used by the gdb plugin's binding oracle to introduce a new tag binding. */ void c_pushtag (location_t loc, tree name, tree type) { pushtag (loc, name, type); } /* An exported interface to bind a declaration. LOC is the location to use. DECL is the declaration to bind. The decl's name is used to determine how it is bound. If DECL is a VAR_DECL, then IS_GLOBAL determines whether the decl is put into the global (file and external) scope or the current function's scope; if DECL is not a VAR_DECL then it is always put into the file scope. */ void c_bind (location_t loc, tree decl, bool is_global) { struct c_scope *scope; bool nested = false; if (TREE_CODE (decl) != VAR_DECL || current_function_scope == NULL) { /* Types and functions are always considered to be global. */ scope = file_scope; DECL_EXTERNAL (decl) = 1; TREE_PUBLIC (decl) = 1; } else if (is_global) { /* Also bind it into the external scope. */ bind (DECL_NAME (decl), decl, external_scope, true, false, loc); nested = true; scope = file_scope; DECL_EXTERNAL (decl) = 1; TREE_PUBLIC (decl) = 1; } else { DECL_CONTEXT (decl) = current_function_decl; TREE_PUBLIC (decl) = 0; scope = current_function_scope; } bind (DECL_NAME (decl), decl, scope, false, nested, loc); } /* Subroutine of compare_decls. Allow harmless mismatches in return and argument types provided that the type modes match. This function returns a unified type given a suitable match, and 0 otherwise. */ static tree match_builtin_function_types (tree newtype, tree oldtype) { tree newrettype, oldrettype; tree newargs, oldargs; tree trytype, tryargs; /* Accept the return type of the new declaration if same modes. */ oldrettype = TREE_TYPE (oldtype); newrettype = TREE_TYPE (newtype); if (TYPE_MODE (oldrettype) != TYPE_MODE (newrettype)) return 0; oldargs = TYPE_ARG_TYPES (oldtype); newargs = TYPE_ARG_TYPES (newtype); tryargs = newargs; while (oldargs || newargs) { if (!oldargs || !newargs || !TREE_VALUE (oldargs) || !TREE_VALUE (newargs) || TYPE_MODE (TREE_VALUE (oldargs)) != TYPE_MODE (TREE_VALUE (newargs))) return 0; oldargs = TREE_CHAIN (oldargs); newargs = TREE_CHAIN (newargs); } trytype = build_function_type (newrettype, tryargs); return build_type_attribute_variant (trytype, TYPE_ATTRIBUTES (oldtype)); } /* Subroutine of diagnose_mismatched_decls. Check for function type mismatch involving an empty arglist vs a nonempty one and give clearer diagnostics.
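   For example, hypothetically:

     int f ();          // empty parameter name list
     int f (char c);    // conflicting types: 'char' has a default
                        //  promotion to 'int', so the prototype can
                        //  never match the unprototyped declaration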
*/ static void diagnose_arglist_conflict (tree newdecl, tree olddecl, tree newtype, tree oldtype) { tree t; if (TREE_CODE (olddecl) != FUNCTION_DECL || !comptypes (TREE_TYPE (oldtype), TREE_TYPE (newtype)) || !((!prototype_p (oldtype) && DECL_INITIAL (olddecl) == 0) || (!prototype_p (newtype) && DECL_INITIAL (newdecl) == 0))) return; t = TYPE_ARG_TYPES (oldtype); if (t == 0) t = TYPE_ARG_TYPES (newtype); for (; t; t = TREE_CHAIN (t)) { tree type = TREE_VALUE (t); if (TREE_CHAIN (t) == 0 && TYPE_MAIN_VARIANT (type) != void_type_node) { inform (input_location, "a parameter list with an ellipsis can%'t match " "an empty parameter name list declaration"); break; } if (c_type_promotes_to (type) != type) { inform (input_location, "an argument type that has a default promotion can%'t match " "an empty parameter name list declaration"); break; } } } /* Another subroutine of diagnose_mismatched_decls. OLDDECL is an old-style function definition, NEWDECL is a prototype declaration. Diagnose inconsistencies in the argument list. Returns TRUE if the prototype is compatible, FALSE if not. */ static bool validate_proto_after_old_defn (tree newdecl, tree newtype, tree oldtype) { tree newargs, oldargs; int i; #define END_OF_ARGLIST(t) ((t) == void_type_node) oldargs = TYPE_ACTUAL_ARG_TYPES (oldtype); newargs = TYPE_ARG_TYPES (newtype); i = 1; for (;;) { tree oldargtype = TREE_VALUE (oldargs); tree newargtype = TREE_VALUE (newargs); if (oldargtype == error_mark_node || newargtype == error_mark_node) return false; oldargtype = (TYPE_ATOMIC (oldargtype) ? c_build_qualified_type (TYPE_MAIN_VARIANT (oldargtype), TYPE_QUAL_ATOMIC) : TYPE_MAIN_VARIANT (oldargtype)); newargtype = (TYPE_ATOMIC (newargtype) ? c_build_qualified_type (TYPE_MAIN_VARIANT (newargtype), TYPE_QUAL_ATOMIC) : TYPE_MAIN_VARIANT (newargtype)); if (END_OF_ARGLIST (oldargtype) && END_OF_ARGLIST (newargtype)) break; /* Reaching the end of just one list means the two decls don't agree on the number of arguments. */ if (END_OF_ARGLIST (oldargtype)) { error ("prototype for %q+D declares more arguments " "than previous old-style definition", newdecl); return false; } else if (END_OF_ARGLIST (newargtype)) { error ("prototype for %q+D declares fewer arguments " "than previous old-style definition", newdecl); return false; } /* Type for passing arg must be consistent with that declared for the arg. */ else if (!comptypes (oldargtype, newargtype)) { error ("prototype for %q+D declares argument %d" " with incompatible type", newdecl, i); return false; } oldargs = TREE_CHAIN (oldargs); newargs = TREE_CHAIN (newargs); i++; } /* If we get here, no errors were found, but do issue a warning for this poor-style construct. */ warning (0, "prototype for %q+D follows non-prototype definition", newdecl); return true; #undef END_OF_ARGLIST } /* Subroutine of diagnose_mismatched_decls. Report the location of DECL, first in a pair of mismatched declarations, using the diagnostic function DIAG. */ static void locate_old_decl (tree decl) { if (TREE_CODE (decl) == FUNCTION_DECL && DECL_BUILT_IN (decl) && !C_DECL_DECLARED_BUILTIN (decl)) ; else if (DECL_INITIAL (decl)) inform (input_location, "previous definition of %q+D was here", decl); else if (C_DECL_IMPLICIT (decl)) inform (input_location, "previous implicit declaration of %q+D was here", decl); else inform (input_location, "previous declaration of %q+D was here", decl); } /* Subroutine of duplicate_decls. Compare NEWDECL to OLDDECL. 
Returns true if the caller should proceed to merge the two, false if OLDDECL should simply be discarded. As a side effect, issues all necessary diagnostics for invalid or poor-style combinations. If it returns true, writes the types of NEWDECL and OLDDECL to *NEWTYPEP and *OLDTYPEP - these may have been adjusted from TREE_TYPE (NEWDECL, OLDDECL) respectively. */ static bool diagnose_mismatched_decls (tree newdecl, tree olddecl, tree *newtypep, tree *oldtypep) { tree newtype, oldtype; bool pedwarned = false; bool warned = false; bool retval = true; #define DECL_EXTERN_INLINE(DECL) (DECL_DECLARED_INLINE_P (DECL) \ && DECL_EXTERNAL (DECL)) /* If we have error_mark_node for either decl or type, just discard the previous decl - we're in an error cascade already. */ if (olddecl == error_mark_node || newdecl == error_mark_node) return false; *oldtypep = oldtype = TREE_TYPE (olddecl); *newtypep = newtype = TREE_TYPE (newdecl); if (oldtype == error_mark_node || newtype == error_mark_node) return false; /* Two different categories of symbol altogether. This is an error unless OLDDECL is a builtin. OLDDECL will be discarded in any case. */ if (TREE_CODE (olddecl) != TREE_CODE (newdecl)) { if (!(TREE_CODE (olddecl) == FUNCTION_DECL && DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl))) { error ("%q+D redeclared as different kind of symbol", newdecl); locate_old_decl (olddecl); } else if (TREE_PUBLIC (newdecl)) warning (0, "built-in function %q+D declared as non-function", newdecl); else warning (OPT_Wshadow, "declaration of %q+D shadows " "a built-in function", newdecl); return false; } /* Enumerators have no linkage, so may only be declared once in a given scope. */ if (TREE_CODE (olddecl) == CONST_DECL) { error ("redeclaration of enumerator %q+D", newdecl); locate_old_decl (olddecl); return false; } if (!comptypes (oldtype, newtype)) { if (TREE_CODE (olddecl) == FUNCTION_DECL && DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl)) { /* Accept harmless mismatch in function types. This is for the ffs and fprintf builtins. */ tree trytype = match_builtin_function_types (newtype, oldtype); if (trytype && comptypes (newtype, trytype)) *oldtypep = oldtype = trytype; else { /* If types don't match for a built-in, throw away the built-in. No point in calling locate_old_decl here, it won't print anything. */ warning (0, "conflicting types for built-in function %q+D", newdecl); return false; } } else if (TREE_CODE (olddecl) == FUNCTION_DECL && DECL_IS_BUILTIN (olddecl)) { /* A conflicting function declaration for a predeclared function that isn't actually built in. Objective C uses these. The new declaration silently overrides everything but the volatility (i.e. noreturn) indication. See also below. FIXME: Make Objective C use normal builtins. */ TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl); return false; } /* Permit void foo (...) to match int foo (...) if the latter is the definition and implicit int was used. See c-torture/compile/920625-2.c. */ else if (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == void_type_node && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == integer_type_node && C_FUNCTION_IMPLICIT_INT (newdecl) && !DECL_INITIAL (olddecl)) { pedwarned = pedwarn (input_location, 0, "conflicting types for %q+D", newdecl); /* Make sure we keep void as the return type. */ TREE_TYPE (newdecl) = *newtypep = newtype = oldtype; C_FUNCTION_IMPLICIT_INT (newdecl) = 0; } /* Permit void foo (...) 
to match an earlier call to foo (...) with no declared type (thus, implicitly int). */ else if (TREE_CODE (newdecl) == FUNCTION_DECL && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == void_type_node && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == integer_type_node && C_DECL_IMPLICIT (olddecl) && !DECL_INITIAL (olddecl)) { pedwarned = pedwarn (input_location, 0, "conflicting types for %q+D", newdecl); /* Make sure we keep void as the return type. */ TREE_TYPE (olddecl) = *oldtypep = oldtype = newtype; } else { int new_quals = TYPE_QUALS (newtype); int old_quals = TYPE_QUALS (oldtype); if (new_quals != old_quals) { addr_space_t new_addr = DECODE_QUAL_ADDR_SPACE (new_quals); addr_space_t old_addr = DECODE_QUAL_ADDR_SPACE (old_quals); if (new_addr != old_addr) { if (ADDR_SPACE_GENERIC_P (new_addr)) error ("conflicting named address spaces (generic vs %s) " "for %q+D", c_addr_space_name (old_addr), newdecl); else if (ADDR_SPACE_GENERIC_P (old_addr)) error ("conflicting named address spaces (%s vs generic) " "for %q+D", c_addr_space_name (new_addr), newdecl); else error ("conflicting named address spaces (%s vs %s) " "for %q+D", c_addr_space_name (new_addr), c_addr_space_name (old_addr), newdecl); } if (CLEAR_QUAL_ADDR_SPACE (new_quals) != CLEAR_QUAL_ADDR_SPACE (old_quals)) error ("conflicting type qualifiers for %q+D", newdecl); } else error ("conflicting types for %q+D", newdecl); diagnose_arglist_conflict (newdecl, olddecl, newtype, oldtype); locate_old_decl (olddecl); return false; } } /* Redeclaration of a type is a constraint violation (6.7.2.3p1), but silently ignore the redeclaration if either is in a system header. (Conflicting redeclarations were handled above.) This is allowed for C11 if the types are the same, not just compatible. */ if (TREE_CODE (newdecl) == TYPE_DECL) { bool types_different = false; int comptypes_result; comptypes_result = comptypes_check_different_types (oldtype, newtype, &types_different); if (comptypes_result != 1 || types_different) { error ("redefinition of typedef %q+D with different type", newdecl); locate_old_decl (olddecl); return false; } if (DECL_IN_SYSTEM_HEADER (newdecl) || DECL_IN_SYSTEM_HEADER (olddecl) || TREE_NO_WARNING (newdecl) || TREE_NO_WARNING (olddecl)) return true; /* Allow OLDDECL to continue in use. */ if (variably_modified_type_p (newtype, NULL)) { error ("redefinition of typedef %q+D with variably modified type", newdecl); locate_old_decl (olddecl); } else if (pedwarn_c99 (input_location, OPT_Wpedantic, "redefinition of typedef %q+D", newdecl)) locate_old_decl (olddecl); return true; } /* Function declarations can either be 'static' or 'extern' (no qualifier is equivalent to 'extern' - C99 6.2.2p5) and therefore can never conflict with each other on account of linkage (6.2.2p4). Multiple definitions are not allowed (6.9p3,5) but gnu89 mode permits two definitions if one is 'extern inline' and one is not. The non- extern-inline definition supersedes the extern-inline definition. */ else if (TREE_CODE (newdecl) == FUNCTION_DECL) { /* If you declare a built-in function name as static, or define the built-in with an old-style definition (so we can't validate the argument list) the built-in definition is overridden, but optionally warn this was a bad choice of name. 
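   Hypothetically:

     static double cos (double x)   // static use of a built-in name:
     {                              //  the built-in is discarded and
       return 1.0 - x * x / 2;      //  -Wshadow may note the shadowing
     }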
*/ if (DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl) && (!TREE_PUBLIC (newdecl) || (DECL_INITIAL (newdecl) && !prototype_p (TREE_TYPE (newdecl))))) { warning (OPT_Wshadow, "declaration of %q+D shadows " "a built-in function", newdecl); /* Discard the old built-in function. */ return false; } if (DECL_INITIAL (newdecl)) { if (DECL_INITIAL (olddecl)) { /* If both decls are in the same TU and the new declaration isn't overriding an extern inline reject the new decl. In c99, no overriding is allowed in the same translation unit. */ if ((!DECL_EXTERN_INLINE (olddecl) || DECL_EXTERN_INLINE (newdecl) || (!flag_gnu89_inline && (!DECL_DECLARED_INLINE_P (olddecl) || !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (olddecl))) && (!DECL_DECLARED_INLINE_P (newdecl) || !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl)))) ) && same_translation_unit_p (newdecl, olddecl)) { error ("redefinition of %q+D", newdecl); locate_old_decl (olddecl); return false; } } } /* If we have a prototype after an old-style function definition, the argument types must be checked specially. */ else if (DECL_INITIAL (olddecl) && !prototype_p (oldtype) && prototype_p (newtype) && TYPE_ACTUAL_ARG_TYPES (oldtype) && !validate_proto_after_old_defn (newdecl, newtype, oldtype)) { locate_old_decl (olddecl); return false; } /* A non-static declaration (even an "extern") followed by a static declaration is undefined behavior per C99 6.2.2p3-5,7. The same is true for a static forward declaration at block scope followed by a non-static declaration/definition at file scope. Static followed by non-static at the same scope is not undefined behavior, and is the most convenient way to get some effects (see e.g. what unwind-dw2-fde-glibc.c does to the definition of _Unwind_Find_FDE in unwind-dw2-fde.c), but we do diagnose it if -Wtraditional. */ if (TREE_PUBLIC (olddecl) && !TREE_PUBLIC (newdecl)) { /* Two exceptions to the rule. If olddecl is an extern inline, or a predeclared function that isn't actually built in, newdecl silently overrides olddecl. The latter occur only in Objective C; see also above. (FIXME: Make Objective C use normal builtins.) */ if (!DECL_IS_BUILTIN (olddecl) && !DECL_EXTERN_INLINE (olddecl)) { error ("static declaration of %q+D follows " "non-static declaration", newdecl); locate_old_decl (olddecl); } return false; } else if (TREE_PUBLIC (newdecl) && !TREE_PUBLIC (olddecl)) { if (DECL_CONTEXT (olddecl)) { error ("non-static declaration of %q+D follows " "static declaration", newdecl); locate_old_decl (olddecl); return false; } else if (warn_traditional) { warned |= warning (OPT_Wtraditional, "non-static declaration of %q+D " "follows static declaration", newdecl); } } /* Make sure gnu_inline attribute is either not present, or present on all inline decls. */ if (DECL_DECLARED_INLINE_P (olddecl) && DECL_DECLARED_INLINE_P (newdecl)) { bool newa = lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl)) != NULL; bool olda = lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (olddecl)) != NULL; if (newa != olda) { error_at (input_location, "%<gnu_inline%> attribute present on %q+D", newa ? newdecl : olddecl); error_at (DECL_SOURCE_LOCATION (newa ? olddecl : newdecl), "but not here"); } } } else if (TREE_CODE (newdecl) == VAR_DECL) { /* Only variables can be thread-local, and all declarations must agree on this property. */ if (C_DECL_THREADPRIVATE_P (olddecl) && !DECL_THREAD_LOCAL_P (newdecl)) { /* Nothing to check. 
Since OLDDECL is marked threadprivate and NEWDECL does not have a thread-local attribute, we will merge the threadprivate attribute into NEWDECL. */ ; } else if (DECL_THREAD_LOCAL_P (newdecl) != DECL_THREAD_LOCAL_P (olddecl)) { if (DECL_THREAD_LOCAL_P (newdecl)) error ("thread-local declaration of %q+D follows " "non-thread-local declaration", newdecl); else error ("non-thread-local declaration of %q+D follows " "thread-local declaration", newdecl); locate_old_decl (olddecl); return false; } /* Multiple initialized definitions are not allowed (6.9p3,5). */ if (DECL_INITIAL (newdecl) && DECL_INITIAL (olddecl)) { error ("redefinition of %q+D", newdecl); locate_old_decl (olddecl); return false; } /* Objects declared at file scope: if the first declaration had external linkage (even if it was an external reference) the second must have external linkage as well, or the behavior is undefined. If the first declaration had internal linkage, then the second must too, or else be an external reference (in which case the composite declaration still has internal linkage). As for function declarations, we warn about the static-then- extern case only for -Wtraditional. See generally 6.2.2p3-5,7. */ if (DECL_FILE_SCOPE_P (newdecl) && TREE_PUBLIC (newdecl) != TREE_PUBLIC (olddecl)) { if (DECL_EXTERNAL (newdecl)) { if (!DECL_FILE_SCOPE_P (olddecl)) { error ("extern declaration of %q+D follows " "declaration with no linkage", newdecl); locate_old_decl (olddecl); return false; } else if (warn_traditional) { warned |= warning (OPT_Wtraditional, "non-static declaration of %q+D " "follows static declaration", newdecl); } } else { if (TREE_PUBLIC (newdecl)) error ("non-static declaration of %q+D follows " "static declaration", newdecl); else error ("static declaration of %q+D follows " "non-static declaration", newdecl); locate_old_decl (olddecl); return false; } } /* Two objects with the same name declared at the same block scope must both be external references (6.7p3). */ else if (!DECL_FILE_SCOPE_P (newdecl)) { if (DECL_EXTERNAL (newdecl)) { /* Extern with initializer at block scope, which will already have received an error. */ } else if (DECL_EXTERNAL (olddecl)) { error ("declaration of %q+D with no linkage follows " "extern declaration", newdecl); locate_old_decl (olddecl); } else { error ("redeclaration of %q+D with no linkage", newdecl); locate_old_decl (olddecl); } return false; } /* C++ does not permit a decl to appear multiple times at file scope. */ if (warn_cxx_compat && DECL_FILE_SCOPE_P (newdecl) && !DECL_EXTERNAL (newdecl) && !DECL_EXTERNAL (olddecl)) warned |= warning_at (DECL_SOURCE_LOCATION (newdecl), OPT_Wc___compat, ("duplicate declaration of %qD is " "invalid in C++"), newdecl); } /* warnings */ /* All decls must agree on a visibility. */ if (CODE_CONTAINS_STRUCT (TREE_CODE (newdecl), TS_DECL_WITH_VIS) && DECL_VISIBILITY_SPECIFIED (newdecl) && DECL_VISIBILITY_SPECIFIED (olddecl) && DECL_VISIBILITY (newdecl) != DECL_VISIBILITY (olddecl)) { warned |= warning (0, "redeclaration of %q+D with different visibility " "(old visibility preserved)", newdecl); } if (TREE_CODE (newdecl) == FUNCTION_DECL) { /* Diagnose inline __attribute__ ((noinline)) which is silly. 
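   Hypothetically:

     void q (void) __attribute__ ((noinline));
     inline void q (void) { }    // -Wattributes: the inline declaration
                                 //  follows one with attribute noinline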
*/ if (DECL_DECLARED_INLINE_P (newdecl) && lookup_attribute ("noinline", DECL_ATTRIBUTES (olddecl))) warned |= warning (OPT_Wattributes, "inline declaration of %qD follows " "declaration with attribute noinline", newdecl); else if (DECL_DECLARED_INLINE_P (olddecl) && lookup_attribute ("noinline", DECL_ATTRIBUTES (newdecl))) warned |= warning (OPT_Wattributes, "declaration of %q+D with attribute " "noinline follows inline declaration ", newdecl); else if (lookup_attribute ("noinline", DECL_ATTRIBUTES (newdecl)) && lookup_attribute ("always_inline", DECL_ATTRIBUTES (olddecl))) warned |= warning (OPT_Wattributes, "declaration of %q+D with attribute " "%qs follows declaration with attribute %qs", newdecl, "noinline", "always_inline"); else if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (newdecl)) && lookup_attribute ("noinline", DECL_ATTRIBUTES (olddecl))) warned |= warning (OPT_Wattributes, "declaration of %q+D with attribute " "%qs follows declaration with attribute %qs", newdecl, "always_inline", "noinline"); else if (lookup_attribute ("cold", DECL_ATTRIBUTES (newdecl)) && lookup_attribute ("hot", DECL_ATTRIBUTES (olddecl))) warned |= warning (OPT_Wattributes, "declaration of %q+D with attribute %qs follows " "declaration with attribute %qs", newdecl, "cold", "hot"); else if (lookup_attribute ("hot", DECL_ATTRIBUTES (newdecl)) && lookup_attribute ("cold", DECL_ATTRIBUTES (olddecl))) warned |= warning (OPT_Wattributes, "declaration of %q+D with attribute %qs follows " "declaration with attribute %qs", newdecl, "hot", "cold"); } else /* PARM_DECL, VAR_DECL */ { /* Redeclaration of a parameter is a constraint violation (this is not explicitly stated, but follows from C99 6.7p3 [no more than one declaration of the same identifier with no linkage in the same scope, except type tags] and 6.2.2p6 [parameters have no linkage]). We must check for a forward parameter declaration, indicated by TREE_ASM_WRITTEN on the old declaration - this is an extension, the mandatory diagnostic for which is handled by mark_forward_parm_decls. */ if (TREE_CODE (newdecl) == PARM_DECL && (!TREE_ASM_WRITTEN (olddecl) || TREE_ASM_WRITTEN (newdecl))) { error ("redefinition of parameter %q+D", newdecl); locate_old_decl (olddecl); return false; } } /* Optional warning for completely redundant decls. */ if (!warned && !pedwarned && warn_redundant_decls /* Don't warn about a function declaration followed by a definition. */ && !(TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl)) /* Don't warn about redundant redeclarations of builtins. */ && !(TREE_CODE (newdecl) == FUNCTION_DECL && !DECL_BUILT_IN (newdecl) && DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl)) /* Don't warn about an extern followed by a definition. */ && !(DECL_EXTERNAL (olddecl) && !DECL_EXTERNAL (newdecl)) /* Don't warn about forward parameter decls. */ && !(TREE_CODE (newdecl) == PARM_DECL && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl)) /* Don't warn about a variable definition following a declaration. */ && !(TREE_CODE (newdecl) == VAR_DECL && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl))) { warned = warning (OPT_Wredundant_decls, "redundant redeclaration of %q+D", newdecl); } /* Report location of previous decl/defn. */ if (warned || pedwarned) locate_old_decl (olddecl); #undef DECL_EXTERN_INLINE return retval; } /* Subroutine of duplicate_decls. NEWDECL has been found to be consistent with OLDDECL, but carries new information. Merge the new information into OLDDECL. 
This function issues no diagnostics. */ static void merge_decls (tree newdecl, tree olddecl, tree newtype, tree oldtype) { bool new_is_definition = (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) != 0); bool new_is_prototype = (TREE_CODE (newdecl) == FUNCTION_DECL && prototype_p (TREE_TYPE (newdecl))); bool old_is_prototype = (TREE_CODE (olddecl) == FUNCTION_DECL && prototype_p (TREE_TYPE (olddecl))); /* For real parm decl following a forward decl, rechain the old decl in its new location and clear TREE_ASM_WRITTEN (it's not a forward decl anymore). */ if (TREE_CODE (newdecl) == PARM_DECL && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl)) { struct c_binding *b, **here; for (here = &current_scope->bindings; *here; here = &(*here)->prev) if ((*here)->decl == olddecl) goto found; gcc_unreachable (); found: b = *here; *here = b->prev; b->prev = current_scope->bindings; current_scope->bindings = b; TREE_ASM_WRITTEN (olddecl) = 0; } DECL_ATTRIBUTES (newdecl) = targetm.merge_decl_attributes (olddecl, newdecl); /* For typedefs use the old type, as the new type's DECL_NAME points at newdecl, which will be ggc_freed. */ if (TREE_CODE (newdecl) == TYPE_DECL) { /* But NEWTYPE might have an attribute, honor that. */ tree tem = newtype; newtype = oldtype; if (TYPE_USER_ALIGN (tem)) { if (TYPE_ALIGN (tem) > TYPE_ALIGN (newtype)) TYPE_ALIGN (newtype) = TYPE_ALIGN (tem); TYPE_USER_ALIGN (newtype) = true; } /* And remove the new type from the variants list. */ if (TYPE_NAME (TREE_TYPE (newdecl)) == newdecl) { tree remove = TREE_TYPE (newdecl); for (tree t = TYPE_MAIN_VARIANT (remove); ; t = TYPE_NEXT_VARIANT (t)) if (TYPE_NEXT_VARIANT (t) == remove) { TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (remove); break; } } } /* Merge the data types specified in the two decls. */ TREE_TYPE (newdecl) = TREE_TYPE (olddecl) = composite_type (newtype, oldtype); /* Lay the type out, unless already done. */ if (!comptypes (oldtype, TREE_TYPE (newdecl))) { if (TREE_TYPE (newdecl) != error_mark_node) layout_type (TREE_TYPE (newdecl)); if (TREE_CODE (newdecl) != FUNCTION_DECL && TREE_CODE (newdecl) != TYPE_DECL && TREE_CODE (newdecl) != CONST_DECL) layout_decl (newdecl, 0); } else { /* Since the type is OLDDECL's, make OLDDECL's size go with. */ DECL_SIZE (newdecl) = DECL_SIZE (olddecl); DECL_SIZE_UNIT (newdecl) = DECL_SIZE_UNIT (olddecl); DECL_MODE (newdecl) = DECL_MODE (olddecl); if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl)) { DECL_ALIGN (newdecl) = DECL_ALIGN (olddecl); DECL_USER_ALIGN (newdecl) |= DECL_USER_ALIGN (olddecl); } } /* Keep the old rtl since we can safely use it. */ if (HAS_RTL_P (olddecl)) COPY_DECL_RTL (olddecl, newdecl); /* Merge the type qualifiers. */ if (TREE_READONLY (newdecl)) TREE_READONLY (olddecl) = 1; if (TREE_THIS_VOLATILE (newdecl)) TREE_THIS_VOLATILE (olddecl) = 1; /* Merge deprecatedness. */ if (TREE_DEPRECATED (newdecl)) TREE_DEPRECATED (olddecl) = 1; /* If a decl is in a system header and the other isn't, keep the one on the system header. Otherwise, keep source location of definition rather than declaration and of prototype rather than non-prototype unless that prototype is built-in. 
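For instance (illustrative): when the prototype int f (int); precedes an old-style definition int f (n) int n; { return n; } the prototype's location is the one kept for later diagnostics.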
*/ if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS) && DECL_IN_SYSTEM_HEADER (olddecl) && !DECL_IN_SYSTEM_HEADER (newdecl) ) DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl); else if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS) && DECL_IN_SYSTEM_HEADER (newdecl) && !DECL_IN_SYSTEM_HEADER (olddecl)) DECL_SOURCE_LOCATION (olddecl) = DECL_SOURCE_LOCATION (newdecl); else if ((DECL_INITIAL (newdecl) == 0 && DECL_INITIAL (olddecl) != 0) || (old_is_prototype && !new_is_prototype && !C_DECL_BUILTIN_PROTOTYPE (olddecl))) DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl); /* Merge the initialization information. */ if (DECL_INITIAL (newdecl) == 0) DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl); /* Merge the threadprivate attribute. */ if (TREE_CODE (olddecl) == VAR_DECL && C_DECL_THREADPRIVATE_P (olddecl)) C_DECL_THREADPRIVATE_P (newdecl) = 1; if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)) { /* Copy the assembler name. Currently, it can only be defined in the prototype. */ COPY_DECL_ASSEMBLER_NAME (olddecl, newdecl); /* Use visibility of whichever declaration had it specified */ if (DECL_VISIBILITY_SPECIFIED (olddecl)) { DECL_VISIBILITY (newdecl) = DECL_VISIBILITY (olddecl); DECL_VISIBILITY_SPECIFIED (newdecl) = 1; } if (TREE_CODE (newdecl) == FUNCTION_DECL) { DECL_STATIC_CONSTRUCTOR(newdecl) |= DECL_STATIC_CONSTRUCTOR(olddecl); DECL_STATIC_DESTRUCTOR (newdecl) |= DECL_STATIC_DESTRUCTOR (olddecl); DECL_NO_LIMIT_STACK (newdecl) |= DECL_NO_LIMIT_STACK (olddecl); DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (newdecl) |= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (olddecl); TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl); DECL_IS_MALLOC (newdecl) |= DECL_IS_MALLOC (olddecl); DECL_IS_OPERATOR_NEW (newdecl) |= DECL_IS_OPERATOR_NEW (olddecl); TREE_READONLY (newdecl) |= TREE_READONLY (olddecl); DECL_PURE_P (newdecl) |= DECL_PURE_P (olddecl); DECL_IS_NOVOPS (newdecl) |= DECL_IS_NOVOPS (olddecl); } /* Merge the storage class information. */ merge_weak (newdecl, olddecl); /* For functions, static overrides non-static. */ if (TREE_CODE (newdecl) == FUNCTION_DECL) { TREE_PUBLIC (newdecl) &= TREE_PUBLIC (olddecl); /* This is since we don't automatically copy the attributes of NEWDECL into OLDDECL. */ TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl); /* If this clears `static', clear it in the identifier too. */ if (!TREE_PUBLIC (olddecl)) TREE_PUBLIC (DECL_NAME (olddecl)) = 0; } } /* In c99, 'extern' declaration before (or after) 'inline' means this function is not DECL_EXTERNAL, unless 'gnu_inline' attribute is present. */ if (TREE_CODE (newdecl) == FUNCTION_DECL && !flag_gnu89_inline && (DECL_DECLARED_INLINE_P (newdecl) || DECL_DECLARED_INLINE_P (olddecl)) && (!DECL_DECLARED_INLINE_P (newdecl) || !DECL_DECLARED_INLINE_P (olddecl) || !DECL_EXTERNAL (olddecl)) && DECL_EXTERNAL (newdecl) && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl)) && !current_function_decl) DECL_EXTERNAL (newdecl) = 0; /* An inline definition following a static declaration is not DECL_EXTERNAL. */ if (new_is_definition && (DECL_DECLARED_INLINE_P (newdecl) || DECL_DECLARED_INLINE_P (olddecl)) && !TREE_PUBLIC (olddecl)) DECL_EXTERNAL (newdecl) = 0; if (DECL_EXTERNAL (newdecl)) { TREE_STATIC (newdecl) = TREE_STATIC (olddecl); DECL_EXTERNAL (newdecl) = DECL_EXTERNAL (olddecl); /* An extern decl does not override previous storage class. 
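For example (illustrative): static int n; followed by extern int n; leaves n with internal linkage; the later extern declaration refers back to the earlier one (C99 6.2.2p4).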
*/ TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl); if (!DECL_EXTERNAL (newdecl)) { DECL_CONTEXT (newdecl) = DECL_CONTEXT (olddecl); DECL_COMMON (newdecl) = DECL_COMMON (olddecl); } } else { TREE_STATIC (olddecl) = TREE_STATIC (newdecl); TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl); } if (TREE_CODE (newdecl) == FUNCTION_DECL) { /* If we're redefining a function previously defined as extern inline, make sure we emit debug info for the inline before we throw it away, in case it was inlined into a function that hasn't been written out yet. */ if (new_is_definition && DECL_INITIAL (olddecl)) /* The new defn must not be inline. */ DECL_UNINLINABLE (newdecl) = 1; else { /* If either decl says `inline', this fn is inline, unless its definition was passed already. */ if (DECL_DECLARED_INLINE_P (newdecl) || DECL_DECLARED_INLINE_P (olddecl)) DECL_DECLARED_INLINE_P (newdecl) = 1; DECL_UNINLINABLE (newdecl) = DECL_UNINLINABLE (olddecl) = (DECL_UNINLINABLE (newdecl) || DECL_UNINLINABLE (olddecl)); DECL_DISREGARD_INLINE_LIMITS (newdecl) = DECL_DISREGARD_INLINE_LIMITS (olddecl) = (DECL_DISREGARD_INLINE_LIMITS (newdecl) || DECL_DISREGARD_INLINE_LIMITS (olddecl)); } if (DECL_BUILT_IN (olddecl)) { /* If redeclaring a builtin function, it stays built in. But it gets tagged as having been declared. */ DECL_BUILT_IN_CLASS (newdecl) = DECL_BUILT_IN_CLASS (olddecl); DECL_FUNCTION_CODE (newdecl) = DECL_FUNCTION_CODE (olddecl); C_DECL_DECLARED_BUILTIN (newdecl) = 1; if (new_is_prototype) { C_DECL_BUILTIN_PROTOTYPE (newdecl) = 0; if (DECL_BUILT_IN_CLASS (newdecl) == BUILT_IN_NORMAL) { enum built_in_function fncode = DECL_FUNCTION_CODE (newdecl); switch (fncode) { /* If a compatible prototype of these builtin functions is seen, assume the runtime implements it with the expected semantics. */ case BUILT_IN_STPCPY: if (builtin_decl_explicit_p (fncode)) set_builtin_decl_implicit_p (fncode, true); break; default: if (builtin_decl_explicit_p (fncode)) set_builtin_decl_declared_p (fncode, true); break; } } } else C_DECL_BUILTIN_PROTOTYPE (newdecl) = C_DECL_BUILTIN_PROTOTYPE (olddecl); } /* Preserve function specific target and optimization options */ if (DECL_FUNCTION_SPECIFIC_TARGET (olddecl) && !DECL_FUNCTION_SPECIFIC_TARGET (newdecl)) DECL_FUNCTION_SPECIFIC_TARGET (newdecl) = DECL_FUNCTION_SPECIFIC_TARGET (olddecl); if (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl) && !DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl)) DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl) = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl); /* Also preserve various other info from the definition. */ if (!new_is_definition) { tree t; DECL_RESULT (newdecl) = DECL_RESULT (olddecl); DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl); DECL_STRUCT_FUNCTION (newdecl) = DECL_STRUCT_FUNCTION (olddecl); DECL_SAVED_TREE (newdecl) = DECL_SAVED_TREE (olddecl); DECL_ARGUMENTS (newdecl) = copy_list (DECL_ARGUMENTS (olddecl)); for (t = DECL_ARGUMENTS (newdecl); t ; t = DECL_CHAIN (t)) DECL_CONTEXT (t) = newdecl; /* See if we've got a function to instantiate from. */ if (DECL_SAVED_TREE (olddecl)) DECL_ABSTRACT_ORIGIN (newdecl) = DECL_ABSTRACT_ORIGIN (olddecl); } } /* Merge the USED information. 
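Whichever declaration a use was recorded against, the surviving decl stays marked as used; e.g. (an assumed example) a static function called through an earlier prototype and only later defined is not treated as unused.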
*/ if (TREE_USED (olddecl)) TREE_USED (newdecl) = 1; else if (TREE_USED (newdecl)) TREE_USED (olddecl) = 1; if (TREE_CODE (olddecl) == VAR_DECL || TREE_CODE (olddecl) == PARM_DECL) DECL_READ_P (newdecl) |= DECL_READ_P (olddecl); if (DECL_PRESERVE_P (olddecl)) DECL_PRESERVE_P (newdecl) = 1; else if (DECL_PRESERVE_P (newdecl)) DECL_PRESERVE_P (olddecl) = 1; /* Copy most of the decl-specific fields of NEWDECL into OLDDECL. But preserve OLDDECL's DECL_UID, DECL_CONTEXT and DECL_ARGUMENTS (if appropriate). */ { unsigned olddecl_uid = DECL_UID (olddecl); tree olddecl_context = DECL_CONTEXT (olddecl); tree olddecl_arguments = NULL; if (TREE_CODE (olddecl) == FUNCTION_DECL) olddecl_arguments = DECL_ARGUMENTS (olddecl); memcpy ((char *) olddecl + sizeof (struct tree_common), (char *) newdecl + sizeof (struct tree_common), sizeof (struct tree_decl_common) - sizeof (struct tree_common)); DECL_USER_ALIGN (olddecl) = DECL_USER_ALIGN (newdecl); switch (TREE_CODE (olddecl)) { case FUNCTION_DECL: case VAR_DECL: { struct symtab_node *snode = olddecl->decl_with_vis.symtab_node; memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common)); olddecl->decl_with_vis.symtab_node = snode; if ((DECL_EXTERNAL (olddecl) || TREE_PUBLIC (olddecl) || TREE_STATIC (olddecl)) && DECL_SECTION_NAME (newdecl) != NULL) set_decl_section_name (olddecl, DECL_SECTION_NAME (newdecl)); /* This isn't quite correct for something like int __thread x attribute ((tls_model ("local-exec"))); extern int __thread x; as we'll lose the "local-exec" model. */ if (TREE_CODE (olddecl) == VAR_DECL && DECL_THREAD_LOCAL_P (newdecl)) set_decl_tls_model (olddecl, DECL_TLS_MODEL (newdecl)); break; } case FIELD_DECL: case PARM_DECL: case LABEL_DECL: case RESULT_DECL: case CONST_DECL: case TYPE_DECL: memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common)); break; default: memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), sizeof (struct tree_decl_non_common) - sizeof (struct tree_decl_common)); } DECL_UID (olddecl) = olddecl_uid; DECL_CONTEXT (olddecl) = olddecl_context; if (TREE_CODE (olddecl) == FUNCTION_DECL) DECL_ARGUMENTS (olddecl) = olddecl_arguments; } /* If OLDDECL had its DECL_RTL instantiated, re-invoke make_decl_rtl so that encode_section_info has a chance to look at the new decl flags and attributes. */ if (DECL_RTL_SET_P (olddecl) && (TREE_CODE (olddecl) == FUNCTION_DECL || (TREE_CODE (olddecl) == VAR_DECL && TREE_STATIC (olddecl)))) make_decl_rtl (olddecl); } /* Handle when a new declaration NEWDECL has the same name as an old one OLDDECL in the same binding contour. Prints an error message if appropriate. If safely possible, alter OLDDECL to look like NEWDECL, and return true. Otherwise, return false. */ static bool duplicate_decls (tree newdecl, tree olddecl) { tree newtype = NULL, oldtype = NULL; if (!diagnose_mismatched_decls (newdecl, olddecl, &newtype, &oldtype)) { /* Avoid `unused variable' and other warnings for OLDDECL. */ TREE_NO_WARNING (olddecl) = 1; return false; } merge_decls (newdecl, olddecl, newtype, oldtype); /* The NEWDECL will no longer be needed. Before releasing the node, be sure to remove function from symbol table that might have been inserted there to record comdat group. 
However, be sure not to free DECL_STRUCT_FUNCTION, because that structure is shared between NEWDECL and OLDDECL. */ if (TREE_CODE (newdecl) == FUNCTION_DECL) DECL_STRUCT_FUNCTION (newdecl) = NULL; if (TREE_CODE (newdecl) == FUNCTION_DECL || TREE_CODE (newdecl) == VAR_DECL) { struct symtab_node *snode = symtab_node::get (newdecl); if (snode) snode->remove (); } ggc_free (newdecl); return true; } /* Check whether decl-node NEW_DECL shadows an existing declaration. */ static void warn_if_shadowing (tree new_decl) { struct c_binding *b; /* Shadow warnings wanted? */ if (!warn_shadow /* No shadow warnings for internally generated vars. */ || DECL_IS_BUILTIN (new_decl) /* No shadow warnings for vars made for inlining. */ || DECL_FROM_INLINE (new_decl)) return; /* Is anything being shadowed? Invisible decls do not count. */ for (b = I_SYMBOL_BINDING (DECL_NAME (new_decl)); b; b = b->shadowed) if (b->decl && b->decl != new_decl && !b->invisible && (b->decl == error_mark_node || diagnostic_report_warnings_p (global_dc, DECL_SOURCE_LOCATION (b->decl)))) { tree old_decl = b->decl; bool warned = false; if (old_decl == error_mark_node) { warning (OPT_Wshadow, "declaration of %q+D shadows previous " "non-variable", new_decl); break; } else if (TREE_CODE (old_decl) == PARM_DECL) warned = warning (OPT_Wshadow, "declaration of %q+D shadows a parameter", new_decl); else if (DECL_FILE_SCOPE_P (old_decl)) { /* Do not warn if a variable shadows a function, unless the variable is a function or a pointer-to-function. */ if (TREE_CODE (old_decl) == FUNCTION_DECL && TREE_CODE (new_decl) != FUNCTION_DECL && !FUNCTION_POINTER_TYPE_P (TREE_TYPE (new_decl))) continue; warned = warning_at (DECL_SOURCE_LOCATION (new_decl), OPT_Wshadow, "declaration of %qD shadows a global " "declaration", new_decl); } else if (TREE_CODE (old_decl) == FUNCTION_DECL && DECL_BUILT_IN (old_decl)) { warning (OPT_Wshadow, "declaration of %q+D shadows " "a built-in function", new_decl); break; } else warned = warning (OPT_Wshadow, "declaration of %q+D shadows a " "previous local", new_decl); if (warned) inform (DECL_SOURCE_LOCATION (old_decl), "shadowed declaration is here"); break; } } /* Record a decl-node X as belonging to the current lexical scope. Check for errors (such as an incompatible declaration for the same name already seen in the same scope). Returns either X or an old decl for the same name. If an old decl is returned, it may have been smashed to agree with what X says. */ tree pushdecl (tree x) { tree name = DECL_NAME (x); struct c_scope *scope = current_scope; struct c_binding *b; bool nested = false; location_t locus = DECL_SOURCE_LOCATION (x); /* Must set DECL_CONTEXT for everything not at file scope or DECL_FILE_SCOPE_P won't work. Local externs don't count unless they have initializers (which generate code). */ if (current_function_decl && ((TREE_CODE (x) != FUNCTION_DECL && TREE_CODE (x) != VAR_DECL) || DECL_INITIAL (x) || !DECL_EXTERNAL (x))) DECL_CONTEXT (x) = current_function_decl; /* Anonymous decls are just inserted in the scope. */ if (!name) { bind (name, x, scope, /*invisible=*/false, /*nested=*/false, locus); return x; } /* First, see if there is another declaration with the same name in the current scope. If there is, duplicate_decls may do all the work for us. If duplicate_decls returns false, that indicates two incompatible decls in the same scope; we are to silently replace the old one (duplicate_decls has issued all appropriate diagnostics).
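(An illustrative incompatible pair, not from the original comment: int x; followed by double x; in the same block.)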
In particular, we should not consider possible duplicates in the external scope, or shadowing. */ b = I_SYMBOL_BINDING (name); if (b && B_IN_SCOPE (b, scope)) { struct c_binding *b_ext, *b_use; tree type = TREE_TYPE (x); tree visdecl = b->decl; tree vistype = TREE_TYPE (visdecl); if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE && COMPLETE_TYPE_P (TREE_TYPE (x))) b->inner_comp = false; b_use = b; b_ext = b; /* If this is an external linkage declaration, we should check for compatibility with the type in the external scope before setting the type at this scope based on the visible information only. */ if (TREE_PUBLIC (x) && TREE_PUBLIC (visdecl)) { while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext)) b_ext = b_ext->shadowed; if (b_ext) { b_use = b_ext; if (b_use->u.type) TREE_TYPE (b_use->decl) = b_use->u.type; } } if (duplicate_decls (x, b_use->decl)) { if (b_use != b) { /* Save the updated type in the external scope and restore the proper type for this scope. */ tree thistype; if (comptypes (vistype, type)) thistype = composite_type (vistype, type); else thistype = TREE_TYPE (b_use->decl); b_use->u.type = TREE_TYPE (b_use->decl); if (TREE_CODE (b_use->decl) == FUNCTION_DECL && DECL_BUILT_IN (b_use->decl)) thistype = build_type_attribute_variant (thistype, TYPE_ATTRIBUTES (b_use->u.type)); TREE_TYPE (b_use->decl) = thistype; } return b_use->decl; } else goto skip_external_and_shadow_checks; } /* All declarations with external linkage, and all external references, go in the external scope, no matter what scope is current. However, the binding in that scope is ignored for purposes of normal name lookup. A separate binding structure is created in the requested scope; this governs the normal visibility of the symbol. The binding in the externals scope is used exclusively for detecting duplicate declarations of the same object, no matter what scope they are in; this is what we do here. (C99 6.2.7p2: All declarations that refer to the same object or function shall have compatible type; otherwise, the behavior is undefined.) */ if (DECL_EXTERNAL (x) || scope == file_scope) { tree type = TREE_TYPE (x); tree vistype = 0; tree visdecl = 0; bool type_saved = false; if (b && !B_IN_EXTERNAL_SCOPE (b) && (TREE_CODE (b->decl) == FUNCTION_DECL || TREE_CODE (b->decl) == VAR_DECL) && DECL_FILE_SCOPE_P (b->decl)) { visdecl = b->decl; vistype = TREE_TYPE (visdecl); } if (scope != file_scope && !DECL_IN_SYSTEM_HEADER (x)) warning (OPT_Wnested_externs, "nested extern declaration of %qD", x); while (b && !B_IN_EXTERNAL_SCOPE (b)) { /* If this decl might be modified, save its type. This is done here rather than when the decl is first bound because the type may change after first binding, through being completed or through attributes being added. If we encounter multiple such decls, only the first should have its type saved; the others will already have had their proper types saved and the types will not have changed as their scopes will not have been re-entered. 
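A case in point (illustrative): extern int a[]; at file scope followed by a block-scope extern int a[10]; may complete the visible type, so the original file-scope type is saved here first and reset after the consistency checks.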
*/ if (DECL_P (b->decl) && DECL_FILE_SCOPE_P (b->decl) && !type_saved) { b->u.type = TREE_TYPE (b->decl); type_saved = true; } if (B_IN_FILE_SCOPE (b) && TREE_CODE (b->decl) == VAR_DECL && TREE_STATIC (b->decl) && TREE_CODE (TREE_TYPE (b->decl)) == ARRAY_TYPE && !TYPE_DOMAIN (TREE_TYPE (b->decl)) && TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) && !integer_zerop (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))) { /* Array type completed in inner scope, which should be diagnosed if the completion does not have size 1 and it does not get completed in the file scope. */ b->inner_comp = true; } b = b->shadowed; } /* If a matching external declaration has been found, set its type to the composite of all the types of that declaration. After the consistency checks, it will be reset to the composite of the visible types only. */ if (b && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl)) && b->u.type) TREE_TYPE (b->decl) = b->u.type; /* The point of the same_translation_unit_p check here is, we want to detect a duplicate decl for a construct like foo() { extern bar(); } ... static bar(); but not if they are in different translation units. In any case, the static does not go in the externals scope. */ if (b && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl)) && duplicate_decls (x, b->decl)) { tree thistype; if (vistype) { if (comptypes (vistype, type)) thistype = composite_type (vistype, type); else thistype = TREE_TYPE (b->decl); } else thistype = type; b->u.type = TREE_TYPE (b->decl); if (TREE_CODE (b->decl) == FUNCTION_DECL && DECL_BUILT_IN (b->decl)) thistype = build_type_attribute_variant (thistype, TYPE_ATTRIBUTES (b->u.type)); TREE_TYPE (b->decl) = thistype; bind (name, b->decl, scope, /*invisible=*/false, /*nested=*/true, locus); return b->decl; } else if (TREE_PUBLIC (x)) { if (visdecl && !b && duplicate_decls (x, visdecl)) { /* An external declaration at block scope referring to a visible entity with internal linkage. The composite type will already be correct for this scope, so we just need to fall through to make the declaration in this scope. */ nested = true; x = visdecl; } else { bind (name, x, external_scope, /*invisible=*/true, /*nested=*/false, locus); nested = true; } } } if (TREE_CODE (x) != PARM_DECL) warn_if_shadowing (x); skip_external_and_shadow_checks: if (TREE_CODE (x) == TYPE_DECL) { /* So this is a typedef, set its underlying type. */ set_underlying_type (x); /* If X is a typedef defined in the current function, record it for the purpose of implementing the -Wunused-local-typedefs warning. */ record_locally_defined_typedef (x); } bind (name, x, scope, /*invisible=*/false, nested, locus); /* If x's type is incomplete because it's based on a structure or union which has not yet been fully declared, attach it to that structure or union type, so we can go back and complete the variable declaration later, if the structure or union gets fully declared. If the input is erroneous, we can have error_mark in the type slot (e.g. "f(void a, ...)") - that doesn't count as an incomplete type. 
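The typical case attached here (illustrative): struct s; extern struct s v; followed later by struct s { int i; }; lets v's declaration be completed once the structure is.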
*/ if (TREE_TYPE (x) != error_mark_node && !COMPLETE_TYPE_P (TREE_TYPE (x))) { tree element = TREE_TYPE (x); while (TREE_CODE (element) == ARRAY_TYPE) element = TREE_TYPE (element); element = TYPE_MAIN_VARIANT (element); if ((TREE_CODE (element) == RECORD_TYPE || TREE_CODE (element) == UNION_TYPE) && (TREE_CODE (x) != TYPE_DECL || TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE) && !COMPLETE_TYPE_P (element)) C_TYPE_INCOMPLETE_VARS (element) = tree_cons (NULL_TREE, x, C_TYPE_INCOMPLETE_VARS (element)); } return x; } /* Record X as belonging to file scope. This is used only internally by the Objective-C front end, and is limited to its needs. duplicate_decls is not called; if there is any preexisting decl for this identifier, it is an ICE. */ tree pushdecl_top_level (tree x) { tree name; bool nested = false; gcc_assert (TREE_CODE (x) == VAR_DECL || TREE_CODE (x) == CONST_DECL); name = DECL_NAME (x); gcc_assert (TREE_CODE (x) == CONST_DECL || !I_SYMBOL_BINDING (name)); if (TREE_PUBLIC (x)) { bind (name, x, external_scope, /*invisible=*/true, /*nested=*/false, UNKNOWN_LOCATION); nested = true; } if (file_scope) bind (name, x, file_scope, /*invisible=*/false, nested, UNKNOWN_LOCATION); return x; } static void implicit_decl_warning (location_t loc, tree id, tree olddecl) { if (warn_implicit_function_declaration) { bool warned; if (flag_isoc99) warned = pedwarn (loc, OPT_Wimplicit_function_declaration, "implicit declaration of function %qE", id); else warned = warning_at (loc, OPT_Wimplicit_function_declaration, G_("implicit declaration of function %qE"), id); if (olddecl && warned) locate_old_decl (olddecl); } } /* This function represents mapping of a function code FCODE to its respective header. */ static const char * header_for_builtin_fn (enum built_in_function fcode) { switch (fcode) { CASE_FLT_FN (BUILT_IN_ACOS): CASE_FLT_FN (BUILT_IN_ACOSH): CASE_FLT_FN (BUILT_IN_ASIN): CASE_FLT_FN (BUILT_IN_ASINH): CASE_FLT_FN (BUILT_IN_ATAN): CASE_FLT_FN (BUILT_IN_ATANH): CASE_FLT_FN (BUILT_IN_ATAN2): CASE_FLT_FN (BUILT_IN_CBRT): CASE_FLT_FN (BUILT_IN_CEIL): CASE_FLT_FN (BUILT_IN_COPYSIGN): CASE_FLT_FN (BUILT_IN_COS): CASE_FLT_FN (BUILT_IN_COSH): CASE_FLT_FN (BUILT_IN_ERF): CASE_FLT_FN (BUILT_IN_ERFC): CASE_FLT_FN (BUILT_IN_EXP): CASE_FLT_FN (BUILT_IN_EXP2): CASE_FLT_FN (BUILT_IN_EXPM1): CASE_FLT_FN (BUILT_IN_FABS): CASE_FLT_FN (BUILT_IN_FDIM): CASE_FLT_FN (BUILT_IN_FLOOR): CASE_FLT_FN (BUILT_IN_FMA): CASE_FLT_FN (BUILT_IN_FMAX): CASE_FLT_FN (BUILT_IN_FMIN): CASE_FLT_FN (BUILT_IN_FMOD): CASE_FLT_FN (BUILT_IN_FREXP): CASE_FLT_FN (BUILT_IN_HYPOT): CASE_FLT_FN (BUILT_IN_ILOGB): CASE_FLT_FN (BUILT_IN_LDEXP): CASE_FLT_FN (BUILT_IN_LGAMMA): CASE_FLT_FN (BUILT_IN_LLRINT): CASE_FLT_FN (BUILT_IN_LLROUND): CASE_FLT_FN (BUILT_IN_LOG): CASE_FLT_FN (BUILT_IN_LOG10): CASE_FLT_FN (BUILT_IN_LOG1P): CASE_FLT_FN (BUILT_IN_LOG2): CASE_FLT_FN (BUILT_IN_LOGB): CASE_FLT_FN (BUILT_IN_LRINT): CASE_FLT_FN (BUILT_IN_LROUND): CASE_FLT_FN (BUILT_IN_MODF): CASE_FLT_FN (BUILT_IN_NAN): CASE_FLT_FN (BUILT_IN_NEARBYINT): CASE_FLT_FN (BUILT_IN_NEXTAFTER): CASE_FLT_FN (BUILT_IN_NEXTTOWARD): CASE_FLT_FN (BUILT_IN_POW): CASE_FLT_FN (BUILT_IN_REMAINDER): CASE_FLT_FN (BUILT_IN_REMQUO): CASE_FLT_FN (BUILT_IN_RINT): CASE_FLT_FN (BUILT_IN_ROUND): CASE_FLT_FN (BUILT_IN_SCALBLN): CASE_FLT_FN (BUILT_IN_SCALBN): CASE_FLT_FN (BUILT_IN_SIN): CASE_FLT_FN (BUILT_IN_SINH): CASE_FLT_FN (BUILT_IN_SINCOS): CASE_FLT_FN (BUILT_IN_SQRT): CASE_FLT_FN (BUILT_IN_TAN): CASE_FLT_FN (BUILT_IN_TANH): CASE_FLT_FN (BUILT_IN_TGAMMA): CASE_FLT_FN (BUILT_IN_TRUNC): case 
BUILT_IN_ISINF: case BUILT_IN_ISNAN: return "<math.h>"; CASE_FLT_FN (BUILT_IN_CABS): CASE_FLT_FN (BUILT_IN_CACOS): CASE_FLT_FN (BUILT_IN_CACOSH): CASE_FLT_FN (BUILT_IN_CARG): CASE_FLT_FN (BUILT_IN_CASIN): CASE_FLT_FN (BUILT_IN_CASINH): CASE_FLT_FN (BUILT_IN_CATAN): CASE_FLT_FN (BUILT_IN_CATANH): CASE_FLT_FN (BUILT_IN_CCOS): CASE_FLT_FN (BUILT_IN_CCOSH): CASE_FLT_FN (BUILT_IN_CEXP): CASE_FLT_FN (BUILT_IN_CIMAG): CASE_FLT_FN (BUILT_IN_CLOG): CASE_FLT_FN (BUILT_IN_CONJ): CASE_FLT_FN (BUILT_IN_CPOW): CASE_FLT_FN (BUILT_IN_CPROJ): CASE_FLT_FN (BUILT_IN_CREAL): CASE_FLT_FN (BUILT_IN_CSIN): CASE_FLT_FN (BUILT_IN_CSINH): CASE_FLT_FN (BUILT_IN_CSQRT): CASE_FLT_FN (BUILT_IN_CTAN): CASE_FLT_FN (BUILT_IN_CTANH): return "<complex.h>"; case BUILT_IN_MEMCHR: case BUILT_IN_MEMCMP: case BUILT_IN_MEMCPY: case BUILT_IN_MEMMOVE: case BUILT_IN_MEMSET: case BUILT_IN_STRCAT: case BUILT_IN_STRCHR: case BUILT_IN_STRCMP: case BUILT_IN_STRCPY: case BUILT_IN_STRCSPN: case BUILT_IN_STRLEN: case BUILT_IN_STRNCAT: case BUILT_IN_STRNCMP: case BUILT_IN_STRNCPY: case BUILT_IN_STRPBRK: case BUILT_IN_STRRCHR: case BUILT_IN_STRSPN: case BUILT_IN_STRSTR: return "<string.h>"; case BUILT_IN_FPRINTF: case BUILT_IN_PUTC: case BUILT_IN_FPUTC: case BUILT_IN_FPUTS: case BUILT_IN_FSCANF: case BUILT_IN_FWRITE: case BUILT_IN_PRINTF: case BUILT_IN_PUTCHAR: case BUILT_IN_PUTS: case BUILT_IN_SCANF: case BUILT_IN_SNPRINTF: case BUILT_IN_SPRINTF: case BUILT_IN_SSCANF: case BUILT_IN_VFPRINTF: case BUILT_IN_VFSCANF: case BUILT_IN_VPRINTF: case BUILT_IN_VSCANF: case BUILT_IN_VSNPRINTF: case BUILT_IN_VSPRINTF: case BUILT_IN_VSSCANF: return "<stdio.h>"; case BUILT_IN_ISALNUM: case BUILT_IN_ISALPHA: case BUILT_IN_ISBLANK: case BUILT_IN_ISCNTRL: case BUILT_IN_ISDIGIT: case BUILT_IN_ISGRAPH: case BUILT_IN_ISLOWER: case BUILT_IN_ISPRINT: case BUILT_IN_ISPUNCT: case BUILT_IN_ISSPACE: case BUILT_IN_ISUPPER: case BUILT_IN_ISXDIGIT: case BUILT_IN_TOLOWER: case BUILT_IN_TOUPPER: return "<ctype.h>"; case BUILT_IN_ISWALNUM: case BUILT_IN_ISWALPHA: case BUILT_IN_ISWBLANK: case BUILT_IN_ISWCNTRL: case BUILT_IN_ISWDIGIT: case BUILT_IN_ISWGRAPH: case BUILT_IN_ISWLOWER: case BUILT_IN_ISWPRINT: case BUILT_IN_ISWPUNCT: case BUILT_IN_ISWSPACE: case BUILT_IN_ISWUPPER: case BUILT_IN_ISWXDIGIT: case BUILT_IN_TOWLOWER: case BUILT_IN_TOWUPPER: return "<wctype.h>"; case BUILT_IN_ABORT: case BUILT_IN_ABS: case BUILT_IN_CALLOC: case BUILT_IN_EXIT: case BUILT_IN_FREE: case BUILT_IN_LABS: case BUILT_IN_LLABS: case BUILT_IN_MALLOC: case BUILT_IN_REALLOC: case BUILT_IN__EXIT2: case BUILT_IN_ALIGNED_ALLOC: return "<stdlib.h>"; case BUILT_IN_IMAXABS: return "<inttypes.h>"; case BUILT_IN_STRFTIME: return "<time.h>"; default: return NULL; } } /* Generate an implicit declaration for identifier FUNCTIONID at LOC as a function of type int (). */ tree implicitly_declare (location_t loc, tree functionid) { struct c_binding *b; tree decl = 0; tree asmspec_tree; for (b = I_SYMBOL_BINDING (functionid); b; b = b->shadowed) { if (B_IN_SCOPE (b, external_scope)) { decl = b->decl; break; } } if (decl) { if (decl == error_mark_node) return decl; /* FIXME: Objective-C has weird not-really-builtin functions which are supposed to be visible automatically. They wind up in the external scope because they're pushed before the file scope gets created. Catch this here and rebind them into the file scope. 
*/ if (!DECL_BUILT_IN (decl) && DECL_IS_BUILTIN (decl)) { bind (functionid, decl, file_scope, /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl)); return decl; } else { tree newtype = default_function_type; if (b->u.type) TREE_TYPE (decl) = b->u.type; /* Implicit declaration of a function already declared (somehow) in a different scope, or as a built-in. If this is the first time this has happened, warn; then recycle the old declaration but with the new type. */ if (!C_DECL_IMPLICIT (decl)) { implicit_decl_warning (loc, functionid, decl); C_DECL_IMPLICIT (decl) = 1; } if (DECL_BUILT_IN (decl)) { newtype = build_type_attribute_variant (newtype, TYPE_ATTRIBUTES (TREE_TYPE (decl))); if (!comptypes (newtype, TREE_TYPE (decl))) { bool warned = warning_at (loc, 0, "incompatible implicit " "declaration of built-in " "function %qD", decl); /* See if we can hint which header to include. */ const char *header = header_for_builtin_fn (DECL_FUNCTION_CODE (decl)); if (header != NULL && warned) inform (loc, "include %qs or provide a declaration of %qD", header, decl); newtype = TREE_TYPE (decl); } } else { if (!comptypes (newtype, TREE_TYPE (decl))) { error_at (loc, "incompatible implicit declaration of " "function %qD", decl); locate_old_decl (decl); } } b->u.type = TREE_TYPE (decl); TREE_TYPE (decl) = newtype; bind (functionid, decl, current_scope, /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl)); return decl; } } /* Not seen before. */ decl = build_decl (loc, FUNCTION_DECL, functionid, default_function_type); DECL_EXTERNAL (decl) = 1; TREE_PUBLIC (decl) = 1; C_DECL_IMPLICIT (decl) = 1; implicit_decl_warning (loc, functionid, 0); asmspec_tree = maybe_apply_renaming_pragma (decl, /*asmname=*/NULL); if (asmspec_tree) set_user_assembler_name (decl, TREE_STRING_POINTER (asmspec_tree)); /* C89 says implicit declarations are in the innermost block. So we record the decl in the standard fashion. */ decl = pushdecl (decl); /* No need to call objc_check_decl here - it's a function type. */ rest_of_decl_compilation (decl, 0, 0); /* Write a record describing this implicit function declaration to the prototypes file (if requested). */ gen_aux_info_record (decl, 0, 1, 0); /* Possibly apply some default attributes to this implicit declaration. */ decl_attributes (&decl, NULL_TREE, 0); return decl; } /* Issue an error message for a reference to an undeclared variable ID, including a reference to a builtin outside of function-call context. Establish a binding of the identifier to error_mark_node in an appropriate scope, which will suppress further errors for the same identifier. The error message should be given location LOC. */ void undeclared_variable (location_t loc, tree id) { static bool already = false; struct c_scope *scope; if (current_function_decl == 0) { error_at (loc, "%qE undeclared here (not in a function)", id); scope = current_scope; } else { if (!objc_diagnose_private_ivar (id)) error_at (loc, "%qE undeclared (first use in this function)", id); if (!already) { inform (loc, "each undeclared identifier is reported only" " once for each function it appears in"); already = true; } /* If we are parsing old-style parameter decls, current_function_decl will be nonnull but current_function_scope will be null. */ scope = current_function_scope ? 
current_function_scope : current_scope; } bind (id, error_mark_node, scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); } /* Subroutine of lookup_label, declare_label, define_label: construct a LABEL_DECL with all the proper frills. Also create a struct c_label_vars initialized for the current scope. */ static tree make_label (location_t location, tree name, bool defining, struct c_label_vars **p_label_vars) { tree label = build_decl (location, LABEL_DECL, name, void_type_node); DECL_CONTEXT (label) = current_function_decl; DECL_MODE (label) = VOIDmode; c_label_vars *label_vars = ggc_alloc<c_label_vars> (); label_vars->shadowed = NULL; set_spot_bindings (&label_vars->label_bindings, defining); label_vars->decls_in_scope = make_tree_vector (); label_vars->gotos = NULL; *p_label_vars = label_vars; return label; } /* Get the LABEL_DECL corresponding to identifier NAME as a label. Create one if none exists so far for the current function. This is called when a label is used in a goto expression or has its address taken. */ tree lookup_label (tree name) { tree label; struct c_label_vars *label_vars; if (current_function_scope == 0) { error ("label %qE referenced outside of any function", name); return 0; } /* Use a label already defined or ref'd with this name, but not if it is inherited from a containing function and wasn't declared using __label__. */ label = I_LABEL_DECL (name); if (label && (DECL_CONTEXT (label) == current_function_decl || C_DECLARED_LABEL_FLAG (label))) { /* If the label has only been declared, update its apparent location to point here, for better diagnostics if it turns out not to have been defined. */ if (DECL_INITIAL (label) == NULL_TREE) DECL_SOURCE_LOCATION (label) = input_location; return label; } /* No label binding for that identifier; make one. */ label = make_label (input_location, name, false, &label_vars); /* Ordinary labels go in the current function scope. */ bind_label (name, label, current_function_scope, label_vars); return label; } /* Issue a warning about DECL for a goto statement at GOTO_LOC going to LABEL. */ static void warn_about_goto (location_t goto_loc, tree label, tree decl) { if (variably_modified_type_p (TREE_TYPE (decl), NULL_TREE)) error_at (goto_loc, "jump into scope of identifier with variably modified type"); else warning_at (goto_loc, OPT_Wjump_misses_init, "jump skips variable initialization"); inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label); inform (DECL_SOURCE_LOCATION (decl), "%qD declared here", decl); } /* Look up a label because of a goto statement. This is like lookup_label, but also issues any appropriate warnings. */ tree lookup_label_for_goto (location_t loc, tree name) { tree label; struct c_label_vars *label_vars; unsigned int ix; tree decl; label = lookup_label (name); if (label == NULL_TREE) return NULL_TREE; /* If we are jumping to a different function, we can't issue any useful warnings. */ if (DECL_CONTEXT (label) != current_function_decl) { gcc_assert (C_DECLARED_LABEL_FLAG (label)); return label; } label_vars = I_LABEL_BINDING (name)->u.label; /* If the label has not yet been defined, then push this goto on a list for possible later warnings. */ if (label_vars->label_bindings.scope == NULL) { c_goto_bindings *g = ggc_alloc<c_goto_bindings> (); g->loc = loc; set_spot_bindings (&g->goto_bindings, true); vec_safe_push (label_vars->gotos, g); return label; } /* If there are any decls in label_vars->decls_in_scope, then this goto has missed the declaration of the decl. 
This happens for a case like int i = 1; lab: ... goto lab; Issue a warning or error. */ FOR_EACH_VEC_SAFE_ELT (label_vars->decls_in_scope, ix, decl) warn_about_goto (loc, label, decl); if (label_vars->label_bindings.left_stmt_expr) { error_at (loc, "jump into statement expression"); inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label); } return label; } /* Make a label named NAME in the current function, shadowing silently any that may be inherited from containing functions or containing scopes. This is called for __label__ declarations. */ tree declare_label (tree name) { struct c_binding *b = I_LABEL_BINDING (name); tree label; struct c_label_vars *label_vars; /* Check to make sure that the label hasn't already been declared at this scope */ if (b && B_IN_CURRENT_SCOPE (b)) { error ("duplicate label declaration %qE", name); locate_old_decl (b->decl); /* Just use the previous declaration. */ return b->decl; } label = make_label (input_location, name, false, &label_vars); C_DECLARED_LABEL_FLAG (label) = 1; /* Declared labels go in the current scope. */ bind_label (name, label, current_scope, label_vars); return label; } /* When we define a label, issue any appropriate warnings if there are any gotos earlier in the function which jump to this label. */ static void check_earlier_gotos (tree label, struct c_label_vars* label_vars) { unsigned int ix; struct c_goto_bindings *g; FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) { struct c_binding *b; struct c_scope *scope; /* We have a goto to this label. The goto is going forward. In g->scope, the goto is going to skip any binding which was defined after g->bindings_in_scope. */ if (g->goto_bindings.scope->has_jump_unsafe_decl) { for (b = g->goto_bindings.scope->bindings; b != g->goto_bindings.bindings_in_scope; b = b->prev) { if (decl_jump_unsafe (b->decl)) warn_about_goto (g->loc, label, b->decl); } } /* We also need to warn about decls defined in any scopes between the scope of the label and the scope of the goto. */ for (scope = label_vars->label_bindings.scope; scope != g->goto_bindings.scope; scope = scope->outer) { gcc_assert (scope != NULL); if (scope->has_jump_unsafe_decl) { if (scope == label_vars->label_bindings.scope) b = label_vars->label_bindings.bindings_in_scope; else b = scope->bindings; for (; b != NULL; b = b->prev) { if (decl_jump_unsafe (b->decl)) warn_about_goto (g->loc, label, b->decl); } } } if (g->goto_bindings.stmt_exprs > 0) { error_at (g->loc, "jump into statement expression"); inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label); } } /* Now that the label is defined, we will issue warnings about subsequent gotos to this label when we see them. */ vec_safe_truncate (label_vars->gotos, 0); label_vars->gotos = NULL; } /* Define a label, specifying the location in the source file. Return the LABEL_DECL node for the label, if the definition is valid. Otherwise return 0. */ tree define_label (location_t location, tree name) { /* Find any preexisting label with this name. It is an error if that label has already been defined in this function, or if there is a containing function with a declared label with the same name. 
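E.g. (illustrative): void f (void) { lab: ; lab: return; } reaches the duplicate label error below.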
*/ tree label = I_LABEL_DECL (name); if (label && ((DECL_CONTEXT (label) == current_function_decl && DECL_INITIAL (label) != 0) || (DECL_CONTEXT (label) != current_function_decl && C_DECLARED_LABEL_FLAG (label)))) { error_at (location, "duplicate label %qD", label); locate_old_decl (label); return 0; } else if (label && DECL_CONTEXT (label) == current_function_decl) { struct c_label_vars *label_vars = I_LABEL_BINDING (name)->u.label; /* The label has been used or declared already in this function, but not defined. Update its location to point to this definition. */ DECL_SOURCE_LOCATION (label) = location; set_spot_bindings (&label_vars->label_bindings, true); /* Issue warnings as required about any goto statements from earlier in the function. */ check_earlier_gotos (label, label_vars); } else { struct c_label_vars *label_vars; /* No label binding for that identifier; make one. */ label = make_label (location, name, true, &label_vars); /* Ordinary labels go in the current function scope. */ bind_label (name, label, current_function_scope, label_vars); } if (!in_system_header_at (input_location) && lookup_name (name)) warning_at (location, OPT_Wtraditional, "traditional C lacks a separate namespace " "for labels, identifier %qE conflicts", name); /* Mark label as having been defined. */ DECL_INITIAL (label) = error_mark_node; return label; } /* Get the bindings for a new switch statement. This is used to issue warnings as appropriate for jumps from the switch to case or default labels. */ struct c_spot_bindings * c_get_switch_bindings (void) { struct c_spot_bindings *switch_bindings; switch_bindings = XNEW (struct c_spot_bindings); set_spot_bindings (switch_bindings, true); return switch_bindings; } void c_release_switch_bindings (struct c_spot_bindings *bindings) { gcc_assert (bindings->stmt_exprs == 0 && !bindings->left_stmt_expr); XDELETE (bindings); } /* This is called at the point of a case or default label to issue warnings about decls as needed. It returns true if it found an error, not just a warning. */ bool c_check_switch_jump_warnings (struct c_spot_bindings *switch_bindings, location_t switch_loc, location_t case_loc) { bool saw_error; struct c_scope *scope; saw_error = false; for (scope = current_scope; scope != switch_bindings->scope; scope = scope->outer) { struct c_binding *b; gcc_assert (scope != NULL); if (!scope->has_jump_unsafe_decl) continue; for (b = scope->bindings; b != NULL; b = b->prev) { if (decl_jump_unsafe (b->decl)) { if (variably_modified_type_p (TREE_TYPE (b->decl), NULL_TREE)) { saw_error = true; error_at (case_loc, ("switch jumps into scope of identifier with " "variably modified type")); } else warning_at (case_loc, OPT_Wjump_misses_init, "switch jumps over variable initialization"); inform (switch_loc, "switch starts here"); inform (DECL_SOURCE_LOCATION (b->decl), "%qD declared here", b->decl); } } } if (switch_bindings->stmt_exprs > 0) { saw_error = true; error_at (case_loc, "switch jumps into statement expression"); inform (switch_loc, "switch starts here"); } return saw_error; } /* Given NAME, an IDENTIFIER_NODE, return the structure (or union or enum) definition for that name. If THISLEVEL_ONLY is nonzero, searches only the current_scope. CODE says which kind of type the caller wants; it is RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE. If PLOC is not NULL and this returns non-null, it sets *PLOC to the location where the tag was defined. If the wrong kind of type is found, an error is reported. 
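For instance (illustrative): after enum e { A }; a reference such as struct e *p; finds the tag e with the wrong TREE_CODE and is diagnosed, possibly deferred through pending_invalid_xref.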
*/ static tree lookup_tag (enum tree_code code, tree name, int thislevel_only, location_t *ploc) { struct c_binding *b = I_TAG_BINDING (name); int thislevel = 0; if (!b || !b->decl) return 0; /* We only care about whether it's in this level if thislevel_only was set or it might be a type clash. */ if (thislevel_only || TREE_CODE (b->decl) != code) { /* For our purposes, a tag in the external scope is the same as a tag in the file scope. (Primarily relevant to Objective-C and its builtin structure tags, which get pushed before the file scope is created.) */ if (B_IN_CURRENT_SCOPE (b) || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b))) thislevel = 1; } if (thislevel_only && !thislevel) return 0; if (TREE_CODE (b->decl) != code) { /* Definition isn't the kind we were looking for. */ pending_invalid_xref = name; pending_invalid_xref_location = input_location; /* If in the same binding level as a declaration as a tag of a different type, this must not be allowed to shadow that tag, so give the error immediately. (For example, "struct foo; union foo;" is invalid.) */ if (thislevel) pending_xref_error (); } if (ploc != NULL) *ploc = b->locus; return b->decl; } /* Print an error message now for a recent invalid struct, union or enum cross reference. We don't print them immediately because they are not invalid when used in the `struct foo;' construct for shadowing. */ void pending_xref_error (void) { if (pending_invalid_xref != 0) error_at (pending_invalid_xref_location, "%qE defined as wrong kind of tag", pending_invalid_xref); pending_invalid_xref = 0; } /* Look up NAME in the current scope and its superiors in the namespace of variables, functions and typedefs. Return a ..._DECL node of some kind representing its definition, or return 0 if it is undefined. */ tree lookup_name (tree name) { struct c_binding *b = I_SYMBOL_BINDING (name); if (b && !b->invisible) { maybe_record_typedef_use (b->decl); return b->decl; } return 0; } /* Similar to `lookup_name' but look only at the indicated scope. */ static tree lookup_name_in_scope (tree name, struct c_scope *scope) { struct c_binding *b; for (b = I_SYMBOL_BINDING (name); b; b = b->shadowed) if (B_IN_SCOPE (b, scope)) return b->decl; return 0; } /* Create the predefined scalar types of C, and some nodes representing standard constants (0, 1, (void *) 0). Initialize the global scope. Make definitions for built-in primitive functions. */ void c_init_decl_processing (void) { location_t save_loc = input_location; /* Initialize reserved words for parser. */ c_parse_init (); current_function_decl = 0; gcc_obstack_init (&parser_obstack); /* Make the externals scope. */ push_scope (); external_scope = current_scope; /* Declarations from c_common_nodes_and_builtins must not be associated with this input file, lest we get differences between using and not using preprocessed headers. */ input_location = BUILTINS_LOCATION; c_common_nodes_and_builtins (); /* In C, comparisons and TRUTH_* expressions have type int. */ truthvalue_type_node = integer_type_node; truthvalue_true_node = integer_one_node; truthvalue_false_node = integer_zero_node; /* Even in C99, which has a real boolean type. */ pushdecl (build_decl (UNKNOWN_LOCATION, TYPE_DECL, get_identifier ("_Bool"), boolean_type_node)); input_location = save_loc; make_fname_decl = c_make_fname_decl; start_fname_decls (); } /* Create the VAR_DECL at LOC for __FUNCTION__ etc. 
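(E.g., illustrative: inside void f (void) { puts (__func__); } the decl built here holds the string "f".)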
ID is the name to give the decl, NAME is the initialization string and TYPE_DEP indicates whether NAME depended on the type of the function. As we don't yet implement delayed emission of static data, we mark the decl as emitted so it is not placed in the output. Anything using it must therefore pull out the STRING_CST initializer directly. FIXME. */ static tree c_make_fname_decl (location_t loc, tree id, int type_dep) { const char *name = fname_as_string (type_dep); tree decl, type, init; size_t length = strlen (name); type = build_array_type (char_type_node, build_index_type (size_int (length))); type = c_build_qualified_type (type, TYPE_QUAL_CONST); decl = build_decl (loc, VAR_DECL, id, type); TREE_STATIC (decl) = 1; TREE_READONLY (decl) = 1; DECL_ARTIFICIAL (decl) = 1; init = build_string (length + 1, name); free (CONST_CAST (char *, name)); TREE_TYPE (init) = type; DECL_INITIAL (decl) = init; TREE_USED (decl) = 1; if (current_function_decl /* For invalid programs like this: void foo() const char* p = __FUNCTION__; the __FUNCTION__ is believed to appear in K&R style function parameter declarator. In that case we still don't have function_scope. */ && (!seen_error () || current_function_scope)) { DECL_CONTEXT (decl) = current_function_decl; bind (id, decl, current_function_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); } finish_decl (decl, loc, init, NULL_TREE, NULL_TREE); return decl; } tree c_builtin_function (tree decl) { tree type = TREE_TYPE (decl); tree id = DECL_NAME (decl); const char *name = IDENTIFIER_POINTER (id); C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type); /* Should never be called on a symbol with a preexisting meaning. */ gcc_assert (!I_SYMBOL_BINDING (id)); bind (id, decl, external_scope, /*invisible=*/true, /*nested=*/false, UNKNOWN_LOCATION); /* Builtins in the implementation namespace are made visible without needing to be explicitly declared. See push_file_scope. */ if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1]))) { DECL_CHAIN (decl) = visible_builtins; visible_builtins = decl; } return decl; } tree c_builtin_function_ext_scope (tree decl) { tree type = TREE_TYPE (decl); tree id = DECL_NAME (decl); const char *name = IDENTIFIER_POINTER (id); C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type); if (external_scope) bind (id, decl, external_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); /* Builtins in the implementation namespace are made visible without needing to be explicitly declared. See push_file_scope. */ if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1]))) { DECL_CHAIN (decl) = visible_builtins; visible_builtins = decl; } return decl; } /* Called when a declaration is seen that contains no names to declare. If its type is a reference to a structure, union or enum inherited from a containing scope, shadow that tag name for the current scope with a forward reference. If its type defines a new named structure or union or defines an enum, it is valid but we need not do anything here. Otherwise, it is an error. */ void shadow_tag (const struct c_declspecs *declspecs) { shadow_tag_warned (declspecs, 0); } /* WARNED is 1 if we have done a pedwarn, 2 if we have done a warning, but no pedwarn. 
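(Typical inputs, for illustration: static struct foo; draws the storage-class pedwarn below, while a bare int; is a useless type name in an empty declaration.)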
*/ void shadow_tag_warned (const struct c_declspecs *declspecs, int warned) { bool found_tag = false; if (declspecs->type && !declspecs->default_int_p && !declspecs->typedef_p) { tree value = declspecs->type; enum tree_code code = TREE_CODE (value); if (code == RECORD_TYPE || code == UNION_TYPE || code == ENUMERAL_TYPE) /* Used to test also that TYPE_SIZE (value) != 0. That caused warning for `struct foo;' at top level in the file. */ { tree name = TYPE_NAME (value); tree t; found_tag = true; if (declspecs->restrict_p) { error ("invalid use of %<restrict%>"); warned = 1; } if (name == 0) { if (warned != 1 && code != ENUMERAL_TYPE) /* Empty unnamed enum OK */ { pedwarn (input_location, 0, "unnamed struct/union that defines no instances"); warned = 1; } } else if (declspecs->typespec_kind != ctsk_tagdef && declspecs->typespec_kind != ctsk_tagfirstref && declspecs->storage_class != csc_none) { if (warned != 1) pedwarn (input_location, 0, "empty declaration with storage class specifier " "does not redeclare tag"); warned = 1; pending_xref_error (); } else if (declspecs->typespec_kind != ctsk_tagdef && declspecs->typespec_kind != ctsk_tagfirstref && (declspecs->const_p || declspecs->volatile_p || declspecs->atomic_p || declspecs->restrict_p || declspecs->address_space)) { if (warned != 1) pedwarn (input_location, 0, "empty declaration with type qualifier " "does not redeclare tag"); warned = 1; pending_xref_error (); } else if (declspecs->typespec_kind != ctsk_tagdef && declspecs->typespec_kind != ctsk_tagfirstref && declspecs->alignas_p) { if (warned != 1) pedwarn (input_location, 0, "empty declaration with %<_Alignas%> " "does not redeclare tag"); warned = 1; pending_xref_error (); } else { pending_invalid_xref = 0; t = lookup_tag (code, name, 1, NULL); if (t == 0) { t = make_node (code); pushtag (input_location, name, t); } } } else { if (warned != 1 && !in_system_header_at (input_location)) { pedwarn (input_location, 0, "useless type name in empty declaration"); warned = 1; } } } else if (warned != 1 && !in_system_header_at (input_location) && declspecs->typedef_p) { pedwarn (input_location, 0, "useless type name in empty declaration"); warned = 1; } pending_invalid_xref = 0; if (declspecs->inline_p) { error ("%<inline%> in empty declaration"); warned = 1; } if (declspecs->noreturn_p) { error ("%<_Noreturn%> in empty declaration"); warned = 1; } if (current_scope == file_scope && declspecs->storage_class == csc_auto) { error ("%<auto%> in file-scope empty declaration"); warned = 1; } if (current_scope == file_scope && declspecs->storage_class == csc_register) { error ("%<register%> in file-scope empty declaration"); warned = 1; } if (!warned && !in_system_header_at (input_location) && declspecs->storage_class != csc_none) { warning (0, "useless storage class specifier in empty declaration"); warned = 2; } if (!warned && !in_system_header_at (input_location) && declspecs->thread_p) { warning (0, "useless %qs in empty declaration", declspecs->thread_gnu_p ? 
"__thread" : "_Thread_local"); warned = 2; } if (!warned && !in_system_header_at (input_location) && (declspecs->const_p || declspecs->volatile_p || declspecs->atomic_p || declspecs->restrict_p || declspecs->address_space)) { warning (0, "useless type qualifier in empty declaration"); warned = 2; } if (!warned && !in_system_header_at (input_location) && declspecs->alignas_p) { warning (0, "useless %<_Alignas%> in empty declaration"); warned = 2; } if (warned != 1) { if (!found_tag) pedwarn (input_location, 0, "empty declaration"); } } /* Return the qualifiers from SPECS as a bitwise OR of TYPE_QUAL_* bits. SPECS represents declaration specifiers that the grammar only permits to contain type qualifiers and attributes. */ int quals_from_declspecs (const struct c_declspecs *specs) { int quals = ((specs->const_p ? TYPE_QUAL_CONST : 0) | (specs->volatile_p ? TYPE_QUAL_VOLATILE : 0) | (specs->restrict_p ? TYPE_QUAL_RESTRICT : 0) | (specs->atomic_p ? TYPE_QUAL_ATOMIC : 0) | (ENCODE_QUAL_ADDR_SPACE (specs->address_space))); gcc_assert (!specs->type && !specs->decl_attr && specs->typespec_word == cts_none && specs->storage_class == csc_none && !specs->typedef_p && !specs->explicit_signed_p && !specs->deprecated_p && !specs->long_p && !specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p && !specs->inline_p && !specs->noreturn_p && !specs->thread_p); return quals; } /* Construct an array declarator. LOC is the location of the beginning of the array (usually the opening brace). EXPR is the expression inside [], or NULL_TREE. QUALS are the type qualifiers inside the [] (to be applied to the pointer to which a parameter array is converted). STATIC_P is true if "static" is inside the [], false otherwise. VLA_UNSPEC_P is true if the array is [*], a VLA of unspecified length which is nevertheless a complete type, false otherwise. The field for the contained declarator is left to be filled in by set_array_declarator_inner. */ struct c_declarator * build_array_declarator (location_t loc, tree expr, struct c_declspecs *quals, bool static_p, bool vla_unspec_p) { struct c_declarator *declarator = XOBNEW (&parser_obstack, struct c_declarator); declarator->id_loc = loc; declarator->kind = cdk_array; declarator->declarator = 0; declarator->u.array.dimen = expr; if (quals) { declarator->u.array.attrs = quals->attrs; declarator->u.array.quals = quals_from_declspecs (quals); } else { declarator->u.array.attrs = NULL_TREE; declarator->u.array.quals = 0; } declarator->u.array.static_p = static_p; declarator->u.array.vla_unspec_p = vla_unspec_p; if (static_p || quals != NULL) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support %<static%> or type " "qualifiers in parameter array declarators"); if (vla_unspec_p) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support %<[*]%> array declarators"); if (vla_unspec_p) { if (!current_scope->parm_flag) { /* C99 6.7.5.2p4 */ error_at (loc, "%<[*]%> not allowed in other than " "function prototype scope"); declarator->u.array.vla_unspec_p = false; return NULL; } current_scope->had_vla_unspec = true; } return declarator; } /* Set the contained declarator of an array declarator. DECL is the declarator, as constructed by build_array_declarator; INNER is what appears on the left of the []. */ struct c_declarator * set_array_declarator_inner (struct c_declarator *decl, struct c_declarator *inner) { decl->declarator = inner; return decl; } /* INIT is a constructor that forms DECL's initializer. 
If the final element initializes a flexible array field, add the size of that initializer to DECL's size. */ static void add_flexible_array_elts_to_size (tree decl, tree init) { tree elt, type; if (vec_safe_is_empty (CONSTRUCTOR_ELTS (init))) return; elt = CONSTRUCTOR_ELTS (init)->last ().value; type = TREE_TYPE (elt); if (TREE_CODE (type) == ARRAY_TYPE && TYPE_SIZE (type) == NULL_TREE && TYPE_DOMAIN (type) != NULL_TREE && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE) { complete_array_type (&type, elt, false); DECL_SIZE (decl) = size_binop (PLUS_EXPR, DECL_SIZE (decl), TYPE_SIZE (type)); DECL_SIZE_UNIT (decl) = size_binop (PLUS_EXPR, DECL_SIZE_UNIT (decl), TYPE_SIZE_UNIT (type)); } } /* Decode a "typename", such as "int **", returning a ..._TYPE node. Set *EXPR, if EXPR not NULL, to any expression to be evaluated before the type name, and set *EXPR_CONST_OPERANDS, if EXPR_CONST_OPERANDS not NULL, to indicate whether the type name may appear in a constant expression. */ tree groktypename (struct c_type_name *type_name, tree *expr, bool *expr_const_operands) { tree type; tree attrs = type_name->specs->attrs; type_name->specs->attrs = NULL_TREE; type = grokdeclarator (type_name->declarator, type_name->specs, TYPENAME, false, NULL, &attrs, expr, expr_const_operands, DEPRECATED_NORMAL); /* Apply attributes. */ decl_attributes (&type, attrs, 0); return type; } /* Wrapper for decl_attributes that adds some implicit attributes to VAR_DECLs or FUNCTION_DECLs. */ static tree c_decl_attributes (tree *node, tree attributes, int flags) { /* Add implicit "omp declare target" attribute if requested. */ if (current_omp_declare_target_attribute && ((TREE_CODE (*node) == VAR_DECL && (TREE_STATIC (*node) || DECL_EXTERNAL (*node))) || TREE_CODE (*node) == FUNCTION_DECL)) { if (TREE_CODE (*node) == VAR_DECL && ((DECL_CONTEXT (*node) && TREE_CODE (DECL_CONTEXT (*node)) == FUNCTION_DECL) || (current_function_decl && !DECL_EXTERNAL (*node)))) error ("%q+D in block scope inside of declare target directive", *node); else if (TREE_CODE (*node) == VAR_DECL && !lang_hooks.types.omp_mappable_type (TREE_TYPE (*node))) error ("%q+D in declare target directive does not have mappable type", *node); else attributes = tree_cons (get_identifier ("omp declare target"), NULL_TREE, attributes); } return decl_attributes (node, attributes, flags); } /* Decode a declarator in an ordinary declaration or data definition. This is called as soon as the type information and variable name have been parsed, before parsing the initializer if any. Here we create the ..._DECL node, fill in its type, and put it on the list of decls for the current context. The ..._DECL node is returned as the value. Exception: for arrays where the length is not specified, the type is left null, to be filled in by `finish_decl'. Function definitions do not come here; they go to start_function instead. However, external and forward declarations of functions do go through here. Structure field declarations are done by grokfield and not through here. */ tree start_decl (struct c_declarator *declarator, struct c_declspecs *declspecs, bool initialized, tree attributes) { tree decl; tree tem; tree expr = NULL_TREE; enum deprecated_states deprecated_state = DEPRECATED_NORMAL; /* An object declared as __attribute__((deprecated)) suppresses warnings of uses of other deprecated items. 
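(Illustrative: given typedef int old_t __attribute__ ((deprecated)); the declaration old_t x __attribute__ ((deprecated)); does not warn about its own use of old_t.)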
*/ if (lookup_attribute ("deprecated", attributes)) deprecated_state = DEPRECATED_SUPPRESS; decl = grokdeclarator (declarator, declspecs, NORMAL, initialized, NULL, &attributes, &expr, NULL, deprecated_state); if (!decl || decl == error_mark_node) return NULL_TREE; if (expr) add_stmt (fold_convert (void_type_node, expr)); if (TREE_CODE (decl) != FUNCTION_DECL && MAIN_NAME_P (DECL_NAME (decl))) warning (OPT_Wmain, "%q+D is usually a function", decl); if (initialized) /* Is it valid for this decl to have an initializer at all? If not, set INITIALIZED to zero, which will indirectly tell 'finish_decl' to ignore the initializer once it is parsed. */ switch (TREE_CODE (decl)) { case TYPE_DECL: error ("typedef %qD is initialized (use __typeof__ instead)", decl); initialized = 0; break; case FUNCTION_DECL: error ("function %qD is initialized like a variable", decl); initialized = 0; break; case PARM_DECL: /* DECL_INITIAL in a PARM_DECL is really DECL_ARG_TYPE. */ error ("parameter %qD is initialized", decl); initialized = 0; break; default: /* Don't allow initializations for incomplete types except for arrays which might be completed by the initialization. */ /* This can happen if the array size is an undefined macro. We already gave a warning, so we don't need another one. */ if (TREE_TYPE (decl) == error_mark_node) initialized = 0; else if (COMPLETE_TYPE_P (TREE_TYPE (decl))) { /* A complete type is ok if size is fixed. */ if (TREE_CODE (TYPE_SIZE (TREE_TYPE (decl))) != INTEGER_CST || C_DECL_VARIABLE_SIZE (decl)) { error ("variable-sized object may not be initialized"); initialized = 0; } } else if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE) { error ("variable %qD has initializer but incomplete type", decl); initialized = 0; } else if (C_DECL_VARIABLE_SIZE (decl)) { /* Although C99 is unclear about whether incomplete arrays of VLAs themselves count as VLAs, it does not make sense to permit them to be initialized given that ordinary VLAs may not be initialized. */ error ("variable-sized object may not be initialized"); initialized = 0; } } if (initialized) { if (current_scope == file_scope) TREE_STATIC (decl) = 1; /* Tell 'pushdecl' this is an initialized decl even though we don't yet have the initializer expression. Also tell 'finish_decl' it may store the real initializer. */ DECL_INITIAL (decl) = error_mark_node; } /* If this is a function declaration, write a record describing it to the prototypes file (if requested). */ if (TREE_CODE (decl) == FUNCTION_DECL) gen_aux_info_record (decl, 0, 0, prototype_p (TREE_TYPE (decl))); /* ANSI specifies that a tentative definition which is not merged with a non-tentative definition behaves exactly like a definition with an initializer equal to zero. (Section 3.7.2) -fno-common gives strict ANSI behavior, though this tends to break a large body of code that grew up without this rule. Thread-local variables are never common, since there's no entrenched body of code to break, and it allows more efficient variable references in the presence of dynamic linking. */ if (TREE_CODE (decl) == VAR_DECL && !initialized && TREE_PUBLIC (decl) && !DECL_THREAD_LOCAL_P (decl) && !flag_no_common) DECL_COMMON (decl) = 1; /* Set attributes here so if duplicate decl, will have proper attributes. */ c_decl_attributes (&decl, attributes, 0); /* Handle gnu_inline attribute. 
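   With -std=c99 and later, the attribute restores GNU89 inline
   semantics; e.g. the illustrative

     extern inline __attribute__ ((gnu_inline))
     int twice (int x) { return 2 * x; }

   provides an inline definition only, with no out-of-line symbol
   emitted from this translation unit.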
*/ if (declspecs->inline_p && !flag_gnu89_inline && TREE_CODE (decl) == FUNCTION_DECL && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl)) || current_function_decl)) { if (declspecs->storage_class == csc_auto && current_scope != file_scope) ; else if (declspecs->storage_class != csc_static) DECL_EXTERNAL (decl) = !DECL_EXTERNAL (decl); } if (TREE_CODE (decl) == FUNCTION_DECL && targetm.calls.promote_prototypes (TREE_TYPE (decl))) { struct c_declarator *ce = declarator; if (ce->kind == cdk_pointer) ce = declarator->declarator; if (ce->kind == cdk_function) { tree args = ce->u.arg_info->parms; for (; args; args = DECL_CHAIN (args)) { tree type = TREE_TYPE (args); if (type && INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)) DECL_ARG_TYPE (args) = c_type_promotes_to (type); } } } if (TREE_CODE (decl) == FUNCTION_DECL && DECL_DECLARED_INLINE_P (decl) && DECL_UNINLINABLE (decl) && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl))) warning (OPT_Wattributes, "inline function %q+D given attribute noinline", decl); /* C99 6.7.4p3: An inline definition of a function with external linkage shall not contain a definition of a modifiable object with static storage duration... */ if (TREE_CODE (decl) == VAR_DECL && current_scope != file_scope && TREE_STATIC (decl) && !TREE_READONLY (decl) && DECL_DECLARED_INLINE_P (current_function_decl) && DECL_EXTERNAL (current_function_decl)) record_inline_static (input_location, current_function_decl, decl, csi_modifiable); if (c_dialect_objc () && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL)) objc_check_global_decl (decl); /* Add this decl to the current scope. TEM may equal DECL or it may be a previous decl of the same name. */ tem = pushdecl (decl); if (initialized && DECL_EXTERNAL (tem)) { DECL_EXTERNAL (tem) = 0; TREE_STATIC (tem) = 1; } return tem; } /* Subroutine of finish_decl. TYPE is the type of an uninitialized object DECL or the non-array element type if DECL is an uninitialized array. If that type has a const member, diagnose this. */ static void diagnose_uninitialized_cst_member (tree decl, tree type) { tree field; for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) { tree field_type; if (TREE_CODE (field) != FIELD_DECL) continue; field_type = strip_array_types (TREE_TYPE (field)); if (TYPE_QUALS (field_type) & TYPE_QUAL_CONST) { warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat, "uninitialized const member in %qT is invalid in C++", strip_array_types (TREE_TYPE (decl))); inform (DECL_SOURCE_LOCATION (field), "%qD should be initialized", field); } if (TREE_CODE (field_type) == RECORD_TYPE || TREE_CODE (field_type) == UNION_TYPE) diagnose_uninitialized_cst_member (decl, field_type); } } /* Finish processing of a declaration; install its initial value. If ORIGTYPE is not NULL_TREE, it is the original type of INIT. If the length of an array type is not known before, it must be determined now, from the initial value, or it is an error. INIT_LOC is the location of the initial value. */ void finish_decl (tree decl, location_t init_loc, tree init, tree origtype, tree asmspec_tree) { tree type; bool was_incomplete = (DECL_SIZE (decl) == 0); const char *asmspec = 0; /* If a name was specified, get the string. 
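   That is, an assembler name supplied with the asm ("...") declarator
   suffix, as in the illustrative

     extern int counter asm ("counter_v2");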
*/ if ((TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == VAR_DECL) && DECL_FILE_SCOPE_P (decl)) asmspec_tree = maybe_apply_renaming_pragma (decl, asmspec_tree); if (asmspec_tree) asmspec = TREE_STRING_POINTER (asmspec_tree); if (TREE_CODE (decl) == VAR_DECL && TREE_STATIC (decl) && global_bindings_p ()) /* So decl is a global variable. Record the types it uses so that we can decide later to emit debug info for them. */ record_types_used_by_current_var_decl (decl); /* If `start_decl' didn't like having an initialization, ignore it now. */ if (init != 0 && DECL_INITIAL (decl) == 0) init = 0; /* Don't crash if parm is initialized. */ if (TREE_CODE (decl) == PARM_DECL) init = 0; if (init) store_init_value (init_loc, decl, init, origtype); if (c_dialect_objc () && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == FIELD_DECL)) objc_check_decl (decl); type = TREE_TYPE (decl); /* Deduce size of array from initialization, if not already known. */ if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == 0 && TREE_CODE (decl) != TYPE_DECL) { bool do_default = (TREE_STATIC (decl) /* Even if pedantic, an external linkage array may have incomplete type at first. */ ? pedantic && !TREE_PUBLIC (decl) : !DECL_EXTERNAL (decl)); int failure = complete_array_type (&TREE_TYPE (decl), DECL_INITIAL (decl), do_default); /* Get the completed type made by complete_array_type. */ type = TREE_TYPE (decl); switch (failure) { case 1: error ("initializer fails to determine size of %q+D", decl); break; case 2: if (do_default) error ("array size missing in %q+D", decl); /* If a `static' var's size isn't known, make it extern as well as static, so it does not get allocated. If it is not `static', then do not mark extern; finish_incomplete_decl will give it a default size and it will get allocated. */ else if (!pedantic && TREE_STATIC (decl) && !TREE_PUBLIC (decl)) DECL_EXTERNAL (decl) = 1; break; case 3: error ("zero or negative size array %q+D", decl); break; case 0: /* For global variables, update the copy of the type that exists in the binding. */ if (TREE_PUBLIC (decl)) { struct c_binding *b_ext = I_SYMBOL_BINDING (DECL_NAME (decl)); while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext)) b_ext = b_ext->shadowed; if (b_ext) { if (b_ext->u.type && comptypes (b_ext->u.type, type)) b_ext->u.type = composite_type (b_ext->u.type, type); else b_ext->u.type = type; } } break; default: gcc_unreachable (); } if (DECL_INITIAL (decl)) TREE_TYPE (DECL_INITIAL (decl)) = type; relayout_decl (decl); } if (TREE_CODE (decl) == VAR_DECL) { if (init && TREE_CODE (init) == CONSTRUCTOR) add_flexible_array_elts_to_size (decl, init); if (DECL_SIZE (decl) == 0 && TREE_TYPE (decl) != error_mark_node && COMPLETE_TYPE_P (TREE_TYPE (decl))) layout_decl (decl, 0); if (DECL_SIZE (decl) == 0 /* Don't give an error if we already gave one earlier. */ && TREE_TYPE (decl) != error_mark_node && (TREE_STATIC (decl) /* A static variable with an incomplete type is an error if it is initialized. Also if it is not file scope. Otherwise, let it through, but if it is not `extern' then it may cause an error message later. */ ? (DECL_INITIAL (decl) != 0 || !DECL_FILE_SCOPE_P (decl)) /* An automatic variable with an incomplete type is an error. 
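   For instance (tag hypothetical), the block-scope

     struct never_defined s;

   is rejected just below ("storage size of 's' isn't known"), whereas
   a file-scope tentative definition is let through in the hope that
   the type is completed later.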
*/ : !DECL_EXTERNAL (decl))) { error ("storage size of %q+D isn%'t known", decl); TREE_TYPE (decl) = error_mark_node; } if ((DECL_EXTERNAL (decl) || TREE_STATIC (decl)) && DECL_SIZE (decl) != 0) { if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST) constant_expression_warning (DECL_SIZE (decl)); else { error ("storage size of %q+D isn%'t constant", decl); TREE_TYPE (decl) = error_mark_node; } } if (TREE_USED (type)) { TREE_USED (decl) = 1; DECL_READ_P (decl) = 1; } } /* If this is a function and an assembler name is specified, reset DECL_RTL so we can give it its new name. Also, update builtin_decl if it was a normal built-in. */ if (TREE_CODE (decl) == FUNCTION_DECL && asmspec) { if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL) set_builtin_user_assembler_name (decl, asmspec); set_user_assembler_name (decl, asmspec); } /* If #pragma weak was used, mark the decl weak now. */ maybe_apply_pragma_weak (decl); /* Output the assembler code and/or RTL code for variables and functions, unless the type is an undefined structure or union. If not, it will get done when the type is completed. */ if (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL) { /* Determine the ELF visibility. */ if (TREE_PUBLIC (decl)) c_determine_visibility (decl); /* This is a no-op in c-lang.c or something real in objc-act.c. */ if (c_dialect_objc ()) objc_check_decl (decl); if (asmspec) { /* If this is not a static variable, issue a warning. It doesn't make any sense to give an ASMSPEC for an ordinary, non-register local variable. Historically, GCC has accepted -- but ignored -- the ASMSPEC in this case. */ if (!DECL_FILE_SCOPE_P (decl) && TREE_CODE (decl) == VAR_DECL && !C_DECL_REGISTER (decl) && !TREE_STATIC (decl)) warning (0, "ignoring asm-specifier for non-static local " "variable %q+D", decl); else set_user_assembler_name (decl, asmspec); } if (DECL_FILE_SCOPE_P (decl)) { if (DECL_INITIAL (decl) == NULL_TREE || DECL_INITIAL (decl) == error_mark_node) /* Don't output anything when a tentative file-scope definition is seen. But at end of compilation, do output code for them. */ DECL_DEFER_OUTPUT (decl) = 1; if (asmspec && C_DECL_REGISTER (decl)) DECL_HARD_REGISTER (decl) = 1; rest_of_decl_compilation (decl, true, 0); } else { /* In conjunction with an ASMSPEC, the `register' keyword indicates that we should place the variable in a particular register. */ if (asmspec && C_DECL_REGISTER (decl)) { DECL_HARD_REGISTER (decl) = 1; /* This cannot be done for a structure with volatile fields, on which DECL_REGISTER will have been reset. */ if (!DECL_REGISTER (decl)) error ("cannot put object with volatile field into register"); } if (TREE_CODE (decl) != FUNCTION_DECL) { /* If we're building a variable sized type, and we might be reachable other than via the top of the current binding level, then create a new BIND_EXPR so that we deallocate the object at the right time. */ /* Note that DECL_SIZE can be null due to errors. */ if (DECL_SIZE (decl) && !TREE_CONSTANT (DECL_SIZE (decl)) && STATEMENT_LIST_HAS_LABEL (cur_stmt_list)) { tree bind; bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (bind) = 1; add_stmt (bind); BIND_EXPR_BODY (bind) = push_stmt_list (); } add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl)); } } if (!DECL_FILE_SCOPE_P (decl)) { /* Recompute the RTL of a local array now if it used to be an incomplete type. */ if (was_incomplete && !TREE_STATIC (decl) && !DECL_EXTERNAL (decl)) { /* If we used it already as memory, it must stay in memory. 
*/ TREE_ADDRESSABLE (decl) = TREE_USED (decl); /* If it's still incomplete now, no init will save it. */ if (DECL_SIZE (decl) == 0) DECL_INITIAL (decl) = 0; } } } if (TREE_CODE (decl) == TYPE_DECL) { if (!DECL_FILE_SCOPE_P (decl) && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE)) add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl)); rest_of_decl_compilation (decl, DECL_FILE_SCOPE_P (decl), 0); } /* Install a cleanup (aka destructor) if one was given. */ if (TREE_CODE (decl) == VAR_DECL && !TREE_STATIC (decl)) { tree attr = lookup_attribute ("cleanup", DECL_ATTRIBUTES (decl)); if (attr) { tree cleanup_id = TREE_VALUE (TREE_VALUE (attr)); tree cleanup_decl = lookup_name (cleanup_id); tree cleanup; vec<tree, va_gc> *v; /* Build "cleanup(&decl)" for the destructor. */ cleanup = build_unary_op (input_location, ADDR_EXPR, decl, 0); vec_alloc (v, 1); v->quick_push (cleanup); cleanup = c_build_function_call_vec (DECL_SOURCE_LOCATION (decl), vNULL, cleanup_decl, v, NULL); vec_free (v); /* Don't warn about decl unused; the cleanup uses it. */ TREE_USED (decl) = 1; TREE_USED (cleanup_decl) = 1; DECL_READ_P (decl) = 1; push_cleanup (decl, cleanup, false); } } if (warn_cxx_compat && TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl) && DECL_INITIAL (decl) == NULL_TREE) { type = strip_array_types (type); if (TREE_READONLY (decl)) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat, "uninitialized const %qD is invalid in C++", decl); else if ((TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE) && C_TYPE_FIELDS_READONLY (type)) diagnose_uninitialized_cst_member (decl, type); } invoke_plugin_callbacks (PLUGIN_FINISH_DECL, decl); } /* Given a parsed parameter declaration, decode it into a PARM_DECL. EXPR is NULL or a pointer to an expression that needs to be evaluated for the side effects of array size expressions in the parameters. */ tree grokparm (const struct c_parm *parm, tree *expr) { tree attrs = parm->attrs; tree decl = grokdeclarator (parm->declarator, parm->specs, PARM, false, NULL, &attrs, expr, NULL, DEPRECATED_NORMAL); decl_attributes (&decl, attrs, 0); return decl; } /* Given a parsed parameter declaration, decode it into a PARM_DECL and push that on the current scope. EXPR is a pointer to an expression that needs to be evaluated for the side effects of array size expressions in the parameters. */ void push_parm_decl (const struct c_parm *parm, tree *expr) { tree attrs = parm->attrs; tree decl; decl = grokdeclarator (parm->declarator, parm->specs, PARM, false, NULL, &attrs, expr, NULL, DEPRECATED_NORMAL); decl_attributes (&decl, attrs, 0); decl = pushdecl (decl); finish_decl (decl, input_location, NULL_TREE, NULL_TREE, NULL_TREE); } /* Mark all the parameter declarations to date as forward decls. Also diagnose use of this extension. */ void mark_forward_parm_decls (void) { struct c_binding *b; if (pedantic && !current_scope->warned_forward_parm_decls) { pedwarn (input_location, OPT_Wpedantic, "ISO C forbids forward parameter declarations"); current_scope->warned_forward_parm_decls = true; } for (b = current_scope->bindings; b; b = b->prev) if (TREE_CODE (b->decl) == PARM_DECL) TREE_ASM_WRITTEN (b->decl) = 1; } /* Build a COMPOUND_LITERAL_EXPR. TYPE is the type given in the compound literal, which may be an incomplete array type completed by the initializer; INIT is a CONSTRUCTOR at LOC that initializes the compound literal. NON_CONST is true if the initializers contain something that cannot occur in a constant expression. 
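   For example, the C99 expression (illustrative)

     (int []) { 1, 2, 3 }

   arrives here with the incomplete type "int []", which the
   initializer completes to "int [3]" below.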
*/ tree build_compound_literal (location_t loc, tree type, tree init, bool non_const) { /* We do not use start_decl here because we have a type, not a declarator; and do not use finish_decl because the decl should be stored inside the COMPOUND_LITERAL_EXPR rather than added elsewhere as a DECL_EXPR. */ tree decl; tree complit; tree stmt; if (type == error_mark_node || init == error_mark_node) return error_mark_node; decl = build_decl (loc, VAR_DECL, NULL_TREE, type); DECL_EXTERNAL (decl) = 0; TREE_PUBLIC (decl) = 0; TREE_STATIC (decl) = (current_scope == file_scope); DECL_CONTEXT (decl) = current_function_decl; TREE_USED (decl) = 1; DECL_READ_P (decl) = 1; TREE_TYPE (decl) = type; TREE_READONLY (decl) = (TYPE_READONLY (type) || (TREE_CODE (type) == ARRAY_TYPE && TYPE_READONLY (TREE_TYPE (type)))); store_init_value (loc, decl, init, NULL_TREE); if (TREE_CODE (type) == ARRAY_TYPE && !COMPLETE_TYPE_P (type)) { int failure = complete_array_type (&TREE_TYPE (decl), DECL_INITIAL (decl), true); /* If complete_array_type returns 3, it means that the initial value of the compound literal is empty. Allow it. */ gcc_assert (failure == 0 || failure == 3); type = TREE_TYPE (decl); TREE_TYPE (DECL_INITIAL (decl)) = type; } if (type == error_mark_node || !COMPLETE_TYPE_P (type)) { c_incomplete_type_error (NULL_TREE, type); return error_mark_node; } stmt = build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl); complit = build1 (COMPOUND_LITERAL_EXPR, type, stmt); TREE_SIDE_EFFECTS (complit) = 1; layout_decl (decl, 0); if (TREE_STATIC (decl)) { /* This decl needs a name for the assembler output. */ set_compound_literal_name (decl); DECL_DEFER_OUTPUT (decl) = 1; DECL_COMDAT (decl) = 1; DECL_ARTIFICIAL (decl) = 1; DECL_IGNORED_P (decl) = 1; pushdecl (decl); rest_of_decl_compilation (decl, 1, 0); } if (non_const) { complit = build2 (C_MAYBE_CONST_EXPR, type, NULL, complit); C_MAYBE_CONST_EXPR_NON_CONST (complit) = 1; } return complit; } /* Check the type of a compound literal. Here we just check that it is valid for C++. */ void check_compound_literal_type (location_t loc, struct c_type_name *type_name) { if (warn_cxx_compat && (type_name->specs->typespec_kind == ctsk_tagdef || type_name->specs->typespec_kind == ctsk_tagfirstref)) warning_at (loc, OPT_Wc___compat, "defining a type in a compound literal is invalid in C++"); } /* Determine whether TYPE is a structure with a flexible array member, or a union containing such a structure (possibly recursively). */ static bool flexible_array_type_p (tree type) { tree x; switch (TREE_CODE (type)) { case RECORD_TYPE: x = TYPE_FIELDS (type); if (x == NULL_TREE) return false; while (DECL_CHAIN (x) != NULL_TREE) x = DECL_CHAIN (x); if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE && TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE && TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE) return true; return false; case UNION_TYPE: for (x = TYPE_FIELDS (type); x != NULL_TREE; x = DECL_CHAIN (x)) { if (flexible_array_type_p (TREE_TYPE (x))) return true; } return false; default: return false; } } /* Performs sanity checks on the TYPE and WIDTH of the bit-field NAME, replacing with appropriate values if they are invalid. */ static void check_bitfield_type_and_width (tree *type, tree *width, tree orig_name) { tree type_mv; unsigned int max_width; unsigned HOST_WIDE_INT w; const char *name = (orig_name ? 
identifier_to_locale (IDENTIFIER_POINTER (orig_name)) : _("<anonymous>")); /* Detect and ignore out of range field width and process valid field widths. */ if (!INTEGRAL_TYPE_P (TREE_TYPE (*width))) { error ("bit-field %qs width not an integer constant", name); *width = integer_one_node; } else { if (TREE_CODE (*width) != INTEGER_CST) { *width = c_fully_fold (*width, false, NULL); if (TREE_CODE (*width) == INTEGER_CST) pedwarn (input_location, OPT_Wpedantic, "bit-field %qs width not an integer constant expression", name); } if (TREE_CODE (*width) != INTEGER_CST) { error ("bit-field %qs width not an integer constant", name); *width = integer_one_node; } constant_expression_warning (*width); if (tree_int_cst_sgn (*width) < 0) { error ("negative width in bit-field %qs", name); *width = integer_one_node; } else if (integer_zerop (*width) && orig_name) { error ("zero width for bit-field %qs", name); *width = integer_one_node; } } /* Detect invalid bit-field type. */ if (TREE_CODE (*type) != INTEGER_TYPE && TREE_CODE (*type) != BOOLEAN_TYPE && TREE_CODE (*type) != ENUMERAL_TYPE) { error ("bit-field %qs has invalid type", name); *type = unsigned_type_node; } type_mv = TYPE_MAIN_VARIANT (*type); if (!in_system_header_at (input_location) && type_mv != integer_type_node && type_mv != unsigned_type_node && type_mv != boolean_type_node) pedwarn_c90 (input_location, OPT_Wpedantic, "type of bit-field %qs is a GCC extension", name); max_width = TYPE_PRECISION (*type); if (0 < compare_tree_int (*width, max_width)) { error ("width of %qs exceeds its type", name); w = max_width; *width = build_int_cst (integer_type_node, w); } else w = tree_to_uhwi (*width); if (TREE_CODE (*type) == ENUMERAL_TYPE) { struct lang_type *lt = TYPE_LANG_SPECIFIC (*type); if (!lt || w < tree_int_cst_min_precision (lt->enum_min, TYPE_SIGN (*type)) || w < tree_int_cst_min_precision (lt->enum_max, TYPE_SIGN (*type))) warning (0, "%qs is narrower than values of its type", name); } } /* Print warning about variable length array if necessary. */ static void warn_variable_length_array (tree name, tree size) { if (TREE_CONSTANT (size)) { if (name) pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids array %qE whose size " "can%'t be evaluated", name); else pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids array " "whose size can%'t be evaluated"); } else { if (name) pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids variable length array %qE", name); else pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids variable " "length array"); } } /* Print warning about defaulting to int if necessary. */ static void warn_defaults_to (location_t location, int opt, const char *gmsgid, ...) { diagnostic_info diagnostic; va_list ap; va_start (ap, gmsgid); diagnostic_set_info (&diagnostic, gmsgid, &ap, location, flag_isoc99 ? DK_PEDWARN : DK_WARNING); diagnostic.option_index = opt; report_diagnostic (&diagnostic); va_end (ap); } /* Given declspecs and a declarator, determine the name and type of the object declared and construct a ..._DECL node for it. (In one case we can return a ..._TYPE node instead. For invalid input we sometimes return 0.) DECLSPECS is a c_declspecs structure for the declaration specifiers. DECL_CONTEXT says which syntactic context this declaration is in: NORMAL for most contexts. Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL. FUNCDEF for a function definition. Like NORMAL but a few different error messages in each case. Return value may be zero meaning this definition is too screwy to try to parse. 
PARM for a parameter declaration (either within a function prototype or before a function body). Make a PARM_DECL, or return void_type_node. TYPENAME if for a typename (in a cast or sizeof). Don't make a DECL node; just return the ..._TYPE node. FIELD for a struct or union field; make a FIELD_DECL. INITIALIZED is true if the decl has an initializer. WIDTH is non-NULL for bit-fields, and is a pointer to an INTEGER_CST node representing the width of the bit-field. DECL_ATTRS points to the list of attributes that should be added to this decl. Any nested attributes that belong on the decl itself will be added to this list. If EXPR is not NULL, any expressions that need to be evaluated as part of evaluating variably modified types will be stored in *EXPR. If EXPR_CONST_OPERANDS is not NULL, *EXPR_CONST_OPERANDS will be set to indicate whether operands in *EXPR can be used in constant expressions. DEPRECATED_STATE is a deprecated_states value indicating whether deprecation warnings should be suppressed. In the TYPENAME case, DECLARATOR is really an absolute declarator. It may also be so in the PARM case, for a prototype where the argument type is specified but not the name. This function is where the complicated C meanings of `static' and `extern' are interpreted. */ static tree grokdeclarator (const struct c_declarator *declarator, struct c_declspecs *declspecs, enum decl_context decl_context, bool initialized, tree *width, tree *decl_attrs, tree *expr, bool *expr_const_operands, enum deprecated_states deprecated_state) { tree type = declspecs->type; bool threadp = declspecs->thread_p; enum c_storage_class storage_class = declspecs->storage_class; int constp; int restrictp; int volatilep; int atomicp; int type_quals = TYPE_UNQUALIFIED; tree name = NULL_TREE; bool funcdef_flag = false; bool funcdef_syntax = false; bool size_varies = false; tree decl_attr = declspecs->decl_attr; int array_ptr_quals = TYPE_UNQUALIFIED; tree array_ptr_attrs = NULL_TREE; int array_parm_static = 0; bool array_parm_vla_unspec_p = false; tree returned_attrs = NULL_TREE; bool bitfield = width != NULL; tree element_type; tree orig_qual_type = NULL; size_t orig_qual_indirect = 0; struct c_arg_info *arg_info = 0; addr_space_t as1, as2, address_space; location_t loc = UNKNOWN_LOCATION; const char *errmsg; tree expr_dummy; bool expr_const_operands_dummy; enum c_declarator_kind first_non_attr_kind; unsigned int alignas_align = 0; if (TREE_CODE (type) == ERROR_MARK) return error_mark_node; if (expr == NULL) expr = &expr_dummy; if (expr_const_operands == NULL) expr_const_operands = &expr_const_operands_dummy; *expr = declspecs->expr; *expr_const_operands = declspecs->expr_const_operands; if (decl_context == FUNCDEF) funcdef_flag = true, decl_context = NORMAL; /* Look inside a declarator for the name being declared and get it as an IDENTIFIER_NODE, for an error message. */ { const struct c_declarator *decl = declarator; first_non_attr_kind = cdk_attrs; while (decl) switch (decl->kind) { case cdk_array: loc = decl->id_loc; /* FALL THRU. 
*/ case cdk_function: case cdk_pointer: funcdef_syntax = (decl->kind == cdk_function); if (first_non_attr_kind == cdk_attrs) first_non_attr_kind = decl->kind; decl = decl->declarator; break; case cdk_attrs: decl = decl->declarator; break; case cdk_id: loc = decl->id_loc; if (decl->u.id) name = decl->u.id; if (first_non_attr_kind == cdk_attrs) first_non_attr_kind = decl->kind; decl = 0; break; default: gcc_unreachable (); } if (name == 0) { gcc_assert (decl_context == PARM || decl_context == TYPENAME || (decl_context == FIELD && declarator->kind == cdk_id)); gcc_assert (!initialized); } } /* A function definition's declarator must have the form of a function declarator. */ if (funcdef_flag && !funcdef_syntax) return 0; /* If this looks like a function definition, make it one, even if it occurs where parms are expected. Then store_parm_decls will reject it and not use it as a parm. */ if (decl_context == NORMAL && !funcdef_flag && current_scope->parm_flag) decl_context = PARM; if (declspecs->deprecated_p && deprecated_state != DEPRECATED_SUPPRESS) warn_deprecated_use (declspecs->type, declspecs->decl_attr); if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope && variably_modified_type_p (type, NULL_TREE)) { if (name) error_at (loc, "variably modified %qE at file scope", name); else error_at (loc, "variably modified field at file scope"); type = integer_type_node; } size_varies = C_TYPE_VARIABLE_SIZE (type) != 0; /* Diagnose defaulting to "int". */ if (declspecs->default_int_p && !in_system_header_at (input_location)) { /* Issue a warning if this is an ISO C 99 program or if -Wreturn-type and this is a function, or if -Wimplicit; prefer the former warning since it is more explicit. */ if ((warn_implicit_int || warn_return_type || flag_isoc99) && funcdef_flag) warn_about_return_type = 1; else { if (name) warn_defaults_to (loc, OPT_Wimplicit_int, "type defaults to %<int%> in declaration " "of %qE", name); else warn_defaults_to (loc, OPT_Wimplicit_int, "type defaults to %<int%> in type name"); } } /* Adjust the type if a bit-field is being declared, -funsigned-bitfields applied and the type is not explicitly "signed". */ if (bitfield && !flag_signed_bitfields && !declspecs->explicit_signed_p && TREE_CODE (type) == INTEGER_TYPE) type = unsigned_type_for (type); /* Figure out the type qualifiers for the declaration. There are two ways a declaration can become qualified. One is something like `const int i' where the `const' is explicit. Another is something like `typedef const int CI; CI i' where the type of the declaration contains the `const'. A third possibility is that there is a type qualifier on the element type of a typedefed array type, in which case we should extract that qualifier so that c_apply_type_quals_to_decl receives the full list of qualifiers to work with (C90 is not entirely clear about whether duplicate qualifiers should be diagnosed in this case, but it seems most appropriate to do so). */ element_type = strip_array_types (type); constp = declspecs->const_p + TYPE_READONLY (element_type); restrictp = declspecs->restrict_p + TYPE_RESTRICT (element_type); volatilep = declspecs->volatile_p + TYPE_VOLATILE (element_type); atomicp = declspecs->atomic_p + TYPE_ATOMIC (element_type); as1 = declspecs->address_space; as2 = TYPE_ADDR_SPACE (element_type); address_space = ADDR_SPACE_GENERIC_P (as1)? 
as2 : as1; if (constp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<const%>"); if (restrictp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<restrict%>"); if (volatilep > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<volatile%>"); if (atomicp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<_Atomic%>"); if (!ADDR_SPACE_GENERIC_P (as1) && !ADDR_SPACE_GENERIC_P (as2) && as1 != as2) error_at (loc, "conflicting named address spaces (%s vs %s)", c_addr_space_name (as1), c_addr_space_name (as2)); if ((TREE_CODE (type) == ARRAY_TYPE || first_non_attr_kind == cdk_array) && TYPE_QUALS (element_type)) { orig_qual_type = type; type = TYPE_MAIN_VARIANT (type); } type_quals = ((constp ? TYPE_QUAL_CONST : 0) | (restrictp ? TYPE_QUAL_RESTRICT : 0) | (volatilep ? TYPE_QUAL_VOLATILE : 0) | (atomicp ? TYPE_QUAL_ATOMIC : 0) | ENCODE_QUAL_ADDR_SPACE (address_space)); if (type_quals != TYPE_QUALS (element_type)) orig_qual_type = NULL_TREE; /* Applying the _Atomic qualifier to an array type (through the use of typedefs or typeof) must be detected here. If the qualifier is introduced later, any appearance of applying it to an array is actually applying it to an element of that array. */ if (atomicp && TREE_CODE (type) == ARRAY_TYPE) error_at (loc, "%<_Atomic%>-qualified array type"); /* Warn about storage classes that are invalid for certain kinds of declarations (parameters, typenames, etc.). */ if (funcdef_flag && (threadp || storage_class == csc_auto || storage_class == csc_register || storage_class == csc_typedef)) { if (storage_class == csc_auto) pedwarn (loc, (current_scope == file_scope) ? 0 : OPT_Wpedantic, "function definition declared %<auto%>"); if (storage_class == csc_register) error_at (loc, "function definition declared %<register%>"); if (storage_class == csc_typedef) error_at (loc, "function definition declared %<typedef%>"); if (threadp) error_at (loc, "function definition declared %qs", declspecs->thread_gnu_p ? "__thread" : "_Thread_local"); threadp = false; if (storage_class == csc_auto || storage_class == csc_register || storage_class == csc_typedef) storage_class = csc_none; } else if (decl_context != NORMAL && (storage_class != csc_none || threadp)) { if (decl_context == PARM && storage_class == csc_register) ; else { switch (decl_context) { case FIELD: if (name) error_at (loc, "storage class specified for structure " "field %qE", name); else error_at (loc, "storage class specified for structure field"); break; case PARM: if (name) error_at (loc, "storage class specified for parameter %qE", name); else error_at (loc, "storage class specified for unnamed parameter"); break; default: error_at (loc, "storage class specified for typename"); break; } storage_class = csc_none; threadp = false; } } else if (storage_class == csc_extern && initialized && !funcdef_flag) { /* 'extern' with initialization is invalid if not at file scope. */ if (current_scope == file_scope) { /* It is fine to have 'extern const' when compiling at C and C++ intersection. 
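   In C++ a const object has internal linkage by default, so the
   illustrative

     extern const int limit = 100;

   is the idiomatic way to give it external linkage there; warning
   about it under -Wc++-compat would be counterproductive.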
*/ if (!(warn_cxx_compat && constp)) warning_at (loc, 0, "%qE initialized and declared %<extern%>", name); } else error_at (loc, "%qE has both %<extern%> and initializer", name); } else if (current_scope == file_scope) { if (storage_class == csc_auto) error_at (loc, "file-scope declaration of %qE specifies %<auto%>", name); if (pedantic && storage_class == csc_register) pedwarn (input_location, OPT_Wpedantic, "file-scope declaration of %qE specifies %<register%>", name); } else { if (storage_class == csc_extern && funcdef_flag) error_at (loc, "nested function %qE declared %<extern%>", name); else if (threadp && storage_class == csc_none) { error_at (loc, "function-scope %qE implicitly auto and declared " "%qs", name, declspecs->thread_gnu_p ? "__thread" : "_Thread_local"); threadp = false; } } /* Now figure out the structure of the declarator proper. Descend through it, creating more complex types, until we reach the declared identifier (or NULL_TREE, in an absolute declarator). At each stage we maintain an unqualified version of the type together with any qualifiers that should be applied to it with c_build_qualified_type; this way, array types including multidimensional array types are first built up in unqualified form and then the qualified form is created with TYPE_MAIN_VARIANT pointing to the unqualified form. */ while (declarator && declarator->kind != cdk_id) { if (type == error_mark_node) { declarator = declarator->declarator; continue; } /* Each level of DECLARATOR is either a cdk_array (for ...[..]), a cdk_pointer (for *...), a cdk_function (for ...(...)), a cdk_attrs (for nested attributes), or a cdk_id (for the name being declared or the place in an absolute declarator where the name was omitted). For the last case, we have just exited the loop. At this point, TYPE is the type of elements of an array, or for a function to return, or for a pointer to point to. After this sequence of ifs, TYPE is the type of the array or function or pointer, and DECLARATOR has had its outermost layer removed. */ if (array_ptr_quals != TYPE_UNQUALIFIED || array_ptr_attrs != NULL_TREE || array_parm_static) { /* Only the innermost declarator (making a parameter be of array type which is converted to pointer type) may have static or type qualifiers. */ error_at (loc, "static or type qualifiers in non-parameter array declarator"); array_ptr_quals = TYPE_UNQUALIFIED; array_ptr_attrs = NULL_TREE; array_parm_static = 0; } switch (declarator->kind) { case cdk_attrs: { /* A declarator with embedded attributes. */ tree attrs = declarator->u.attrs; const struct c_declarator *inner_decl; int attr_flags = 0; declarator = declarator->declarator; inner_decl = declarator; while (inner_decl->kind == cdk_attrs) inner_decl = inner_decl->declarator; if (inner_decl->kind == cdk_id) attr_flags |= (int) ATTR_FLAG_DECL_NEXT; else if (inner_decl->kind == cdk_function) attr_flags |= (int) ATTR_FLAG_FUNCTION_NEXT; else if (inner_decl->kind == cdk_array) attr_flags |= (int) ATTR_FLAG_ARRAY_NEXT; returned_attrs = decl_attributes (&type, chainon (returned_attrs, attrs), attr_flags); break; } case cdk_array: { tree itype = NULL_TREE; tree size = declarator->u.array.dimen; /* The index is a signed object `sizetype' bits wide. 
*/ tree index_type = c_common_signed_type (sizetype); array_ptr_quals = declarator->u.array.quals; array_ptr_attrs = declarator->u.array.attrs; array_parm_static = declarator->u.array.static_p; array_parm_vla_unspec_p = declarator->u.array.vla_unspec_p; declarator = declarator->declarator; /* Check for some types that there cannot be arrays of. */ if (VOID_TYPE_P (type)) { if (name) error_at (loc, "declaration of %qE as array of voids", name); else error_at (loc, "declaration of type name as array of voids"); type = error_mark_node; } if (TREE_CODE (type) == FUNCTION_TYPE) { if (name) error_at (loc, "declaration of %qE as array of functions", name); else error_at (loc, "declaration of type name as array of " "functions"); type = error_mark_node; } if (pedantic && !in_system_header_at (input_location) && flexible_array_type_p (type)) pedwarn (loc, OPT_Wpedantic, "invalid use of structure with flexible array member"); if (size == error_mark_node) type = error_mark_node; if (type == error_mark_node) continue; /* If size was specified, set ITYPE to a range-type for that size. Otherwise, ITYPE remains null. finish_decl may figure it out from an initial value. */ if (size) { bool size_maybe_const = true; bool size_int_const = (TREE_CODE (size) == INTEGER_CST && !TREE_OVERFLOW (size)); bool this_size_varies = false; /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */ STRIP_TYPE_NOPS (size); if (!INTEGRAL_TYPE_P (TREE_TYPE (size))) { if (name) error_at (loc, "size of array %qE has non-integer type", name); else error_at (loc, "size of unnamed array has non-integer type"); size = integer_one_node; } size = c_fully_fold (size, false, &size_maybe_const); if (pedantic && size_maybe_const && integer_zerop (size)) { if (name) pedwarn (loc, OPT_Wpedantic, "ISO C forbids zero-size array %qE", name); else pedwarn (loc, OPT_Wpedantic, "ISO C forbids zero-size array"); } if (TREE_CODE (size) == INTEGER_CST && size_maybe_const) { constant_expression_warning (size); if (tree_int_cst_sgn (size) < 0) { if (name) error_at (loc, "size of array %qE is negative", name); else error_at (loc, "size of unnamed array is negative"); size = integer_one_node; } /* Handle a size folded to an integer constant but not an integer constant expression. */ if (!size_int_const) { /* If this is a file scope declaration of an ordinary identifier, this is invalid code; diagnosing it here and not subsequently treating the type as variable-length avoids more confusing diagnostics later. */ if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope) pedwarn (input_location, 0, "variably modified %qE at file scope", name); else this_size_varies = size_varies = true; warn_variable_length_array (name, size); } } else if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope) { error_at (loc, "variably modified %qE at file scope", name); size = integer_one_node; } else { /* Make sure the array size remains visibly nonconstant even if it is (eg) a const variable with known value. */ this_size_varies = size_varies = true; warn_variable_length_array (name, size); if (flag_sanitize & SANITIZE_VLA && decl_context == NORMAL && do_ubsan_in_current_function ()) { /* Evaluate the array size only once. 
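   The size expression may have side effects, as in the illustrative

     int buf[n++];

   so it is wrapped in a SAVE_EXPR, letting the ubsan bound check and
   the array type itself share a single evaluation.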
*/ size = c_save_expr (size); size = c_fully_fold (size, false, NULL); size = fold_build2 (COMPOUND_EXPR, TREE_TYPE (size), ubsan_instrument_vla (loc, size), size); } } if (integer_zerop (size) && !this_size_varies) { /* A zero-length array cannot be represented with an unsigned index type, which is what we'll get with build_index_type. Create an open-ended range instead. */ itype = build_range_type (sizetype, size, NULL_TREE); } else { /* Arrange for the SAVE_EXPR on the inside of the MINUS_EXPR, which allows the -1 to get folded with the +1 that happens when building TYPE_SIZE. */ if (size_varies) size = save_expr (size); if (this_size_varies && TREE_CODE (size) == INTEGER_CST) size = build2 (COMPOUND_EXPR, TREE_TYPE (size), integer_zero_node, size); /* Compute the maximum valid index, that is, size - 1. Do the calculation in index_type, so that if it is a variable the computations will be done in the proper mode. */ itype = fold_build2_loc (loc, MINUS_EXPR, index_type, convert (index_type, size), convert (index_type, size_one_node)); /* The above overflows when size does not fit in index_type. ??? While a size of INT_MAX+1 technically shouldn't cause an overflow (because we subtract 1), handling this case seems like an unnecessary complication. */ if (TREE_CODE (size) == INTEGER_CST && !int_fits_type_p (size, index_type)) { if (name) error_at (loc, "size of array %qE is too large", name); else error_at (loc, "size of unnamed array is too large"); type = error_mark_node; continue; } itype = build_index_type (itype); } if (this_size_varies) { if (*expr) *expr = build2 (COMPOUND_EXPR, TREE_TYPE (size), *expr, size); else *expr = size; *expr_const_operands &= size_maybe_const; } } else if (decl_context == FIELD) { bool flexible_array_member = false; if (array_parm_vla_unspec_p) /* Field names can in fact have function prototype scope so [*] is disallowed here through making the field variably modified, not through being something other than a declaration with function prototype scope. */ size_varies = true; else { const struct c_declarator *t = declarator; while (t->kind == cdk_attrs) t = t->declarator; flexible_array_member = (t->kind == cdk_id); } if (flexible_array_member && !in_system_header_at (input_location)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not " "support flexible array members"); /* ISO C99 flexible array members are effectively identical to GCC's zero-length array extension. */ if (flexible_array_member || array_parm_vla_unspec_p) itype = build_range_type (sizetype, size_zero_node, NULL_TREE); } else if (decl_context == PARM) { if (array_parm_vla_unspec_p) { itype = build_range_type (sizetype, size_zero_node, NULL_TREE); size_varies = true; } } else if (decl_context == TYPENAME) { if (array_parm_vla_unspec_p) { /* C99 6.7.5.2p4 */ warning (0, "%<[*]%> not in a declaration"); /* We use this to avoid messing up incomplete array types of the same element type, which would otherwise be modified below. */ itype = build_range_type (sizetype, size_zero_node, NULL_TREE); size_varies = true; } } /* Complain about arrays of incomplete types. */ if (!COMPLETE_TYPE_P (type)) { error_at (loc, "array type has incomplete element type %qT", type); type = error_mark_node; } else /* When itype is NULL, a shared incomplete array type is returned for all arrays of a given type. Elsewhere we make sure we don't complete that type before copying it, but here we want to make sure we don't ever modify the shared type, so we gcc_assert (itype) below. 
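   That is, the illustrative pair

     extern int a[];
     extern int b[];

   reuses a single "int []" type node, so completing it in place for
   one declaration would silently complete it for the other as well.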
*/ { addr_space_t as = DECODE_QUAL_ADDR_SPACE (type_quals); if (!ADDR_SPACE_GENERIC_P (as) && as != TYPE_ADDR_SPACE (type)) type = build_qualified_type (type, ENCODE_QUAL_ADDR_SPACE (as)); type = build_array_type (type, itype); } if (type != error_mark_node) { if (size_varies) { /* It is ok to modify type here even if itype is NULL: if size_varies, we're in a multi-dimensional array and the inner type has variable size, so the enclosing shared array type must too. */ if (size && TREE_CODE (size) == INTEGER_CST) type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); C_TYPE_VARIABLE_SIZE (type) = 1; } /* The GCC extension for zero-length arrays differs from ISO flexible array members in that sizeof yields zero. */ if (size && integer_zerop (size)) { gcc_assert (itype); type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_SIZE (type) = bitsize_zero_node; TYPE_SIZE_UNIT (type) = size_zero_node; SET_TYPE_STRUCTURAL_EQUALITY (type); } if (array_parm_vla_unspec_p) { gcc_assert (itype); /* The type is complete. C99 6.7.5.2p4 */ type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_SIZE (type) = bitsize_zero_node; TYPE_SIZE_UNIT (type) = size_zero_node; SET_TYPE_STRUCTURAL_EQUALITY (type); } } if (decl_context != PARM && (array_ptr_quals != TYPE_UNQUALIFIED || array_ptr_attrs != NULL_TREE || array_parm_static)) { error_at (loc, "static or type qualifiers in non-parameter array declarator"); array_ptr_quals = TYPE_UNQUALIFIED; array_ptr_attrs = NULL_TREE; array_parm_static = 0; } orig_qual_indirect++; break; } case cdk_function: { /* Say it's a definition only for the declarator closest to the identifier, apart possibly from some attributes. */ bool really_funcdef = false; tree arg_types; orig_qual_type = NULL_TREE; if (funcdef_flag) { const struct c_declarator *t = declarator->declarator; while (t->kind == cdk_attrs) t = t->declarator; really_funcdef = (t->kind == cdk_id); } /* Declaring a function type. Make sure we have a valid type for the function to return. */ if (type == error_mark_node) continue; size_varies = false; /* Warn about some types functions can't return. */ if (TREE_CODE (type) == FUNCTION_TYPE) { if (name) error_at (loc, "%qE declared as function returning a " "function", name); else error_at (loc, "type name declared as function " "returning a function"); type = integer_type_node; } if (TREE_CODE (type) == ARRAY_TYPE) { if (name) error_at (loc, "%qE declared as function returning an array", name); else error_at (loc, "type name declared as function returning " "an array"); type = integer_type_node; } errmsg = targetm.invalid_return_type (type); if (errmsg) { error (errmsg); type = integer_type_node; } /* Construct the function type and go to the next inner layer of declarator. */ arg_info = declarator->u.arg_info; arg_types = grokparms (arg_info, really_funcdef); /* Type qualifiers before the return type of the function qualify the return type, not the function type. */ if (type_quals) { /* Type qualifiers on a function return type are normally permitted by the standard but have no effect, so give a warning at -Wignored-qualifiers. Qualifiers on a void return type are banned on function definitions in ISO C; GCC used to use them for noreturn functions. 
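   (The historical idiom, shown here only as an illustration, was

     volatile void fatal (const char *msg);

   before __attribute__ ((noreturn)) and C11 _Noreturn existed.)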
*/ if (VOID_TYPE_P (type) && really_funcdef) pedwarn (loc, 0, "function definition has qualified void return type"); else warning_at (loc, OPT_Wignored_qualifiers, "type qualifiers ignored on function return type"); type = c_build_qualified_type (type, type_quals); } type_quals = TYPE_UNQUALIFIED; type = build_function_type (type, arg_types); declarator = declarator->declarator; /* Set the TYPE_CONTEXTs for each tagged type which is local to the formal parameter list of this FUNCTION_TYPE to point to the FUNCTION_TYPE node itself. */ { c_arg_tag *tag; unsigned ix; FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag) TYPE_CONTEXT (tag->type) = type; } break; } case cdk_pointer: { /* Merge any constancy or volatility into the target type for the pointer. */ if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); orig_qual_type = NULL_TREE; size_varies = false; /* When the pointed-to type involves components of variable size, care must be taken to ensure that the size evaluation code is emitted early enough to dominate all the possible later uses and late enough for the variables on which it depends to have been assigned. This is expected to happen automatically when the pointed-to type has a name/declaration of its own, but special attention is required if the type is anonymous. We handle the NORMAL and FIELD contexts here by attaching an artificial TYPE_DECL to such a pointed-to type. This forces the size evaluation at a safe point and ensures it is not deferred until e.g. within a deeper conditional context. We expect nothing to be needed here for PARM or TYPENAME. Pushing a TYPE_DECL at this point for TYPENAME would actually be incorrect, as we might be in the middle of an expression with side effects on the pointed-to type size "arguments" prior to the pointer declaration point and the fake TYPE_DECL in the enclosing context would force the size evaluation prior to the side effects. */ if (!TYPE_NAME (type) && (decl_context == NORMAL || decl_context == FIELD) && variably_modified_type_p (type, NULL_TREE)) { tree decl = build_decl (loc, TYPE_DECL, NULL_TREE, type); DECL_ARTIFICIAL (decl) = 1; pushdecl (decl); finish_decl (decl, loc, NULL_TREE, NULL_TREE, NULL_TREE); TYPE_NAME (type) = decl; } type = c_build_pointer_type (type); /* Process type qualifiers (such as const or volatile) that were given inside the `*'. */ type_quals = declarator->u.pointer_quals; declarator = declarator->declarator; break; } default: gcc_unreachable (); } } *decl_attrs = chainon (returned_attrs, *decl_attrs); /* Now TYPE has the actual type, apart from any qualifiers in TYPE_QUALS. */ /* Warn about address space used for things other than static memory or pointers. 
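   Named address spaces are target-specific; on AVR, for example, the
   illustrative

     void f (void) { __flash int table = 0; }

   is rejected here, since an automatic variable cannot be placed in
   flash memory.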
*/ address_space = DECODE_QUAL_ADDR_SPACE (type_quals); if (!ADDR_SPACE_GENERIC_P (address_space)) { if (decl_context == NORMAL) { switch (storage_class) { case csc_auto: error ("%qs combined with %<auto%> qualifier for %qE", c_addr_space_name (address_space), name); break; case csc_register: error ("%qs combined with %<register%> qualifier for %qE", c_addr_space_name (address_space), name); break; case csc_none: if (current_function_scope) { error ("%qs specified for auto variable %qE", c_addr_space_name (address_space), name); break; } break; case csc_static: case csc_extern: case csc_typedef: break; default: gcc_unreachable (); } } else if (decl_context == PARM && TREE_CODE (type) != ARRAY_TYPE) { if (name) error ("%qs specified for parameter %qE", c_addr_space_name (address_space), name); else error ("%qs specified for unnamed parameter", c_addr_space_name (address_space)); } else if (decl_context == FIELD) { if (name) error ("%qs specified for structure field %qE", c_addr_space_name (address_space), name); else error ("%qs specified for structure field", c_addr_space_name (address_space)); } } /* Check the type and width of a bit-field. */ if (bitfield) { check_bitfield_type_and_width (&type, width, name); /* C11 makes it implementation-defined (6.7.2.1#5) whether atomic types are permitted for bit-fields; we have no code to make bit-field accesses atomic, so disallow them. */ if (type_quals & TYPE_QUAL_ATOMIC) { if (name) error ("bit-field %qE has atomic type", name); else error ("bit-field has atomic type"); type_quals &= ~TYPE_QUAL_ATOMIC; } } /* Reject invalid uses of _Alignas. */ if (declspecs->alignas_p) { if (storage_class == csc_typedef) error_at (loc, "alignment specified for typedef %qE", name); else if (storage_class == csc_register) error_at (loc, "alignment specified for %<register%> object %qE", name); else if (decl_context == PARM) { if (name) error_at (loc, "alignment specified for parameter %qE", name); else error_at (loc, "alignment specified for unnamed parameter"); } else if (bitfield) { if (name) error_at (loc, "alignment specified for bit-field %qE", name); else error_at (loc, "alignment specified for unnamed bit-field"); } else if (TREE_CODE (type) == FUNCTION_TYPE) error_at (loc, "alignment specified for function %qE", name); else if (declspecs->align_log != -1) { alignas_align = 1U << declspecs->align_log; if (alignas_align < min_align_of_type (type)) { if (name) error_at (loc, "%<_Alignas%> specifiers cannot reduce " "alignment of %qE", name); else error_at (loc, "%<_Alignas%> specifiers cannot reduce " "alignment of unnamed field"); alignas_align = 0; } } } /* Did array size calculations overflow or does the array cover more than half of the address-space? */ if (TREE_CODE (type) == ARRAY_TYPE && COMPLETE_TYPE_P (type) && TREE_CODE (TYPE_SIZE_UNIT (type)) == INTEGER_CST && ! valid_constant_size_p (TYPE_SIZE_UNIT (type))) { if (name) error_at (loc, "size of array %qE is too large", name); else error_at (loc, "size of unnamed array is too large"); /* If we proceed with the array type as it is, we'll eventually crash in tree_to_[su]hwi(). */ type = error_mark_node; } /* If this is declaring a typedef name, return a TYPE_DECL. 
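   E.g. "typedef unsigned long word;" ends up here and yields a
   TYPE_DECL named "word" rather than a VAR_DECL.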
*/ if (storage_class == csc_typedef) { tree decl; if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); decl = build_decl (declarator->id_loc, TYPE_DECL, declarator->u.id, type); if (declspecs->explicit_signed_p) C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1; if (declspecs->inline_p) pedwarn (loc, 0, "typedef %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0, "typedef %q+D declared %<_Noreturn%>", decl); if (warn_cxx_compat && declarator->u.id != NULL_TREE) { struct c_binding *b = I_TAG_BINDING (declarator->u.id); if (b != NULL && b->decl != NULL_TREE && (B_IN_CURRENT_SCOPE (b) || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b))) && TYPE_MAIN_VARIANT (b->decl) != TYPE_MAIN_VARIANT (type)) { warning_at (declarator->id_loc, OPT_Wc___compat, ("using %qD as both a typedef and a tag is " "invalid in C++"), decl); if (b->locus != UNKNOWN_LOCATION) inform (b->locus, "originally defined here"); } } return decl; } /* If this is a type name (such as in a cast or sizeof), compute the type and return it now. */ if (decl_context == TYPENAME) { /* Note that the grammar rejects storage classes in typenames and fields. */ gcc_assert (storage_class == csc_none && !threadp && !declspecs->inline_p && !declspecs->noreturn_p); if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids const or volatile function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); return type; } if (pedantic && decl_context == FIELD && variably_modified_type_p (type, NULL_TREE)) { /* C99 6.7.2.1p8 */ pedwarn (loc, OPT_Wpedantic, "a member of a structure or union cannot " "have a variably modified type"); } /* Aside from typedefs and type names (handled above), `void' at top level (not within pointer) is allowed only in public variables. We don't complain about parms either, but that is because a better error message can be made later. */ if (VOID_TYPE_P (type) && decl_context != PARM && !((decl_context != FIELD && TREE_CODE (type) != FUNCTION_TYPE) && (storage_class == csc_extern || (current_scope == file_scope && !(storage_class == csc_static || storage_class == csc_register))))) { error_at (loc, "variable or field %qE declared void", name); type = integer_type_node; } /* Now create the decl, which may be a VAR_DECL, a PARM_DECL or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */ { tree decl; if (decl_context == PARM) { tree promoted_type; bool array_parameter_p = false; /* A parameter declared as an array of T is really a pointer to T. One declared as a function is really a pointer to a function. */ if (TREE_CODE (type) == ARRAY_TYPE) { /* Transfer const-ness of array into that of type pointed to. 
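   So the illustrative parameter declarations

     void f (const int v[10]);
     void g (int v[const 10]);

   become "const int *v" and "int *const v" respectively: element
   qualifiers move to the pointed-to type, while qualifiers written
   inside the brackets (array_ptr_quals) land on the pointer itself.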
*/ type = TREE_TYPE (type); if (orig_qual_type != NULL_TREE) { if (orig_qual_indirect == 0) orig_qual_type = TREE_TYPE (orig_qual_type); else orig_qual_indirect--; } if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); type = c_build_pointer_type (type); type_quals = array_ptr_quals; if (type_quals) type = c_build_qualified_type (type, type_quals); /* We don't yet implement attributes in this context. */ if (array_ptr_attrs != NULL_TREE) warning_at (loc, OPT_Wattributes, "attributes in parameter array declarator ignored"); size_varies = false; array_parameter_p = true; } else if (TREE_CODE (type) == FUNCTION_TYPE) { if (type_quals & TYPE_QUAL_ATOMIC) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals); type = c_build_pointer_type (type); type_quals = TYPE_UNQUALIFIED; } else if (type_quals) type = c_build_qualified_type (type, type_quals); decl = build_decl (declarator->id_loc, PARM_DECL, declarator->u.id, type); if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; C_ARRAY_PARAMETER (decl) = array_parameter_p; /* Compute the type actually passed in the parmlist, for the case where there is no prototype. (For example, shorts and chars are passed as ints.) When there is a prototype, this is overridden later. */ if (type == error_mark_node) promoted_type = type; else promoted_type = c_type_promotes_to (type); DECL_ARG_TYPE (decl) = promoted_type; if (declspecs->inline_p) pedwarn (loc, 0, "parameter %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0, "parameter %q+D declared %<_Noreturn%>", decl); } else if (decl_context == FIELD) { /* Note that the grammar rejects storage classes in typenames and fields. */ gcc_assert (storage_class == csc_none && !threadp && !declspecs->inline_p && !declspecs->noreturn_p); /* Structure field. It may not be a function. */ if (TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "field %qE declared as a function", name); type = build_pointer_type (type); } else if (TREE_CODE (type) != ERROR_MARK && !COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (type)) { if (name) error_at (loc, "field %qE has incomplete type", name); else error_at (loc, "unnamed field has incomplete type"); type = error_mark_node; } else if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == NULL_TREE) { /* We have a flexible array member through a typedef. Set suitable range. Whether this is a correct position for a flexible array member will be determined elsewhere. */ if (!in_system_header_at (input_location)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not " "support flexible array members"); type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_DOMAIN (type) = build_range_type (sizetype, size_zero_node, NULL_TREE); } type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); decl = build_decl (declarator->id_loc, FIELD_DECL, declarator->u.id, type); DECL_NONADDRESSABLE_P (decl) = bitfield; if (bitfield && !declarator->u.id) TREE_NO_WARNING (decl) = 1; if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; } else if (TREE_CODE (type) == FUNCTION_TYPE) { if (storage_class == csc_register || threadp) { error_at (loc, "invalid storage class for function %qE", name); } else if (current_scope != file_scope) { /* Function declaration not at file scope. 
Storage classes other than `extern' are not allowed, C99 6.7.1p5, and `extern' makes no difference. However, GCC allows 'auto', perhaps with 'inline', to support nested functions. */ if (storage_class == csc_auto) pedwarn (loc, OPT_Wpedantic, "invalid storage class for function %qE", name); else if (storage_class == csc_static) { error_at (loc, "invalid storage class for function %qE", name); if (funcdef_flag) storage_class = declspecs->storage_class = csc_none; else return 0; } } decl = build_decl (declarator->id_loc, FUNCTION_DECL, declarator->u.id, type); decl = build_decl_attribute_variant (decl, decl_attr); if (type_quals & TYPE_QUAL_ATOMIC) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && type_quals && !DECL_IN_SYSTEM_HEADER (decl)) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); /* Every function declaration is an external reference (DECL_EXTERNAL) except for those which are not at file scope and are explicitly declared "auto". This is forbidden by standard C (C99 6.7.1p5) and is interpreted by GCC to signify a forward declaration of a nested function. */ if (storage_class == csc_auto && current_scope != file_scope) DECL_EXTERNAL (decl) = 0; /* In C99, a function which is declared 'inline' with 'extern' is not an external reference (which is confusing). It means that the later definition of the function must be output in this file, C99 6.7.4p6. In GNU C89, a function declared 'extern inline' is an external reference. */ else if (declspecs->inline_p && storage_class != csc_static) DECL_EXTERNAL (decl) = ((storage_class == csc_extern) == flag_gnu89_inline); else DECL_EXTERNAL (decl) = !initialized; /* Record absence of global scope for `static' or `auto'. */ TREE_PUBLIC (decl) = !(storage_class == csc_static || storage_class == csc_auto); /* For a function definition, record the argument information block where store_parm_decls will look for it. */ if (funcdef_flag) current_function_arg_info = arg_info; if (declspecs->default_int_p) C_FUNCTION_IMPLICIT_INT (decl) = 1; /* Record presence of `inline' and `_Noreturn', if it is reasonable. */ if (flag_hosted && MAIN_NAME_P (declarator->u.id)) { if (declspecs->inline_p) pedwarn (loc, 0, "cannot inline function %<main%>"); if (declspecs->noreturn_p) pedwarn (loc, 0, "%<main%> declared %<_Noreturn%>"); } else { if (declspecs->inline_p) /* Record that the function is declared `inline'. */ DECL_DECLARED_INLINE_P (decl) = 1; if (declspecs->noreturn_p) { if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 does not support %<_Noreturn%>"); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 does not support %<_Noreturn%>"); TREE_THIS_VOLATILE (decl) = 1; } } } else { /* It's a variable. */ /* An uninitialized decl with `extern' is a reference. */ int extern_ref = !initialized && storage_class == csc_extern; type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); /* C99 6.2.2p7: It is invalid (compile-time undefined behavior) to create an 'extern' declaration for a variable if there is a global declaration that is 'static' and the global declaration is not visible. (If the static declaration _is_ currently visible, the 'extern' declaration is taken to refer to that decl.) 
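   A minimal sketch of the case in question (illustrative only):

     static int counter;
     void f (void)
     {
       int counter = 0;        hides the file-scope 'counter'
       {
         extern int counter;   rejected: previously declared 'static',
       }                       redeclared 'extern'
     }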
*/ if (extern_ref && current_scope != file_scope) { tree global_decl = identifier_global_value (declarator->u.id); tree visible_decl = lookup_name (declarator->u.id); if (global_decl && global_decl != visible_decl && TREE_CODE (global_decl) == VAR_DECL && !TREE_PUBLIC (global_decl)) error_at (loc, "variable previously declared %<static%> " "redeclared %<extern%>"); } decl = build_decl (declarator->id_loc, VAR_DECL, declarator->u.id, type); if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; if (declspecs->inline_p) pedwarn (loc, 0, "variable %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0, "variable %q+D declared %<_Noreturn%>", decl); /* At file scope, an initialized extern declaration may follow a static declaration. In that case, DECL_EXTERNAL will be reset later in start_decl. */ DECL_EXTERNAL (decl) = (storage_class == csc_extern); /* At file scope, the presence of a `static' or `register' storage class specifier, or the absence of all storage class specifiers makes this declaration a definition (perhaps tentative). Also, the absence of `static' makes it public. */ if (current_scope == file_scope) { TREE_PUBLIC (decl) = storage_class != csc_static; TREE_STATIC (decl) = !extern_ref; } /* Not at file scope, only `static' makes a static definition. */ else { TREE_STATIC (decl) = (storage_class == csc_static); TREE_PUBLIC (decl) = extern_ref; } if (threadp) set_decl_tls_model (decl, decl_default_tls_model (decl)); } if ((storage_class == csc_extern || (storage_class == csc_none && TREE_CODE (type) == FUNCTION_TYPE && !funcdef_flag)) && variably_modified_type_p (type, NULL_TREE)) { /* C99 6.7.5.2p2 */ if (TREE_CODE (type) == FUNCTION_TYPE) error_at (loc, "non-nested function with variably modified type"); else error_at (loc, "object with variably modified type must have " "no linkage"); } /* Record `register' declaration for warnings on & and in case doing stupid register allocation. */ if (storage_class == csc_register) { C_DECL_REGISTER (decl) = 1; DECL_REGISTER (decl) = 1; } /* Record constancy and volatility. */ c_apply_type_quals_to_decl (type_quals, decl); /* Apply _Alignas specifiers. */ if (alignas_align) { DECL_ALIGN (decl) = alignas_align * BITS_PER_UNIT; DECL_USER_ALIGN (decl) = 1; } /* If a type has volatile components, it should be stored in memory. Otherwise, the fact that those components are volatile will be ignored, and would even crash the compiler. Of course, this only makes sense on VAR,PARM, and RESULT decl's. */ if (C_TYPE_FIELDS_VOLATILE (TREE_TYPE (decl)) && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == RESULT_DECL)) { /* It is not an error for a structure with volatile fields to be declared register, but reset DECL_REGISTER since it cannot actually go in a register. */ int was_reg = C_DECL_REGISTER (decl); C_DECL_REGISTER (decl) = 0; DECL_REGISTER (decl) = 0; c_mark_addressable (decl); C_DECL_REGISTER (decl) = was_reg; } /* This is the earliest point at which we might know the assembler name of a variable. Thus, if it's known before this, die horribly. 
*/ gcc_assert (!DECL_ASSEMBLER_NAME_SET_P (decl)); if (warn_cxx_compat && TREE_CODE (decl) == VAR_DECL && TREE_PUBLIC (decl) && TREE_STATIC (decl) && (TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (decl)) == UNION_TYPE || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE) && TYPE_NAME (TREE_TYPE (decl)) == NULL_TREE) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat, ("non-local variable %qD with anonymous type is " "questionable in C++"), decl); return decl; } } /* Decode the parameter-list info for a function type or function definition. The argument is the value returned by `get_parm_info' (or made in c-parse.c if there is an identifier list instead of a parameter decl list). These two functions are separate because when a function returns or receives functions then each is called multiple times but the order of calls is different. The last call to `grokparms' is always the one that contains the formal parameter names of a function definition. Return a list of arg types to use in the FUNCTION_TYPE for this function. FUNCDEF_FLAG is true for a function definition, false for a mere declaration. A nonempty identifier-list gets an error message when FUNCDEF_FLAG is false. */ static tree grokparms (struct c_arg_info *arg_info, bool funcdef_flag) { tree arg_types = arg_info->types; if (funcdef_flag && arg_info->had_vla_unspec) { /* A function definition isn't function prototype scope C99 6.2.1p4. */ /* C99 6.7.5.2p4 */ error ("%<[*]%> not allowed in other than function prototype scope"); } if (arg_types == 0 && !funcdef_flag && !in_system_header_at (input_location)) warning (OPT_Wstrict_prototypes, "function declaration isn%'t a prototype"); if (arg_types == error_mark_node) return 0; /* don't set TYPE_ARG_TYPES in this case */ else if (arg_types && TREE_CODE (TREE_VALUE (arg_types)) == IDENTIFIER_NODE) { if (!funcdef_flag) { pedwarn (input_location, 0, "parameter names (without types) in function declaration"); arg_info->parms = NULL_TREE; } else arg_info->parms = arg_info->types; arg_info->types = 0; return 0; } else { tree parm, type, typelt; unsigned int parmno; const char *errmsg; /* If there is a parameter of incomplete type in a definition, this is an error. In a declaration this is valid, and a struct or union type may be completed later, before any calls or definition of the function. In the case where the tag was first declared within the parameter list, a warning has already been given. If a parameter has void type, then however the function cannot be defined or called, so warn. 
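   For example (an illustrative sketch; wording of the diagnostics is
   approximate):

     struct incomplete;
     void f (struct incomplete p);      declaration: accepted, the
                                        struct may be completed later
     void g (struct incomplete p) { }   definition: parameter 1 has
                                        incomplete type
     void h (void p);                   declaration: parameter 1 has
                                        void type (warning only)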
*/ for (parm = arg_info->parms, typelt = arg_types, parmno = 1; parm; parm = DECL_CHAIN (parm), typelt = TREE_CHAIN (typelt), parmno++) { type = TREE_VALUE (typelt); if (type == error_mark_node) continue; if (!COMPLETE_TYPE_P (type)) { if (funcdef_flag) { if (DECL_NAME (parm)) error_at (input_location, "parameter %u (%q+D) has incomplete type", parmno, parm); else error_at (DECL_SOURCE_LOCATION (parm), "parameter %u has incomplete type", parmno); TREE_VALUE (typelt) = error_mark_node; TREE_TYPE (parm) = error_mark_node; arg_types = NULL_TREE; } else if (VOID_TYPE_P (type)) { if (DECL_NAME (parm)) warning_at (input_location, 0, "parameter %u (%q+D) has void type", parmno, parm); else warning_at (DECL_SOURCE_LOCATION (parm), 0, "parameter %u has void type", parmno); } } errmsg = targetm.invalid_parameter_type (type); if (errmsg) { error (errmsg); TREE_VALUE (typelt) = error_mark_node; TREE_TYPE (parm) = error_mark_node; arg_types = NULL_TREE; } if (DECL_NAME (parm) && TREE_USED (parm)) warn_if_shadowing (parm); } return arg_types; } } /* Allocate and initialize a c_arg_info structure from the parser's obstack. */ struct c_arg_info * build_arg_info (void) { struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info); ret->parms = NULL_TREE; ret->tags = NULL; ret->types = NULL_TREE; ret->others = NULL_TREE; ret->pending_sizes = NULL; ret->had_vla_unspec = 0; return ret; } /* Take apart the current scope and return a c_arg_info structure with info on a parameter list just parsed. This structure is later fed to 'grokparms' and 'store_parm_decls'. ELLIPSIS being true means the argument list ended in '...' so don't append a sentinel (void_list_node) to the end of the type-list. EXPR is NULL or an expression that needs to be evaluated for the side effects of array size expressions in the parameters. */ struct c_arg_info * get_parm_info (bool ellipsis, tree expr) { struct c_binding *b = current_scope->bindings; struct c_arg_info *arg_info = build_arg_info (); tree parms = 0; vec<c_arg_tag, va_gc> *tags = NULL; tree types = 0; tree others = 0; static bool explained_incomplete_types = false; bool gave_void_only_once_err = false; arg_info->had_vla_unspec = current_scope->had_vla_unspec; /* The bindings in this scope must not get put into a block. We will take care of deleting the binding nodes. */ current_scope->bindings = 0; /* This function is only called if there was *something* on the parameter list. */ gcc_assert (b); /* A parameter list consisting solely of 'void' indicates that the function takes no arguments. But if the 'void' is qualified (by 'const' or 'volatile'), or has a storage class specifier ('register'), then the behavior is undefined; issue an error. Typedefs for 'void' are OK (see DR#157). */ if (b->prev == 0 /* one binding */ && TREE_CODE (b->decl) == PARM_DECL /* which is a parameter */ && !DECL_NAME (b->decl) /* anonymous */ && VOID_TYPE_P (TREE_TYPE (b->decl))) /* of void type */ { if (TYPE_QUALS (TREE_TYPE (b->decl)) != TYPE_UNQUALIFIED || C_DECL_REGISTER (b->decl)) error ("%<void%> as only parameter may not be qualified"); /* There cannot be an ellipsis. */ if (ellipsis) error ("%<void%> must be the only parameter"); arg_info->types = void_list_node; return arg_info; } if (!ellipsis) types = void_list_node; /* Break up the bindings list into parms, tags, types, and others; apply sanity checks; purge the name-to-decl bindings. 
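   The 'void'-as-only-parameter rules above reject, for example
   (illustrative sketch):

     void f (void);          takes no arguments
     void g (const void);    rejected: qualified 'void' parameter
     void h (void, ...);     rejected: 'void' must be the only
                             parameter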
*/ while (b) { tree decl = b->decl; tree type = TREE_TYPE (decl); c_arg_tag tag; const char *keyword; switch (TREE_CODE (decl)) { case PARM_DECL: if (b->id) { gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; } /* Check for forward decls that never got their actual decl. */ if (TREE_ASM_WRITTEN (decl)) error ("parameter %q+D has just a forward declaration", decl); /* Check for (..., void, ...) and issue an error. */ else if (VOID_TYPE_P (type) && !DECL_NAME (decl)) { if (!gave_void_only_once_err) { error ("%<void%> must be the only parameter"); gave_void_only_once_err = true; } } else { /* Valid parameter, add it to the list. */ DECL_CHAIN (decl) = parms; parms = decl; /* Since there is a prototype, args are passed in their declared types. The back end may override this later. */ DECL_ARG_TYPE (decl) = type; types = tree_cons (0, type, types); } break; case ENUMERAL_TYPE: keyword = "enum"; goto tag; case UNION_TYPE: keyword = "union"; goto tag; case RECORD_TYPE: keyword = "struct"; goto tag; tag: /* Types may not have tag-names, in which case the type appears in the bindings list with b->id NULL. */ if (b->id) { gcc_assert (I_TAG_BINDING (b->id) == b); I_TAG_BINDING (b->id) = b->shadowed; } /* Warn about any struct, union or enum tags defined in a parameter list. The scope of such types is limited to the parameter list, which is rarely if ever desirable (it's impossible to call such a function with type- correct arguments). An anonymous union parm type is meaningful as a GNU extension, so don't warn for that. */ if (TREE_CODE (decl) != UNION_TYPE || b->id != 0) { if (b->id) /* The %s will be one of 'struct', 'union', or 'enum'. */ warning (0, "%<%s %E%> declared inside parameter list", keyword, b->id); else /* The %s will be one of 'struct', 'union', or 'enum'. */ warning (0, "anonymous %s declared inside parameter list", keyword); if (!explained_incomplete_types) { warning (0, "its scope is only this definition or declaration," " which is probably not what you want"); explained_incomplete_types = true; } } tag.id = b->id; tag.type = decl; vec_safe_push (tags, tag); break; case CONST_DECL: case TYPE_DECL: case FUNCTION_DECL: /* CONST_DECLs appear here when we have an embedded enum, and TYPE_DECLs appear here when we have an embedded struct or union. No warnings for this - we already warned about the type itself. FUNCTION_DECLs appear when there is an implicit function declaration in the parameter list. */ /* When we reinsert this decl in the function body, we need to reconstruct whether it was marked as nested. */ gcc_assert (TREE_CODE (decl) == FUNCTION_DECL ? b->nested : !b->nested); DECL_CHAIN (decl) = others; others = decl; /* fall through */ case ERROR_MARK: /* error_mark_node appears here when we have an undeclared variable. Just throw it away. */ if (b->id) { gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; } break; /* Other things that might be encountered. */ case LABEL_DECL: case VAR_DECL: default: gcc_unreachable (); } b = free_binding_and_advance (b); } arg_info->parms = parms; arg_info->tags = tags; arg_info->types = types; arg_info->others = others; arg_info->pending_sizes = expr; return arg_info; } /* Get the struct, enum or union (CODE says which) with tag NAME. Define the tag as a forward-reference with location LOC if it is not defined. Return a c_typespec structure for the type specifier. 
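   For instance (an illustrative sketch):

     struct list;                          first mention: a forward
                                           reference node is created
     struct list *head;                    later mentions find that
                                           same node
     struct list { struct list *next; };   the real declaration turns
                                           the forward reference into
                                           a complete type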
*/ struct c_typespec parser_xref_tag (location_t loc, enum tree_code code, tree name) { struct c_typespec ret; tree ref; location_t refloc; ret.expr = NULL_TREE; ret.expr_const_operands = true; /* If a cross reference is requested, look up the type already defined for this tag and return it. */ ref = lookup_tag (code, name, 0, &refloc); /* If this is the right type of tag, return what we found. (This reference will be shadowed by shadow_tag later if appropriate.) If this is the wrong type of tag, do not return it. If it was the wrong type in the same scope, we will have had an error message already; if in a different scope and declaring a name, pending_xref_error will give an error message; but if in a different scope and not declaring a name, this tag should shadow the previous declaration of a different type of tag, and this would not work properly if we return the reference found. (For example, with "struct foo" in an outer scope, "union foo;" must shadow that tag with a new one of union type.) */ ret.kind = (ref ? ctsk_tagref : ctsk_tagfirstref); if (ref && TREE_CODE (ref) == code) { if (C_TYPE_DEFINED_IN_STRUCT (ref) && loc != UNKNOWN_LOCATION && warn_cxx_compat) { switch (code) { case ENUMERAL_TYPE: warning_at (loc, OPT_Wc___compat, ("enum type defined in struct or union " "is not visible in C++")); inform (refloc, "enum type defined here"); break; case RECORD_TYPE: warning_at (loc, OPT_Wc___compat, ("struct defined in struct or union " "is not visible in C++")); inform (refloc, "struct defined here"); break; case UNION_TYPE: warning_at (loc, OPT_Wc___compat, ("union defined in struct or union " "is not visible in C++")); inform (refloc, "union defined here"); break; default: gcc_unreachable(); } } ret.spec = ref; return ret; } /* If no such tag is yet defined, create a forward-reference node and record it as the "definition". When a real declaration of this type is found, the forward-reference will be altered into a real type. */ ref = make_node (code); if (code == ENUMERAL_TYPE) { /* Give the type a default layout like unsigned int to avoid crashing if it does not get defined. */ SET_TYPE_MODE (ref, TYPE_MODE (unsigned_type_node)); TYPE_ALIGN (ref) = TYPE_ALIGN (unsigned_type_node); TYPE_USER_ALIGN (ref) = 0; TYPE_UNSIGNED (ref) = 1; TYPE_PRECISION (ref) = TYPE_PRECISION (unsigned_type_node); TYPE_MIN_VALUE (ref) = TYPE_MIN_VALUE (unsigned_type_node); TYPE_MAX_VALUE (ref) = TYPE_MAX_VALUE (unsigned_type_node); } pushtag (loc, name, ref); ret.spec = ref; return ret; } /* Get the struct, enum or union (CODE says which) with tag NAME. Define the tag as a forward-reference if it is not defined. Return a tree for the type. */ tree xref_tag (enum tree_code code, tree name) { return parser_xref_tag (input_location, code, name).spec; } /* Make sure that the tag NAME is defined *in the current scope* at least as a forward reference. LOC is the location of the struct's definition. CODE says which kind of tag NAME ought to be. This stores the current value of the file static STRUCT_PARSE_INFO in *ENCLOSING_STRUCT_PARSE_INFO, and points STRUCT_PARSE_INFO at a new c_struct_parse_info structure. The old value of STRUCT_PARSE_INFO is restored in finish_struct. */ tree start_struct (location_t loc, enum tree_code code, tree name, struct c_struct_parse_info **enclosing_struct_parse_info) { /* If there is already a tag defined at this scope (as a forward reference), just return it. 
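   For example (illustrative sketch, messages paraphrased):

     struct s { int a; };
     struct s { int b; };      rejected: redefinition of 'struct s'

     struct t {
       struct t { int x; } m;  rejected: nested redefinition of
     };                        'struct t'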
*/ tree ref = NULL_TREE; location_t refloc = UNKNOWN_LOCATION; if (name != NULL_TREE) ref = lookup_tag (code, name, 1, &refloc); if (ref && TREE_CODE (ref) == code) { if (TYPE_SIZE (ref)) { if (code == UNION_TYPE) error_at (loc, "redefinition of %<union %E%>", name); else error_at (loc, "redefinition of %<struct %E%>", name); if (refloc != UNKNOWN_LOCATION) inform (refloc, "originally defined here"); /* Don't create structures using a name already in use. */ ref = NULL_TREE; } else if (C_TYPE_BEING_DEFINED (ref)) { if (code == UNION_TYPE) error_at (loc, "nested redefinition of %<union %E%>", name); else error_at (loc, "nested redefinition of %<struct %E%>", name); /* Don't bother to report "originally defined here" for a nested redefinition; the original definition should be obvious. */ /* Don't create structures that contain themselves. */ ref = NULL_TREE; } } /* Otherwise create a forward-reference just so the tag is in scope. */ if (ref == NULL_TREE || TREE_CODE (ref) != code) { ref = make_node (code); pushtag (loc, name, ref); } C_TYPE_BEING_DEFINED (ref) = 1; TYPE_PACKED (ref) = flag_pack_struct; *enclosing_struct_parse_info = struct_parse_info; struct_parse_info = XNEW (struct c_struct_parse_info); struct_parse_info->struct_types.create (0); struct_parse_info->fields.create (0); struct_parse_info->typedefs_seen.create (0); /* FIXME: This will issue a warning for a use of a type defined within a statement expr used within sizeof, et. al. This is not terribly serious as C++ doesn't permit statement exprs within sizeof anyhow. */ if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof)) warning_at (loc, OPT_Wc___compat, "defining type in %qs expression is invalid in C++", (in_sizeof ? "sizeof" : (in_typeof ? "typeof" : "alignof"))); return ref; } /* Process the specs, declarator and width (NULL if omitted) of a structure component, returning a FIELD_DECL node. WIDTH is non-NULL for bit-fields only, and is an INTEGER_CST node. DECL_ATTRS is as for grokdeclarator. LOC is the location of the structure component. This is done during the parsing of the struct declaration. The FIELD_DECL nodes are chained together and the lot of them are ultimately passed to `build_struct' to make the RECORD_TYPE node. */ tree grokfield (location_t loc, struct c_declarator *declarator, struct c_declspecs *declspecs, tree width, tree *decl_attrs) { tree value; if (declarator->kind == cdk_id && declarator->u.id == NULL_TREE && width == NULL_TREE) { /* This is an unnamed decl. If we have something of the form "union { list } ;" then this is the anonymous union extension. Similarly for struct. If this is something of the form "struct foo;", then If MS or Plan 9 extensions are enabled, this is handled as an anonymous struct. Otherwise this is a forward declaration of a structure tag. If this is something of the form "foo;" and foo is a TYPE_DECL, then If foo names a structure or union without a tag, then this is an anonymous struct (this is permitted by C11). If MS or Plan 9 extensions are enabled and foo names a structure, then again this is an anonymous struct. Otherwise this is an error. Oh what a horrid tangled web we weave. I wonder if MS consciously took this from Plan 9 or if it was an accident of implementation that took root before someone noticed the bug... 
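   A short sketch of the forms in question (illustrative only):

     struct outer {
       struct { int x, y; };         anonymous struct, C11
       union { int i; float f; };    anonymous union, C11
       struct tag;                   anonymous field only with
     };                              -fms-extensions or
                                     -fplan9-extensions; otherwise a
                                     forward declaration of the tag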
*/ tree type = declspecs->type; bool type_ok = (TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE); bool ok = false; if (type_ok && (flag_ms_extensions || flag_plan9_extensions || !declspecs->typedef_p)) { if (flag_ms_extensions || flag_plan9_extensions) ok = true; else if (TYPE_NAME (type) == NULL) ok = true; else ok = false; } if (!ok) { pedwarn (loc, 0, "declaration does not declare anything"); return NULL_TREE; } if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 doesn%'t support unnamed structs/unions"); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 doesn%'t support unnamed structs/unions"); } value = grokdeclarator (declarator, declspecs, FIELD, false, width ? &width : NULL, decl_attrs, NULL, NULL, DEPRECATED_NORMAL); finish_decl (value, loc, NULL_TREE, NULL_TREE, NULL_TREE); DECL_INITIAL (value) = width; if (warn_cxx_compat && DECL_NAME (value) != NULL_TREE) { /* If we currently have a binding for this field, set the in_struct field in the binding, so that we warn about lookups which find it. */ struct c_binding *b = I_SYMBOL_BINDING (DECL_NAME (value)); if (b != NULL) { /* If the in_struct field is not yet set, push it on a list to be cleared when this struct is finished. */ if (!b->in_struct) { struct_parse_info->fields.safe_push (b); b->in_struct = 1; } } } return value; } /* Subroutine of detect_field_duplicates: return whether X and Y, which are both fields in the same struct, have duplicate field names. */ static bool is_duplicate_field (tree x, tree y) { if (DECL_NAME (x) != NULL_TREE && DECL_NAME (x) == DECL_NAME (y)) return true; /* When using -fplan9-extensions, an anonymous field whose name is a typedef can duplicate a field name. */ if (flag_plan9_extensions && (DECL_NAME (x) == NULL_TREE || DECL_NAME (y) == NULL_TREE)) { tree xt, xn, yt, yn; xt = TREE_TYPE (x); if (DECL_NAME (x) != NULL_TREE) xn = DECL_NAME (x); else if ((TREE_CODE (xt) == RECORD_TYPE || TREE_CODE (xt) == UNION_TYPE) && TYPE_NAME (xt) != NULL_TREE && TREE_CODE (TYPE_NAME (xt)) == TYPE_DECL) xn = DECL_NAME (TYPE_NAME (xt)); else xn = NULL_TREE; yt = TREE_TYPE (y); if (DECL_NAME (y) != NULL_TREE) yn = DECL_NAME (y); else if ((TREE_CODE (yt) == RECORD_TYPE || TREE_CODE (yt) == UNION_TYPE) && TYPE_NAME (yt) != NULL_TREE && TREE_CODE (TYPE_NAME (yt)) == TYPE_DECL) yn = DECL_NAME (TYPE_NAME (yt)); else yn = NULL_TREE; if (xn != NULL_TREE && xn == yn) return true; } return false; } /* Subroutine of detect_field_duplicates: add the fields of FIELDLIST to HTAB, giving errors for any duplicates. */ static void detect_field_duplicates_hash (tree fieldlist, hash_table<pointer_hash <tree_node> > *htab) { tree x, y; tree_node **slot; for (x = fieldlist; x ; x = DECL_CHAIN (x)) if ((y = DECL_NAME (x)) != 0) { slot = htab->find_slot (y, INSERT); if (*slot) { error ("duplicate member %q+D", x); DECL_NAME (x) = NULL_TREE; } *slot = y; } else if (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE) { detect_field_duplicates_hash (TYPE_FIELDS (TREE_TYPE (x)), htab); /* When using -fplan9-extensions, an anonymous field whose name is a typedef can duplicate a field name. */ if (flag_plan9_extensions && TYPE_NAME (TREE_TYPE (x)) != NULL_TREE && TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL) { tree xn = DECL_NAME (TYPE_NAME (TREE_TYPE (x))); slot = htab->find_slot (xn, INSERT); if (*slot) error ("duplicate member %q+D", TYPE_NAME (TREE_TYPE (x))); *slot = xn; } } } /* Generate an error for any duplicate field names in FIELDLIST. 
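   (for example, an illustrative sketch:

     struct s { int a; float a; };    rejected: duplicate member 'a'

   ).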
Munge the list such that this does not present a problem later. */ static void detect_field_duplicates (tree fieldlist) { tree x, y; int timeout = 10; /* If the struct is the list of instance variables of an Objective-C class, then we need to check all the instance variables of superclasses when checking for duplicates (since you can't have an instance variable in a subclass with the same name as an instance variable in a superclass). We pass on this job to the Objective-C compiler. objc_detect_field_duplicates() will return false if we are not checking the list of instance variables and the C frontend should proceed with the standard field duplicate checks. If we are checking the list of instance variables, the ObjC frontend will do the check, emit the errors if needed, and then return true. */ if (c_dialect_objc ()) if (objc_detect_field_duplicates (false)) return; /* First, see if there are more than "a few" fields. This is trivially true if there are zero or one fields. */ if (!fieldlist || !DECL_CHAIN (fieldlist)) return; x = fieldlist; do { timeout--; if (DECL_NAME (x) == NULL_TREE && (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE)) timeout = 0; x = DECL_CHAIN (x); } while (timeout > 0 && x); /* If there were "few" fields and no anonymous structures or unions, avoid the overhead of allocating a hash table. Instead just do the nested traversal thing. */ if (timeout > 0) { for (x = DECL_CHAIN (fieldlist); x; x = DECL_CHAIN (x)) /* When using -fplan9-extensions, we can have duplicates between typedef names and fields. */ if (DECL_NAME (x) || (flag_plan9_extensions && DECL_NAME (x) == NULL_TREE && (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE) && TYPE_NAME (TREE_TYPE (x)) != NULL_TREE && TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL)) { for (y = fieldlist; y != x; y = TREE_CHAIN (y)) if (is_duplicate_field (y, x)) { error ("duplicate member %q+D", x); DECL_NAME (x) = NULL_TREE; } } } else { hash_table<pointer_hash <tree_node> > htab (37); detect_field_duplicates_hash (fieldlist, &htab); } } /* Finish up struct info used by -Wc++-compat. */ static void warn_cxx_compat_finish_struct (tree fieldlist) { unsigned int ix; tree x; struct c_binding *b; /* Set the C_TYPE_DEFINED_IN_STRUCT flag for each type defined in the current struct. We do this now at the end of the struct because the flag is used to issue visibility warnings, and we only want to issue those warnings if the type is referenced outside of the struct declaration. */ FOR_EACH_VEC_ELT (struct_parse_info->struct_types, ix, x) C_TYPE_DEFINED_IN_STRUCT (x) = 1; /* The TYPEDEFS_SEEN field of STRUCT_PARSE_INFO is a list of typedefs used when declaring fields in this struct. If the name of any of the fields is also a typedef name then the struct would not parse in C++, because the C++ lookup rules say that the typedef name would be looked up in the context of the struct, and would thus be the field rather than the typedef. */ if (!struct_parse_info->typedefs_seen.is_empty () && fieldlist != NULL_TREE) { /* Use a hash_set<tree> using the name of the typedef. We can use a hash_set<tree> because identifiers are interned. 
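   The case being detected looks like this (illustrative sketch):

     typedef int T;
     struct s { T T; };    valid C, but warned with -Wc++-compat: in
                           C++ the member 'T' would hide the typedef
                           inside 's', so the field declaration would
                           not parse there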
*/ hash_set<tree> tset; FOR_EACH_VEC_ELT (struct_parse_info->typedefs_seen, ix, x) tset.add (DECL_NAME (x)); for (x = fieldlist; x != NULL_TREE; x = DECL_CHAIN (x)) { if (DECL_NAME (x) != NULL_TREE && tset.contains (DECL_NAME (x))) { warning_at (DECL_SOURCE_LOCATION (x), OPT_Wc___compat, ("using %qD as both field and typedef name is " "invalid in C++"), x); /* FIXME: It would be nice to report the location where the typedef name is used. */ } } } /* For each field which has a binding and which was not defined in an enclosing struct, clear the in_struct field. */ FOR_EACH_VEC_ELT (struct_parse_info->fields, ix, b) b->in_struct = 0; } /* Fill in the fields of a RECORD_TYPE or UNION_TYPE node, T. LOC is the location of the RECORD_TYPE or UNION_TYPE's definition. FIELDLIST is a chain of FIELD_DECL nodes for the fields. ATTRIBUTES are attributes to be applied to the structure. ENCLOSING_STRUCT_PARSE_INFO is the value of STRUCT_PARSE_INFO when the struct was started. */ tree finish_struct (location_t loc, tree t, tree fieldlist, tree attributes, struct c_struct_parse_info *enclosing_struct_parse_info) { tree x; bool toplevel = file_scope == current_scope; int saw_named_field; /* If this type was previously laid out as a forward reference, make sure we lay it out again. */ TYPE_SIZE (t) = 0; decl_attributes (&t, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE); if (pedantic) { for (x = fieldlist; x; x = DECL_CHAIN (x)) { if (DECL_NAME (x) != 0) break; if (flag_isoc11 && (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE)) break; } if (x == 0) { if (TREE_CODE (t) == UNION_TYPE) { if (fieldlist) pedwarn (loc, OPT_Wpedantic, "union has no named members"); else pedwarn (loc, OPT_Wpedantic, "union has no members"); } else { if (fieldlist) pedwarn (loc, OPT_Wpedantic, "struct has no named members"); else pedwarn (loc, OPT_Wpedantic, "struct has no members"); } } } /* Install struct as DECL_CONTEXT of each field decl. Also process specified field sizes, found in the DECL_INITIAL, storing 0 there after the type has been changed to precision equal to its width, rather than the precision of the specified standard type. (Correct layout requires the original type to have been preserved until now.) */ saw_named_field = 0; for (x = fieldlist; x; x = DECL_CHAIN (x)) { if (TREE_TYPE (x) == error_mark_node) continue; DECL_CONTEXT (x) = t; /* If any field is const, the structure type is pseudo-const. */ if (TREE_READONLY (x)) C_TYPE_FIELDS_READONLY (t) = 1; else { /* A field that is pseudo-const makes the structure likewise. */ tree t1 = strip_array_types (TREE_TYPE (x)); if ((TREE_CODE (t1) == RECORD_TYPE || TREE_CODE (t1) == UNION_TYPE) && C_TYPE_FIELDS_READONLY (t1)) C_TYPE_FIELDS_READONLY (t) = 1; } /* Any field that is volatile means variables of this type must be treated in some ways as volatile. */ if (TREE_THIS_VOLATILE (x)) C_TYPE_FIELDS_VOLATILE (t) = 1; /* Any field of nominal variable size implies structure is too. */ if (C_DECL_VARIABLE_SIZE (x)) C_TYPE_VARIABLE_SIZE (t) = 1; if (DECL_INITIAL (x)) { unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (x)); DECL_SIZE (x) = bitsize_int (width); DECL_BIT_FIELD (x) = 1; SET_DECL_C_BIT_FIELD (x); } if (TYPE_PACKED (t) && (DECL_BIT_FIELD (x) || TYPE_ALIGN (TREE_TYPE (x)) > BITS_PER_UNIT)) DECL_PACKED (x) = 1; /* Detect flexible array member in an invalid context. 
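   For example (illustrative sketch):

     struct ok { int n; char data[]; };   valid flexible array member
     union  u  { char data[]; };          rejected: in a union
     struct b1 { char data[]; int n; };   rejected: not at end of the
                                          struct
     struct b2 { char data[]; };          rejected: otherwise empty
                                          struct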
*/ if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE && TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE && TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE) { if (TREE_CODE (t) == UNION_TYPE) { error_at (DECL_SOURCE_LOCATION (x), "flexible array member in union"); TREE_TYPE (x) = error_mark_node; } else if (DECL_CHAIN (x) != NULL_TREE) { error_at (DECL_SOURCE_LOCATION (x), "flexible array member not at end of struct"); TREE_TYPE (x) = error_mark_node; } else if (!saw_named_field) { error_at (DECL_SOURCE_LOCATION (x), "flexible array member in otherwise empty struct"); TREE_TYPE (x) = error_mark_node; } } if (pedantic && TREE_CODE (t) == RECORD_TYPE && flexible_array_type_p (TREE_TYPE (x))) pedwarn (DECL_SOURCE_LOCATION (x), OPT_Wpedantic, "invalid use of structure with flexible array member"); if (DECL_NAME (x) || TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE) saw_named_field = 1; } detect_field_duplicates (fieldlist); /* Now we have the nearly final fieldlist. Record it, then lay out the structure or union (including the fields). */ TYPE_FIELDS (t) = fieldlist; layout_type (t); if (TYPE_SIZE_UNIT (t) && TREE_CODE (TYPE_SIZE_UNIT (t)) == INTEGER_CST && !TREE_OVERFLOW (TYPE_SIZE_UNIT (t)) && !valid_constant_size_p (TYPE_SIZE_UNIT (t))) error ("type %qT is too large", t); /* Give bit-fields their proper types. */ { tree *fieldlistp = &fieldlist; while (*fieldlistp) if (TREE_CODE (*fieldlistp) == FIELD_DECL && DECL_INITIAL (*fieldlistp) && TREE_TYPE (*fieldlistp) != error_mark_node) { unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (*fieldlistp)); tree type = TREE_TYPE (*fieldlistp); if (width != TYPE_PRECISION (type)) { TREE_TYPE (*fieldlistp) = c_build_bitfield_integer_type (width, TYPE_UNSIGNED (type)); DECL_MODE (*fieldlistp) = TYPE_MODE (TREE_TYPE (*fieldlistp)); } DECL_INITIAL (*fieldlistp) = 0; } else fieldlistp = &DECL_CHAIN (*fieldlistp); } /* Now we have the truly final field list. Store it in this type and in the variants. */ TYPE_FIELDS (t) = fieldlist; /* If there are lots of fields, sort so we can look through them fast. We arbitrarily consider 16 or more elts to be "a lot". */ { int len = 0; for (x = fieldlist; x; x = DECL_CHAIN (x)) { if (len > 15 || DECL_NAME (x) == NULL) break; len += 1; } if (len > 15) { tree *field_array; struct lang_type *space; struct sorted_fields_type *space2; len += list_length (x); /* Use the same allocation policy here that make_node uses, to ensure that this lives as long as the rest of the struct decl. All decls in an inline function need to be saved. */ space = ggc_cleared_alloc<struct lang_type> (); space2 = (sorted_fields_type *) ggc_internal_alloc (sizeof (struct sorted_fields_type) + len * sizeof (tree)); len = 0; space->s = space2; field_array = &space2->elts[0]; for (x = fieldlist; x; x = DECL_CHAIN (x)) { field_array[len++] = x; /* If there is anonymous struct or union, break out of the loop. */ if (DECL_NAME (x) == NULL) break; } /* Found no anonymous struct/union. Add the TYPE_LANG_SPECIFIC. 
*/ if (x == NULL) { TYPE_LANG_SPECIFIC (t) = space; TYPE_LANG_SPECIFIC (t)->s->len = len; field_array = TYPE_LANG_SPECIFIC (t)->s->elts; qsort (field_array, len, sizeof (tree), field_decl_cmp); } } } for (x = TYPE_MAIN_VARIANT (t); x; x = TYPE_NEXT_VARIANT (x)) { TYPE_FIELDS (x) = TYPE_FIELDS (t); TYPE_LANG_SPECIFIC (x) = TYPE_LANG_SPECIFIC (t); C_TYPE_FIELDS_READONLY (x) = C_TYPE_FIELDS_READONLY (t); C_TYPE_FIELDS_VOLATILE (x) = C_TYPE_FIELDS_VOLATILE (t); C_TYPE_VARIABLE_SIZE (x) = C_TYPE_VARIABLE_SIZE (t); } /* If this was supposed to be a transparent union, but we can't make it one, warn and turn off the flag. */ if (TREE_CODE (t) == UNION_TYPE && TYPE_TRANSPARENT_AGGR (t) && (!TYPE_FIELDS (t) || TYPE_MODE (t) != DECL_MODE (TYPE_FIELDS (t)))) { TYPE_TRANSPARENT_AGGR (t) = 0; warning_at (loc, 0, "union cannot be made transparent"); } /* If this structure or union completes the type of any previous variable declaration, lay it out and output its rtl. */ for (x = C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t)); x; x = TREE_CHAIN (x)) { tree decl = TREE_VALUE (x); if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) layout_array_type (TREE_TYPE (decl)); if (TREE_CODE (decl) != TYPE_DECL) { layout_decl (decl, 0); if (c_dialect_objc ()) objc_check_decl (decl); rest_of_decl_compilation (decl, toplevel, 0); } } C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t)) = 0; /* Update type location to the one of the definition, instead of e.g. a forward declaration. */ if (TYPE_STUB_DECL (t)) DECL_SOURCE_LOCATION (TYPE_STUB_DECL (t)) = loc; /* Finish debugging output for this type. */ rest_of_type_compilation (t, toplevel); /* If we're inside a function proper, i.e. not file-scope and not still parsing parameters, then arrange for the size of a variable sized type to be bound now. */ if (building_stmt_list_p () && variably_modified_type_p (t, NULL_TREE)) add_stmt (build_stmt (loc, DECL_EXPR, build_decl (loc, TYPE_DECL, NULL, t))); if (warn_cxx_compat) warn_cxx_compat_finish_struct (fieldlist); struct_parse_info->struct_types.release (); struct_parse_info->fields.release (); struct_parse_info->typedefs_seen.release (); XDELETE (struct_parse_info); struct_parse_info = enclosing_struct_parse_info; /* If this struct is defined inside a struct, add it to struct_types. */ if (warn_cxx_compat && struct_parse_info != NULL && !in_sizeof && !in_typeof && !in_alignof) struct_parse_info->struct_types.safe_push (t); return t; } /* Lay out the type T, and its element type, and so on. */ static void layout_array_type (tree t) { if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) layout_array_type (TREE_TYPE (t)); layout_type (t); } /* Begin compiling the definition of an enumeration type. NAME is its name (or null if anonymous). LOC is the enum's location. Returns the type object, as yet incomplete. Also records info about it so that build_enumerator may be used to declare the individual values as they are read. */ tree start_enum (location_t loc, struct c_enum_contents *the_enum, tree name) { tree enumtype = NULL_TREE; location_t enumloc = UNKNOWN_LOCATION; /* If this is the real definition for a previous forward reference, fill in the contents in the same object that used to be the forward reference. 
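   For instance (illustrative sketch):

     enum color;                    forward reference, diagnosed under
                                    -Wpedantic since ISO C has no
                                    incomplete enum types
     enum color { RED, GREEN };     the definition completes the very
                                    node the forward reference created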
*/ if (name != NULL_TREE) enumtype = lookup_tag (ENUMERAL_TYPE, name, 1, &enumloc); if (enumtype == 0 || TREE_CODE (enumtype) != ENUMERAL_TYPE) { enumtype = make_node (ENUMERAL_TYPE); pushtag (loc, name, enumtype); } if (C_TYPE_BEING_DEFINED (enumtype)) error_at (loc, "nested redefinition of %<enum %E%>", name); C_TYPE_BEING_DEFINED (enumtype) = 1; if (TYPE_VALUES (enumtype) != 0) { /* This enum is a named one that has been declared already. */ error_at (loc, "redeclaration of %<enum %E%>", name); if (enumloc != UNKNOWN_LOCATION) inform (enumloc, "originally defined here"); /* Completely replace its old definition. The old enumerators remain defined, however. */ TYPE_VALUES (enumtype) = 0; } the_enum->enum_next_value = integer_zero_node; the_enum->enum_overflow = 0; if (flag_short_enums) TYPE_PACKED (enumtype) = 1; /* FIXME: This will issue a warning for a use of a type defined within sizeof in a statement expr. This is not terribly serious as C++ doesn't permit statement exprs within sizeof anyhow. */ if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof)) warning_at (loc, OPT_Wc___compat, "defining type in %qs expression is invalid in C++", (in_sizeof ? "sizeof" : (in_typeof ? "typeof" : "alignof"))); return enumtype; } /* After processing and defining all the values of an enumeration type, install their decls in the enumeration type and finish it off. ENUMTYPE is the type object, VALUES a list of decl-value pairs, and ATTRIBUTES are the specified attributes. Returns ENUMTYPE. */ tree finish_enum (tree enumtype, tree values, tree attributes) { tree pair, tem; tree minnode = 0, maxnode = 0; int precision; signop sign; bool toplevel = (file_scope == current_scope); struct lang_type *lt; decl_attributes (&enumtype, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE); /* Calculate the maximum value of any enumerator in this type. */ if (values == error_mark_node) minnode = maxnode = integer_zero_node; else { minnode = maxnode = TREE_VALUE (values); for (pair = TREE_CHAIN (values); pair; pair = TREE_CHAIN (pair)) { tree value = TREE_VALUE (pair); if (tree_int_cst_lt (maxnode, value)) maxnode = value; if (tree_int_cst_lt (value, minnode)) minnode = value; } } /* Construct the final type of this enumeration. It is the same as one of the integral types - the narrowest one that fits, except that normally we only go as narrow as int - and signed iff any of the values are negative. */ sign = (tree_int_cst_sgn (minnode) >= 0) ? UNSIGNED : SIGNED; precision = MAX (tree_int_cst_min_precision (minnode, sign), tree_int_cst_min_precision (maxnode, sign)); /* If the precision of the type was specified with an attribute and it was too small, give an error. Otherwise, use it. */ if (TYPE_PRECISION (enumtype)) { if (precision > TYPE_PRECISION (enumtype)) { TYPE_PRECISION (enumtype) = 0; error ("specified mode too small for enumeral values"); } else precision = TYPE_PRECISION (enumtype); } if (TYPE_PACKED (enumtype) || precision > TYPE_PRECISION (integer_type_node) || TYPE_PRECISION (enumtype)) { tem = c_common_type_for_size (precision, sign == UNSIGNED ? 1 : 0); if (tem == NULL) { warning (0, "enumeration values exceed range of largest integer"); tem = long_long_integer_type_node; } } else tem = sign == UNSIGNED ? 
unsigned_type_node : integer_type_node; TYPE_MIN_VALUE (enumtype) = TYPE_MIN_VALUE (tem); TYPE_MAX_VALUE (enumtype) = TYPE_MAX_VALUE (tem); TYPE_UNSIGNED (enumtype) = TYPE_UNSIGNED (tem); TYPE_SIZE (enumtype) = 0; TYPE_PRECISION (enumtype) = TYPE_PRECISION (tem); layout_type (enumtype); if (values != error_mark_node) { /* Change the type of the enumerators to be the enum type. We need to do this irrespective of the size of the enum, for proper type checking. Replace the DECL_INITIALs of the enumerators, and the value slots of the list, with copies that have the enum type; they cannot be modified in place because they may be shared (e.g. integer_zero_node) Finally, change the purpose slots to point to the names of the decls. */ for (pair = values; pair; pair = TREE_CHAIN (pair)) { tree enu = TREE_PURPOSE (pair); tree ini = DECL_INITIAL (enu); TREE_TYPE (enu) = enumtype; /* The ISO C Standard mandates enumerators to have type int, even though the underlying type of an enum type is unspecified. However, GCC allows enumerators of any integer type as an extensions. build_enumerator() converts any enumerators that fit in an int to type int, to avoid promotions to unsigned types when comparing integers with enumerators that fit in the int range. When -pedantic is given, build_enumerator() would have already warned about those that don't fit. Here we convert the rest to the enumerator type. */ if (TREE_TYPE (ini) != integer_type_node) ini = convert (enumtype, ini); DECL_INITIAL (enu) = ini; TREE_PURPOSE (pair) = DECL_NAME (enu); TREE_VALUE (pair) = ini; } TYPE_VALUES (enumtype) = values; } /* Record the min/max values so that we can warn about bit-field enumerations that are too small for the values. */ lt = ggc_cleared_alloc<struct lang_type> (); lt->enum_min = minnode; lt->enum_max = maxnode; TYPE_LANG_SPECIFIC (enumtype) = lt; /* Fix up all variant types of this enum type. */ for (tem = TYPE_MAIN_VARIANT (enumtype); tem; tem = TYPE_NEXT_VARIANT (tem)) { if (tem == enumtype) continue; TYPE_VALUES (tem) = TYPE_VALUES (enumtype); TYPE_MIN_VALUE (tem) = TYPE_MIN_VALUE (enumtype); TYPE_MAX_VALUE (tem) = TYPE_MAX_VALUE (enumtype); TYPE_SIZE (tem) = TYPE_SIZE (enumtype); TYPE_SIZE_UNIT (tem) = TYPE_SIZE_UNIT (enumtype); SET_TYPE_MODE (tem, TYPE_MODE (enumtype)); TYPE_PRECISION (tem) = TYPE_PRECISION (enumtype); TYPE_ALIGN (tem) = TYPE_ALIGN (enumtype); TYPE_USER_ALIGN (tem) = TYPE_USER_ALIGN (enumtype); TYPE_UNSIGNED (tem) = TYPE_UNSIGNED (enumtype); TYPE_LANG_SPECIFIC (tem) = TYPE_LANG_SPECIFIC (enumtype); } /* Finish debugging output for this type. */ rest_of_type_compilation (enumtype, toplevel); /* If this enum is defined inside a struct, add it to struct_types. */ if (warn_cxx_compat && struct_parse_info != NULL && !in_sizeof && !in_typeof && !in_alignof) struct_parse_info->struct_types.safe_push (enumtype); return enumtype; } /* Build and install a CONST_DECL for one value of the current enumeration type (one that was begun with start_enum). DECL_LOC is the location of the enumerator. LOC is the location of the '=' operator if any, DECL_LOC otherwise. Return a tree-list containing the CONST_DECL and its value. Assignment of sequential values by default is handled here. */ tree build_enumerator (location_t decl_loc, location_t loc, struct c_enum_contents *the_enum, tree name, tree value) { tree decl, type; /* Validate and default VALUE. */ if (value != 0) { /* Don't issue more errors for error_mark_node (i.e. an undeclared identifier) - just ignore the value expression. 
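   For example (illustrative sketch, messages paraphrased):

     enum e { A = 1.5 };           rejected: enumerator value for 'A'
                                   is not an integer constant
     enum f { B = UNDECLARED };    the undeclared name is reported
                                   once; the value is then dropped and
                                   'B' takes the next sequential value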
*/ if (value == error_mark_node) value = 0; else if (!INTEGRAL_TYPE_P (TREE_TYPE (value))) { error_at (loc, "enumerator value for %qE is not an integer constant", name); value = 0; } else { if (TREE_CODE (value) != INTEGER_CST) { value = c_fully_fold (value, false, NULL); if (TREE_CODE (value) == INTEGER_CST) pedwarn (loc, OPT_Wpedantic, "enumerator value for %qE is not an integer " "constant expression", name); } if (TREE_CODE (value) != INTEGER_CST) { error ("enumerator value for %qE is not an integer constant", name); value = 0; } else { value = default_conversion (value); constant_expression_warning (value); } } } /* Default based on previous value. */ /* It should no longer be possible to have NON_LVALUE_EXPR in the default. */ if (value == 0) { value = the_enum->enum_next_value; if (the_enum->enum_overflow) error_at (loc, "overflow in enumeration values"); } /* Even though the underlying type of an enum is unspecified, the type of enumeration constants is explicitly defined as int (6.4.4.3/2 in the C99 Standard). GCC allows any integer type as an extension. */ else if (!int_fits_type_p (value, integer_type_node)) pedwarn (loc, OPT_Wpedantic, "ISO C restricts enumerator values to range of %<int%>"); /* The ISO C Standard mandates enumerators to have type int, even though the underlying type of an enum type is unspecified. However, GCC allows enumerators of any integer type as an extensions. Here we convert any enumerators that fit in an int to type int, to avoid promotions to unsigned types when comparing integers with enumerators that fit in the int range. When -pedantic is given, we would have already warned about those that don't fit. We have to do this here rather than in finish_enum because this value may be used to define more enumerators. */ if (int_fits_type_p (value, integer_type_node)) value = convert (integer_type_node, value); /* Set basis for default for next value. */ the_enum->enum_next_value = build_binary_op (EXPR_LOC_OR_LOC (value, input_location), PLUS_EXPR, value, integer_one_node, 0); the_enum->enum_overflow = tree_int_cst_lt (the_enum->enum_next_value, value); /* Now create a declaration for the enum value name. */ type = TREE_TYPE (value); type = c_common_type_for_size (MAX (TYPE_PRECISION (type), TYPE_PRECISION (integer_type_node)), (TYPE_PRECISION (type) >= TYPE_PRECISION (integer_type_node) && TYPE_UNSIGNED (type))); decl = build_decl (decl_loc, CONST_DECL, name, type); DECL_INITIAL (decl) = convert (type, value); pushdecl (decl); return tree_cons (decl, value, NULL_TREE); } /* Create the FUNCTION_DECL for a function definition. DECLSPECS, DECLARATOR and ATTRIBUTES are the parts of the declaration; they describe the function's name and the type it returns, but twisted together in a fashion that parallels the syntax of C. This function creates a binding context for the function body as well as setting up the FUNCTION_DECL in current_function_decl. Returns 1 on success. If the DECLARATOR is not suitable for a function (it defines a datum instead), we return 0, which tells yyparse to report a parse error. */ int start_function (struct c_declspecs *declspecs, struct c_declarator *declarator, tree attributes) { tree decl1, old_decl; tree restype, resdecl; location_t loc; current_function_returns_value = 0; /* Assume, until we see it does. 
*/ current_function_returns_null = 0; current_function_returns_abnormally = 0; warn_about_return_type = 0; c_switch_stack = NULL; /* Indicate no valid break/continue context by setting these variables to some non-null, non-label value. We'll notice and emit the proper error message in c_finish_bc_stmt. */ c_break_label = c_cont_label = size_zero_node; decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, true, NULL, &attributes, NULL, NULL, DEPRECATED_NORMAL); /* If the declarator is not suitable for a function definition, cause a syntax error. */ if (decl1 == 0 || TREE_CODE (decl1) != FUNCTION_DECL) return 0; loc = DECL_SOURCE_LOCATION (decl1); c_decl_attributes (&decl1, attributes, 0); if (DECL_DECLARED_INLINE_P (decl1) && DECL_UNINLINABLE (decl1) && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl1))) warning_at (loc, OPT_Wattributes, "inline function %qD given attribute noinline", decl1); /* Handle gnu_inline attribute. */ if (declspecs->inline_p && !flag_gnu89_inline && TREE_CODE (decl1) == FUNCTION_DECL && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl1)) || current_function_decl)) { if (declspecs->storage_class != csc_static) DECL_EXTERNAL (decl1) = !DECL_EXTERNAL (decl1); } announce_function (decl1); if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (TREE_TYPE (decl1)))) { error_at (loc, "return type is an incomplete type"); /* Make it return void instead. */ TREE_TYPE (decl1) = build_function_type (void_type_node, TYPE_ARG_TYPES (TREE_TYPE (decl1))); } if (warn_about_return_type) warn_defaults_to (loc, flag_isoc99 ? OPT_Wimplicit_int : (warn_return_type ? OPT_Wreturn_type : OPT_Wimplicit_int), "return type defaults to %<int%>"); /* Make the init_value nonzero so pushdecl knows this is not tentative. error_mark_node is replaced below (in pop_scope) with the BLOCK. */ DECL_INITIAL (decl1) = error_mark_node; /* A nested function is not global. */ if (current_function_decl != 0) TREE_PUBLIC (decl1) = 0; /* If this definition isn't a prototype and we had a prototype declaration before, copy the arg type info from that prototype. */ old_decl = lookup_name_in_scope (DECL_NAME (decl1), current_scope); if (old_decl && TREE_CODE (old_decl) != FUNCTION_DECL) old_decl = 0; current_function_prototype_locus = UNKNOWN_LOCATION; current_function_prototype_built_in = false; current_function_prototype_arg_types = NULL_TREE; if (!prototype_p (TREE_TYPE (decl1))) { if (old_decl != 0 && TREE_CODE (TREE_TYPE (old_decl)) == FUNCTION_TYPE && comptypes (TREE_TYPE (TREE_TYPE (decl1)), TREE_TYPE (TREE_TYPE (old_decl)))) { TREE_TYPE (decl1) = composite_type (TREE_TYPE (old_decl), TREE_TYPE (decl1)); current_function_prototype_locus = DECL_SOURCE_LOCATION (old_decl); current_function_prototype_built_in = C_DECL_BUILTIN_PROTOTYPE (old_decl); current_function_prototype_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl1)); } if (TREE_PUBLIC (decl1)) { /* If there is an external prototype declaration of this function, record its location but do not copy information to this decl. This may be an invisible declaration (built-in or in a scope which has finished) or simply have more refined argument types than any declaration found above. */ struct c_binding *b; for (b = I_SYMBOL_BINDING (DECL_NAME (decl1)); b; b = b->shadowed) if (B_IN_SCOPE (b, external_scope)) break; if (b) { tree ext_decl, ext_type; ext_decl = b->decl; ext_type = b->u.type ? 
b->u.type : TREE_TYPE (ext_decl); if (TREE_CODE (ext_type) == FUNCTION_TYPE && comptypes (TREE_TYPE (TREE_TYPE (decl1)), TREE_TYPE (ext_type))) { current_function_prototype_locus = DECL_SOURCE_LOCATION (ext_decl); current_function_prototype_built_in = C_DECL_BUILTIN_PROTOTYPE (ext_decl); current_function_prototype_arg_types = TYPE_ARG_TYPES (ext_type); } } } } /* Optionally warn of old-fashioned def with no previous prototype. */ if (warn_strict_prototypes && old_decl != error_mark_node && !prototype_p (TREE_TYPE (decl1)) && C_DECL_ISNT_PROTOTYPE (old_decl)) warning_at (loc, OPT_Wstrict_prototypes, "function declaration isn%'t a prototype"); /* Optionally warn of any global def with no previous prototype. */ else if (warn_missing_prototypes && old_decl != error_mark_node && TREE_PUBLIC (decl1) && !MAIN_NAME_P (DECL_NAME (decl1)) && C_DECL_ISNT_PROTOTYPE (old_decl) && !DECL_DECLARED_INLINE_P (decl1)) warning_at (loc, OPT_Wmissing_prototypes, "no previous prototype for %qD", decl1); /* Optionally warn of any def with no previous prototype if the function has already been used. */ else if (warn_missing_prototypes && old_decl != 0 && old_decl != error_mark_node && TREE_USED (old_decl) && !prototype_p (TREE_TYPE (old_decl))) warning_at (loc, OPT_Wmissing_prototypes, "%qD was used with no prototype before its definition", decl1); /* Optionally warn of any global def with no previous declaration. */ else if (warn_missing_declarations && TREE_PUBLIC (decl1) && old_decl == 0 && !MAIN_NAME_P (DECL_NAME (decl1)) && !DECL_DECLARED_INLINE_P (decl1)) warning_at (loc, OPT_Wmissing_declarations, "no previous declaration for %qD", decl1); /* Optionally warn of any def with no previous declaration if the function has already been used. */ else if (warn_missing_declarations && old_decl != 0 && old_decl != error_mark_node && TREE_USED (old_decl) && C_DECL_IMPLICIT (old_decl)) warning_at (loc, OPT_Wmissing_declarations, "%qD was used with no declaration before its definition", decl1); /* This function exists in static storage. (This does not mean `static' in the C sense!) */ TREE_STATIC (decl1) = 1; /* This is the earliest point at which we might know the assembler name of the function. Thus, if it's set before this, die horribly. */ gcc_assert (!DECL_ASSEMBLER_NAME_SET_P (decl1)); /* If #pragma weak was used, mark the decl weak now. */ if (current_scope == file_scope) maybe_apply_pragma_weak (decl1); /* Warn for unlikely, improbable, or stupid declarations of `main'. */ if (warn_main && MAIN_NAME_P (DECL_NAME (decl1))) { if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl1))) != integer_type_node) pedwarn (loc, OPT_Wmain, "return type of %qD is not %<int%>", decl1); else if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (decl1)))) pedwarn (loc, OPT_Wmain, "%<_Atomic%>-qualified return type of %qD", decl1); check_main_parameter_types (decl1); if (!TREE_PUBLIC (decl1)) pedwarn (loc, OPT_Wmain, "%qD is normally a non-static function", decl1); } /* Record the decl so that the function name is defined. If we already have a decl for this name, and it is a FUNCTION_DECL, use the old decl. */ current_function_decl = pushdecl (decl1); push_scope (); declare_parm_level (); restype = TREE_TYPE (TREE_TYPE (current_function_decl)); resdecl = build_decl (loc, RESULT_DECL, NULL_TREE, restype); DECL_ARTIFICIAL (resdecl) = 1; DECL_IGNORED_P (resdecl) = 1; DECL_RESULT (current_function_decl) = resdecl; start_fname_decls (); return 1; } /* Subroutine of store_parm_decls which handles new-style function definitions (prototype format). 
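   For example (illustrative sketch), this path handles

     int add (int a, int b) { return a + b; }

   whereas the old-style (K&R) form is handled by
   store_parm_decls_oldstyle below:

     int add (a, b) int a, b; { return a + b; }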
The parms already have decls, so we need only record them as in effect and complain if any redundant old-style parm decls were written. */ static void store_parm_decls_newstyle (tree fndecl, const struct c_arg_info *arg_info) { tree decl; c_arg_tag *tag; unsigned ix; if (current_scope->bindings) { error_at (DECL_SOURCE_LOCATION (fndecl), "old-style parameter declarations in prototyped " "function definition"); /* Get rid of the old-style declarations. */ pop_scope (); push_scope (); } /* Don't issue this warning for nested functions, and don't issue this warning if we got here because ARG_INFO_TYPES was error_mark_node (this happens when a function definition has just an ellipsis in its parameter list). */ else if (!in_system_header_at (input_location) && !current_function_scope && arg_info->types != error_mark_node) warning_at (DECL_SOURCE_LOCATION (fndecl), OPT_Wtraditional, "traditional C rejects ISO C style function definitions"); /* Now make all the parameter declarations visible in the function body. We can bypass most of the grunt work of pushdecl. */ for (decl = arg_info->parms; decl; decl = DECL_CHAIN (decl)) { DECL_CONTEXT (decl) = current_function_decl; if (DECL_NAME (decl)) { bind (DECL_NAME (decl), decl, current_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); if (!TREE_USED (decl)) warn_if_shadowing (decl); } else error_at (DECL_SOURCE_LOCATION (decl), "parameter name omitted"); } /* Record the parameter list in the function declaration. */ DECL_ARGUMENTS (fndecl) = arg_info->parms; /* Now make all the ancillary declarations visible, likewise. */ for (decl = arg_info->others; decl; decl = DECL_CHAIN (decl)) { DECL_CONTEXT (decl) = current_function_decl; if (DECL_NAME (decl)) bind (DECL_NAME (decl), decl, current_scope, /*invisible=*/false, /*nested=*/(TREE_CODE (decl) == FUNCTION_DECL), UNKNOWN_LOCATION); } /* And all the tag declarations. */ FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag) if (tag->id) bind (tag->id, tag->type, current_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); } /* Subroutine of store_parm_decls which handles old-style function definitions (separate parameter list and declarations). */ static void store_parm_decls_oldstyle (tree fndecl, const struct c_arg_info *arg_info) { struct c_binding *b; tree parm, decl, last; tree parmids = arg_info->parms; hash_set<tree> seen_args; if (!in_system_header_at (input_location)) warning_at (DECL_SOURCE_LOCATION (fndecl), OPT_Wold_style_definition, "old-style function definition"); /* Match each formal parameter name with its declaration. Save each decl in the appropriate TREE_PURPOSE slot of the parmids chain. */ for (parm = parmids; parm; parm = TREE_CHAIN (parm)) { if (TREE_VALUE (parm) == 0) { error_at (DECL_SOURCE_LOCATION (fndecl), "parameter name missing from parameter list"); TREE_PURPOSE (parm) = 0; continue; } b = I_SYMBOL_BINDING (TREE_VALUE (parm)); if (b && B_IN_CURRENT_SCOPE (b)) { decl = b->decl; /* Skip erroneous parameters. */ if (decl == error_mark_node) continue; /* If we got something other than a PARM_DECL it is an error. */ if (TREE_CODE (decl) != PARM_DECL) error_at (DECL_SOURCE_LOCATION (decl), "%qD declared as a non-parameter", decl); /* If the declaration is already marked, we have a duplicate name. Complain and ignore the duplicate. 
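   For instance (illustrative sketch):

     int f (x, x) int x; { return x; }   rejected: multiple parameters
                                         named 'x'
     int g (v) void v; { return 0; }     rejected: parameter 'v'
                                         declared with void type, then
                                         adjusted to int for recovery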
*/ else if (seen_args.contains (decl)) { error_at (DECL_SOURCE_LOCATION (decl), "multiple parameters named %qD", decl); TREE_PURPOSE (parm) = 0; continue; } /* If the declaration says "void", complain and turn it into an int. */ else if (VOID_TYPE_P (TREE_TYPE (decl))) { error_at (DECL_SOURCE_LOCATION (decl), "parameter %qD declared with void type", decl); TREE_TYPE (decl) = integer_type_node; DECL_ARG_TYPE (decl) = integer_type_node; layout_decl (decl, 0); } warn_if_shadowing (decl); } /* If no declaration found, default to int. */ else { /* FIXME diagnostics: This should be the location of the argument, not the FNDECL. E.g., for an old-style declaration int f10(v) { blah; } We should use the location of the V, not the F10. Unfortunately, the V is an IDENTIFIER_NODE which has no location. In the future we need locations for c_arg_info entries. See gcc.dg/Wshadow-3.c for an example of this problem. */ decl = build_decl (DECL_SOURCE_LOCATION (fndecl), PARM_DECL, TREE_VALUE (parm), integer_type_node); DECL_ARG_TYPE (decl) = TREE_TYPE (decl); pushdecl (decl); warn_if_shadowing (decl); if (flag_isoc99) pedwarn (DECL_SOURCE_LOCATION (decl), OPT_Wimplicit_int, "type of %qD defaults to %<int%>", decl); else warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wmissing_parameter_type, "type of %qD defaults to %<int%>", decl); } TREE_PURPOSE (parm) = decl; seen_args.add (decl); } /* Now examine the parms chain for incomplete declarations and declarations with no corresponding names. */ for (b = current_scope->bindings; b; b = b->prev) { parm = b->decl; if (TREE_CODE (parm) != PARM_DECL) continue; if (TREE_TYPE (parm) != error_mark_node && !COMPLETE_TYPE_P (TREE_TYPE (parm))) { error_at (DECL_SOURCE_LOCATION (parm), "parameter %qD has incomplete type", parm); TREE_TYPE (parm) = error_mark_node; } if (!seen_args.contains (parm)) { error_at (DECL_SOURCE_LOCATION (parm), "declaration for parameter %qD but no such parameter", parm); /* Pretend the parameter was not missing. This gets us to a standard state and minimizes further error messages. */ parmids = chainon (parmids, tree_cons (parm, 0, 0)); } } /* Chain the declarations together in the order of the list of names. Store that chain in the function decl, replacing the list of names. Update the current scope to match. */ DECL_ARGUMENTS (fndecl) = 0; for (parm = parmids; parm; parm = TREE_CHAIN (parm)) if (TREE_PURPOSE (parm)) break; if (parm && TREE_PURPOSE (parm)) { last = TREE_PURPOSE (parm); DECL_ARGUMENTS (fndecl) = last; for (parm = TREE_CHAIN (parm); parm; parm = TREE_CHAIN (parm)) if (TREE_PURPOSE (parm)) { DECL_CHAIN (last) = TREE_PURPOSE (parm); last = TREE_PURPOSE (parm); } DECL_CHAIN (last) = 0; } /* If there was a previous prototype, set the DECL_ARG_TYPE of each argument according to the type previously specified, and report any mismatches. 
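(Sketch, not from the original source, using a hypothetical foo -- the
   classic case this loop handles:

       int foo (float);
       int foo (x) float x; { return 0; }

   Here x's DECL_ARG_TYPE is double, its default promotion, while the
   prototype says float; because the unpromoted TREE_TYPE still matches,
   the code below adjusts DECL_ARG_TYPE back to float and emits only a
   -Wpedantic note rather than an error.)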
*/ if (current_function_prototype_arg_types) { tree type; for (parm = DECL_ARGUMENTS (fndecl), type = current_function_prototype_arg_types; parm || (type && TREE_VALUE (type) != error_mark_node && (TYPE_MAIN_VARIANT (TREE_VALUE (type)) != void_type_node)); parm = DECL_CHAIN (parm), type = TREE_CHAIN (type)) { if (parm == 0 || type == 0 || TYPE_MAIN_VARIANT (TREE_VALUE (type)) == void_type_node) { if (current_function_prototype_built_in) warning_at (DECL_SOURCE_LOCATION (fndecl), 0, "number of arguments doesn%'t match " "built-in prototype"); else { /* FIXME diagnostics: This should be the location of FNDECL, but there is bug when a prototype is declared inside function context, but defined outside of it (e.g., gcc.dg/pr15698-2.c). In which case FNDECL gets the location of the prototype, not the definition. */ error_at (input_location, "number of arguments doesn%'t match prototype"); error_at (current_function_prototype_locus, "prototype declaration"); } break; } /* Type for passing arg must be consistent with that declared for the arg. ISO C says we take the unqualified type for parameters declared with qualified type. */ if (TREE_TYPE (parm) != error_mark_node && TREE_TYPE (type) != error_mark_node && ((TYPE_ATOMIC (DECL_ARG_TYPE (parm)) != TYPE_ATOMIC (TREE_VALUE (type))) || !comptypes (TYPE_MAIN_VARIANT (DECL_ARG_TYPE (parm)), TYPE_MAIN_VARIANT (TREE_VALUE (type))))) { if ((TYPE_ATOMIC (DECL_ARG_TYPE (parm)) == TYPE_ATOMIC (TREE_VALUE (type))) && (TYPE_MAIN_VARIANT (TREE_TYPE (parm)) == TYPE_MAIN_VARIANT (TREE_VALUE (type)))) { /* Adjust argument to match prototype. E.g. a previous `int foo(float);' prototype causes `int foo(x) float x; {...}' to be treated like `int foo(float x) {...}'. This is particularly useful for argument types like uid_t. */ DECL_ARG_TYPE (parm) = TREE_TYPE (parm); if (targetm.calls.promote_prototypes (TREE_TYPE (current_function_decl)) && INTEGRAL_TYPE_P (TREE_TYPE (parm)) && TYPE_PRECISION (TREE_TYPE (parm)) < TYPE_PRECISION (integer_type_node)) DECL_ARG_TYPE (parm) = c_type_promotes_to (TREE_TYPE (parm)); /* ??? Is it possible to get here with a built-in prototype or will it always have been diagnosed as conflicting with an old-style definition and discarded? */ if (current_function_prototype_built_in) warning_at (DECL_SOURCE_LOCATION (parm), OPT_Wpedantic, "promoted argument %qD " "doesn%'t match built-in prototype", parm); else { pedwarn (DECL_SOURCE_LOCATION (parm), OPT_Wpedantic, "promoted argument %qD " "doesn%'t match prototype", parm); pedwarn (current_function_prototype_locus, OPT_Wpedantic, "prototype declaration"); } } else { if (current_function_prototype_built_in) warning_at (DECL_SOURCE_LOCATION (parm), 0, "argument %qD doesn%'t match " "built-in prototype", parm); else { error_at (DECL_SOURCE_LOCATION (parm), "argument %qD doesn%'t match prototype", parm); error_at (current_function_prototype_locus, "prototype declaration"); } } } } TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = 0; } /* Otherwise, create a prototype that would match. 
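(Illustrative sketch, assumptions mine -- for an old-style definition
   with no prior prototype, say

       int bar (c)
            char c;
       { return c; }

   the loop below collects the promoted DECL_ARG_TYPEs, so bar's recorded
   actual argument types become (int, void), char promoting to int, and
   they are attached to a fresh variant of the function type so that other
   functions of the same type are unaffected.)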
*/ else { tree actual = 0, last = 0, type; for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm)) { type = tree_cons (NULL_TREE, DECL_ARG_TYPE (parm), NULL_TREE); if (last) TREE_CHAIN (last) = type; else actual = type; last = type; } type = tree_cons (NULL_TREE, void_type_node, NULL_TREE); if (last) TREE_CHAIN (last) = type; else actual = type; /* We are going to assign a new value for the TYPE_ACTUAL_ARG_TYPES of the type of this function, but we need to avoid having this affect the types of other similarly-typed functions, so we must first force the generation of an identical (but separate) type node for the relevant function type. The new node we create will be a variant of the main variant of the original function type. */ TREE_TYPE (fndecl) = build_variant_type_copy (TREE_TYPE (fndecl)); TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = actual; } } /* Store parameter declarations passed in ARG_INFO into the current function declaration. */ void store_parm_decls_from (struct c_arg_info *arg_info) { current_function_arg_info = arg_info; store_parm_decls (); } /* Store the parameter declarations into the current function declaration. This is called after parsing the parameter declarations, before digesting the body of the function. For an old-style definition, construct a prototype out of the old-style parameter declarations and inject it into the function's type. */ void store_parm_decls (void) { tree fndecl = current_function_decl; bool proto; /* The argument information block for FNDECL. */ struct c_arg_info *arg_info = current_function_arg_info; current_function_arg_info = 0; /* True if this definition is written with a prototype. Note: despite C99 6.7.5.3p14, we can *not* treat an empty argument list in a function definition as equivalent to (void) -- an empty argument list specifies the function has no parameters, but only (void) sets up a prototype for future calls. */ proto = arg_info->types != 0; if (proto) store_parm_decls_newstyle (fndecl, arg_info); else store_parm_decls_oldstyle (fndecl, arg_info); /* The next call to push_scope will be a function body. */ next_is_function_body = true; /* Write a record describing this function definition to the prototypes file (if requested). */ gen_aux_info_record (fndecl, 1, 0, proto); /* Initialize the RTL code for the function. */ allocate_struct_function (fndecl, false); if (warn_unused_local_typedefs) cfun->language = ggc_cleared_alloc<language_function> (); /* Begin the statement tree for this function. */ DECL_SAVED_TREE (fndecl) = push_stmt_list (); /* ??? Insert the contents of the pending sizes list into the function to be evaluated. The only reason left to have this is void foo(int n, int array[n++]) because we throw away the array type in favor of a pointer type, and thus won't naturally see the SAVE_EXPR containing the increment. All other pending sizes would be handled by gimplify_parameters. */ if (arg_info->pending_sizes) add_stmt (arg_info->pending_sizes); } /* Store PARM_DECLs in PARMS into scope temporarily. Used for c_finish_omp_declare_simd for function prototypes. No diagnostics should be done. */ void temp_store_parm_decls (tree fndecl, tree parms) { push_scope (); for (tree p = parms; p; p = DECL_CHAIN (p)) { DECL_CONTEXT (p) = fndecl; if (DECL_NAME (p)) bind (DECL_NAME (p), p, current_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); } } /* Undo what temp_store_parm_decls did. 
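(Usage sketch, not in the original -- the intended pairing around parsing
   clauses that name prototype parameters is

       temp_store_parm_decls (fndecl, parms);
       ... parse the declare simd clauses ...
       temp_pop_parm_decls ();

   which makes the PARM_DECLs visible just long enough for name lookup,
   with no diagnostics and no permanent scope.)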
*/ void temp_pop_parm_decls (void) { /* Clear all bindings in this temporary scope, so that pop_scope doesn't create a BLOCK. */ struct c_binding *b = current_scope->bindings; current_scope->bindings = NULL; for (; b; b = free_binding_and_advance (b)) { gcc_assert (TREE_CODE (b->decl) == PARM_DECL || b->decl == error_mark_node); gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; if (b->shadowed && b->shadowed->u.type) TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type; } pop_scope (); } /* Finish up a function declaration and compile that function all the way to assembler language output. Then free the storage for the function definition. This is called after parsing the body of the function definition. */ void finish_function (void) { tree fndecl = current_function_decl; if (c_dialect_objc ()) objc_finish_function (); if (TREE_CODE (fndecl) == FUNCTION_DECL && targetm.calls.promote_prototypes (TREE_TYPE (fndecl))) { tree args = DECL_ARGUMENTS (fndecl); for (; args; args = DECL_CHAIN (args)) { tree type = TREE_TYPE (args); if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)) DECL_ARG_TYPE (args) = c_type_promotes_to (type); } } if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node) BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl; /* Must mark the RESULT_DECL as being in this function. */ if (DECL_RESULT (fndecl) && DECL_RESULT (fndecl) != error_mark_node) DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl; if (MAIN_NAME_P (DECL_NAME (fndecl)) && flag_hosted && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (fndecl))) == integer_type_node && flag_isoc99) { /* Hack. We don't want the middle-end to warn that this return is unreachable, so we mark its location as special. Using UNKNOWN_LOCATION has the problem that it gets clobbered in annotate_one_with_locus. A cleaner solution might be to ensure ! should_carry_locus_p (stmt), but that needs a flag. */ c_finish_return (BUILTINS_LOCATION, integer_zero_node, NULL_TREE); } /* Tie off the statement tree for this function. */ DECL_SAVED_TREE (fndecl) = pop_stmt_list (DECL_SAVED_TREE (fndecl)); /* If the function has _Cilk_spawn in front of a function call inside it i.e. it is a spawning function, then add the appropriate Cilk plus functions inside. */ if (fn_contains_cilk_spawn_p (cfun)) cfun->cilk_frame_decl = insert_cilk_frame (fndecl); finish_fname_decls (); /* Complain if there's just no return statement. */ if (warn_return_type && TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE && !current_function_returns_value && !current_function_returns_null /* Don't complain if we are no-return. */ && !current_function_returns_abnormally /* Don't complain if we are declared noreturn. */ && !TREE_THIS_VOLATILE (fndecl) /* Don't warn for main(). */ && !MAIN_NAME_P (DECL_NAME (fndecl)) /* Or if they didn't actually specify a return type. */ && !C_FUNCTION_IMPLICIT_INT (fndecl) /* Normally, with -Wreturn-type, flow will complain, but we might optimize out static functions. */ && !TREE_PUBLIC (fndecl)) { warning (OPT_Wreturn_type, "no return statement in function returning non-void"); TREE_NO_WARNING (fndecl) = 1; } /* Complain about parameters that are only set, but never otherwise used. 
*/
*/ if (warn_unused_but_set_parameter) { tree decl; for (decl = DECL_ARGUMENTS (fndecl); decl; decl = DECL_CHAIN (decl)) if (TREE_USED (decl) && TREE_CODE (decl) == PARM_DECL && !DECL_READ_P (decl) && DECL_NAME (decl) && !DECL_ARTIFICIAL (decl) && !TREE_NO_WARNING (decl)) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wunused_but_set_parameter, "parameter %qD set but not used", decl); } /* Complain about locally defined typedefs that are not used in this function. */ maybe_warn_unused_local_typedefs (); /* Store the end of the function, so that we get good line number info for the epilogue. */ cfun->function_end_locus = input_location; /* Finalize the ELF visibility for the function. */ c_determine_visibility (fndecl); /* For GNU C extern inline functions disregard inline limits. */ if (DECL_EXTERNAL (fndecl) && DECL_DECLARED_INLINE_P (fndecl)) DECL_DISREGARD_INLINE_LIMITS (fndecl) = 1; /* Genericize before inlining. Delay genericizing nested functions until their parent function is genericized. Since finalizing requires GENERIC, delay that as well. */ if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node && !undef_nested_function) { if (!decl_function_context (fndecl)) { invoke_plugin_callbacks (PLUGIN_PRE_GENERICIZE, fndecl); c_genericize (fndecl); /* ??? Objc emits functions after finalizing the compilation unit. This should be cleaned up later and this conditional removed. */ if (symtab->global_info_ready) { cgraph_node::add_new_function (fndecl, false); return; } cgraph_node::finalize_function (fndecl, false); } else { /* Register this function with cgraph just far enough to get it added to our parent's nested function list. Handy, since the C front end doesn't have such a list. */ (void) cgraph_node::get_create (fndecl); } } if (!decl_function_context (fndecl)) undef_nested_function = false; if (cfun->language != NULL) { ggc_free (cfun->language); cfun->language = NULL; } /* We're leaving the context of this function, so zap cfun. It's still in DECL_STRUCT_FUNCTION, and we'll restore it in tree_rest_of_compilation. */ set_cfun (NULL); current_function_decl = NULL; } /* Check the declarations given in a for-loop for satisfying the C99 constraints. If exactly one such decl is found, return it. LOC is the location of the opening parenthesis of the for loop. The last parameter allows you to control the "for loop initial declarations are only allowed in C99 mode". Normally, you should pass flag_isoc99 as that parameter. But in some cases (Objective-C foreach loop, for example) we want to run the checks in this function even if not in C99 mode, so we allow the caller to turn off the error about not being in C99 mode. */ tree check_for_loop_decls (location_t loc, bool turn_off_iso_c99_error) { struct c_binding *b; tree one_decl = NULL_TREE; int n_decls = 0; if (!turn_off_iso_c99_error) { static bool hint = true; /* If we get here, declarations have been used in a for loop without the C99 for loop scope. This doesn't make much sense, so don't allow it. */ error_at (loc, "%<for%> loop initial declarations " "are only allowed in C99 or C11 mode"); if (hint) { inform (loc, "use option -std=c99, -std=gnu99, -std=c11 or -std=gnu11 " "to compile your code"); hint = false; } return NULL_TREE; } /* C99 subclause 6.8.5 paragraph 3: [#3] The declaration part of a for statement shall only declare identifiers for objects having storage class auto or register. 
It isn't clear whether, in this sentence, "identifiers" binds to "shall only declare" or to "objects" - that is, whether all identifiers declared must be identifiers for objects, or whether the restriction only applies to those that are. (A question on this in comp.std.c in November 2000 received no answer.) We implement the strictest interpretation, to avoid creating an extension which later causes problems. */ for (b = current_scope->bindings; b; b = b->prev) { tree id = b->id; tree decl = b->decl; if (!id) continue; switch (TREE_CODE (decl)) { case VAR_DECL: { location_t decl_loc = DECL_SOURCE_LOCATION (decl); if (TREE_STATIC (decl)) error_at (decl_loc, "declaration of static variable %qD in %<for%> loop " "initial declaration", decl); else if (DECL_EXTERNAL (decl)) error_at (decl_loc, "declaration of %<extern%> variable %qD in %<for%> loop " "initial declaration", decl); } break; case RECORD_TYPE: error_at (loc, "%<struct %E%> declared in %<for%> loop initial " "declaration", id); break; case UNION_TYPE: error_at (loc, "%<union %E%> declared in %<for%> loop initial declaration", id); break; case ENUMERAL_TYPE: error_at (loc, "%<enum %E%> declared in %<for%> loop " "initial declaration", id); break; default: error_at (loc, "declaration of non-variable " "%qD in %<for%> loop initial declaration", decl); } n_decls++; one_decl = decl; } return n_decls == 1 ? one_decl : NULL_TREE; } /* Save and reinitialize the variables used during compilation of a C function. */ void c_push_function_context (void) { struct language_function *p = cfun->language; /* cfun->language might have been already allocated by the use of -Wunused-local-typedefs. In that case, just re-use it. */ if (p == NULL) cfun->language = p = ggc_cleared_alloc<language_function> (); p->base.x_stmt_tree = c_stmt_tree; c_stmt_tree.x_cur_stmt_list = vec_safe_copy (c_stmt_tree.x_cur_stmt_list); p->x_break_label = c_break_label; p->x_cont_label = c_cont_label; p->x_switch_stack = c_switch_stack; p->arg_info = current_function_arg_info; p->returns_value = current_function_returns_value; p->returns_null = current_function_returns_null; p->returns_abnormally = current_function_returns_abnormally; p->warn_about_return_type = warn_about_return_type; push_function_context (); } /* Restore the variables used during compilation of a C function. */ void c_pop_function_context (void) { struct language_function *p; pop_function_context (); p = cfun->language; /* When -Wunused-local-typedefs is in effect, cfun->languages is used to store data throughout the life time of the current cfun, So don't deallocate it. */ if (!warn_unused_local_typedefs) cfun->language = NULL; if (DECL_STRUCT_FUNCTION (current_function_decl) == 0 && DECL_SAVED_TREE (current_function_decl) == NULL_TREE) { /* Stop pointing to the local nodes about to be freed. */ /* But DECL_INITIAL must remain nonzero so we know this was an actual function definition. 
*/ DECL_INITIAL (current_function_decl) = error_mark_node; DECL_ARGUMENTS (current_function_decl) = 0; } c_stmt_tree = p->base.x_stmt_tree; p->base.x_stmt_tree.x_cur_stmt_list = NULL; c_break_label = p->x_break_label; c_cont_label = p->x_cont_label; c_switch_stack = p->x_switch_stack; current_function_arg_info = p->arg_info; current_function_returns_value = p->returns_value; current_function_returns_null = p->returns_null; current_function_returns_abnormally = p->returns_abnormally; warn_about_return_type = p->warn_about_return_type; } /* The functions below are required for functionality of doing function at once processing in the C front end. Currently these functions are not called from anywhere in the C front end, but as these changes continue, that will change. */ /* Returns the stmt_tree (if any) to which statements are currently being added. If there is no active statement-tree, NULL is returned. */ stmt_tree current_stmt_tree (void) { return &c_stmt_tree; } /* Return the global value of T as a symbol. */ tree identifier_global_value (tree t) { struct c_binding *b; for (b = I_SYMBOL_BINDING (t); b; b = b->shadowed) if (B_IN_FILE_SCOPE (b) || B_IN_EXTERNAL_SCOPE (b)) return b->decl; return 0; } /* In C, the only C-linkage public declaration is at file scope. */ tree c_linkage_bindings (tree name) { return identifier_global_value (name); } /* Record a builtin type for C. If NAME is non-NULL, it is the name used; otherwise the name is found in ridpointers from RID_INDEX. */ void record_builtin_type (enum rid rid_index, const char *name, tree type) { tree id, decl; if (name == 0) id = ridpointers[(int) rid_index]; else id = get_identifier (name); decl = build_decl (UNKNOWN_LOCATION, TYPE_DECL, id, type); pushdecl (decl); if (debug_hooks->type_decl) debug_hooks->type_decl (decl, false); } /* Build the void_list_node (void_type_node having been created). */ tree build_void_list_node (void) { tree t = build_tree_list (NULL_TREE, void_type_node); return t; } /* Return a c_parm structure with the given SPECS, ATTRS and DECLARATOR. */ struct c_parm * build_c_parm (struct c_declspecs *specs, tree attrs, struct c_declarator *declarator) { struct c_parm *ret = XOBNEW (&parser_obstack, struct c_parm); ret->specs = specs; ret->attrs = attrs; ret->declarator = declarator; return ret; } /* Return a declarator with nested attributes. TARGET is the inner declarator to which these attributes apply. ATTRS are the attributes. */ struct c_declarator * build_attrs_declarator (tree attrs, struct c_declarator *target) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_attrs; ret->declarator = target; ret->u.attrs = attrs; return ret; } /* Return a declarator for a function with arguments specified by ARGS and return type specified by TARGET. */ struct c_declarator * build_function_declarator (struct c_arg_info *args, struct c_declarator *target) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_function; ret->declarator = target; ret->u.arg_info = args; return ret; } /* Return a declarator for the identifier IDENT (which may be NULL_TREE for an abstract declarator). */ struct c_declarator * build_id_declarator (tree ident) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_id; ret->declarator = 0; ret->u.id = ident; /* Default value - may get reset to a more precise location. 
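(Aside, not part of the original source -- the parser composes
   declarators inside-out, so for a declaration like "int *p" it builds
   roughly

       struct c_declarator *d = build_id_declarator (get_identifier ("p"));
       d = make_pointer_declarator (NULL, d);

   leaving the cdk_id node innermost, with its id_loc defaulted as below.)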
*/ ret->id_loc = input_location; return ret; } /* Return something to represent absolute declarators containing a *. TARGET is the absolute declarator that the * contains. TYPE_QUALS_ATTRS is a structure for type qualifiers and attributes to apply to the pointer type. */ struct c_declarator * make_pointer_declarator (struct c_declspecs *type_quals_attrs, struct c_declarator *target) { tree attrs; int quals = 0; struct c_declarator *itarget = target; struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); if (type_quals_attrs) { attrs = type_quals_attrs->attrs; quals = quals_from_declspecs (type_quals_attrs); if (attrs != NULL_TREE) itarget = build_attrs_declarator (attrs, target); } ret->kind = cdk_pointer; ret->declarator = itarget; ret->u.pointer_quals = quals; return ret; } /* Return a pointer to a structure for an empty list of declaration specifiers. */ struct c_declspecs * build_null_declspecs (void) { struct c_declspecs *ret = XOBNEW (&parser_obstack, struct c_declspecs); memset (&ret->locations, 0, cdw_number_of_elements); ret->type = 0; ret->expr = 0; ret->decl_attr = 0; ret->attrs = 0; ret->align_log = -1; ret->typespec_word = cts_none; ret->storage_class = csc_none; ret->expr_const_operands = true; ret->declspecs_seen_p = false; ret->typespec_kind = ctsk_none; ret->non_sc_seen_p = false; ret->typedef_p = false; ret->explicit_signed_p = false; ret->deprecated_p = false; ret->default_int_p = false; ret->long_p = false; ret->long_long_p = false; ret->short_p = false; ret->signed_p = false; ret->unsigned_p = false; ret->complex_p = false; ret->inline_p = false; ret->noreturn_p = false; ret->thread_p = false; ret->thread_gnu_p = false; ret->const_p = false; ret->volatile_p = false; ret->atomic_p = false; ret->restrict_p = false; ret->saturating_p = false; ret->alignas_p = false; ret->address_space = ADDR_SPACE_GENERIC; return ret; } /* Add the address space ADDRSPACE to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_addrspace (source_location location, struct c_declspecs *specs, addr_space_t as) { specs->non_sc_seen_p = true; specs->declspecs_seen_p = true; if (!ADDR_SPACE_GENERIC_P (specs->address_space) && specs->address_space != as) error ("incompatible address space qualifiers %qs and %qs", c_addr_space_name (as), c_addr_space_name (specs->address_space)); else { specs->address_space = as; specs->locations[cdw_address_space] = location; } return specs; } /* Add the type qualifier QUAL to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_qual (source_location loc, struct c_declspecs *specs, tree qual) { enum rid i; bool dupe = false; specs->non_sc_seen_p = true; specs->declspecs_seen_p = true; gcc_assert (TREE_CODE (qual) == IDENTIFIER_NODE && C_IS_RESERVED_WORD (qual)); i = C_RID_CODE (qual); switch (i) { case RID_CONST: dupe = specs->const_p; specs->const_p = true; specs->locations[cdw_const] = loc; break; case RID_VOLATILE: dupe = specs->volatile_p; specs->volatile_p = true; specs->locations[cdw_volatile] = loc; break; case RID_RESTRICT: dupe = specs->restrict_p; specs->restrict_p = true; specs->locations[cdw_restrict] = loc; break; case RID_ATOMIC: dupe = specs->atomic_p; specs->atomic_p = true; break; default: gcc_unreachable (); } if (dupe) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %qE", qual); return specs; } /* Add the type specifier TYPE to the declaration specifiers SPECS, returning SPECS. 
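(Illustrative note, not in the original -- specifiers fold into SPECS one
   keyword at a time, so

       unsigned long long int x;

   arrives as four calls: unsigned_p is set, then long_p, then long_long_p
   (with a C90 pedwarn), and finally typespec_word becomes cts_int; a
   further "long" would trip the "long long long is too long for GCC"
   error below.)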
*/ struct c_declspecs * declspecs_add_type (location_t loc, struct c_declspecs *specs, struct c_typespec spec) { tree type = spec.spec; specs->non_sc_seen_p = true; specs->declspecs_seen_p = true; specs->typespec_kind = spec.kind; if (TREE_DEPRECATED (type)) specs->deprecated_p = true; /* Handle type specifier keywords. */ if (TREE_CODE (type) == IDENTIFIER_NODE && C_IS_RESERVED_WORD (type) && C_RID_CODE (type) != RID_CXX_COMPAT_WARN) { enum rid i = C_RID_CODE (type); if (specs->type) { error_at (loc, "two or more data types in declaration specifiers"); return specs; } if ((int) i <= (int) RID_LAST_MODIFIER) { /* "long", "short", "signed", "unsigned", "_Complex" or "_Sat". */ bool dupe = false; switch (i) { case RID_LONG: if (specs->long_long_p) { error_at (loc, "%<long long long%> is too long for GCC"); break; } if (specs->long_p) { if (specs->typespec_word == cts_double) { error_at (loc, ("both %<long long%> and %<double%> in " "declaration specifiers")); break; } pedwarn_c90 (loc, OPT_Wlong_long, "ISO C90 does not support %<long long%>"); specs->long_long_p = 1; specs->locations[cdw_long_long] = loc; break; } if (specs->short_p) error_at (loc, ("both %<long%> and %<short%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<long%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<long%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_int_n) error_at (loc, ("both %<long%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<long%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_char) error_at (loc, ("both %<long%> and %<char%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<long%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<long%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<long%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<long%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->long_p = true; specs->locations[cdw_long] = loc; } break; case RID_SHORT: dupe = specs->short_p; if (specs->long_p) error_at (loc, ("both %<long%> and %<short%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<short%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<short%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_int_n) error_at (loc, ("both %<short%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<short%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_char) error_at (loc, ("both %<short%> and %<char%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<short%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<short%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == 
cts_dfloat32) error_at (loc, ("both %<short%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<short%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<short%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->short_p = true; specs->locations[cdw_short] = loc; } break; case RID_SIGNED: dupe = specs->signed_p; if (specs->unsigned_p) error_at (loc, ("both %<signed%> and %<unsigned%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<signed%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<signed%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<signed%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<signed%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<signed%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<signed%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<signed%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<signed%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->signed_p = true; specs->locations[cdw_signed] = loc; } break; case RID_UNSIGNED: dupe = specs->unsigned_p; if (specs->signed_p) error_at (loc, ("both %<signed%> and %<unsigned%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<unsigned%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<unsigned%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<unsigned%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<unsigned%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<unsigned%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<unsigned%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<unsigned%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<unsigned%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->unsigned_p = true; specs->locations[cdw_unsigned] = loc; } break; case RID_COMPLEX: dupe = specs->complex_p; if (!in_system_header_at (loc)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support complex types"); if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<complex%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<complex%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<complex%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both 
%<complex%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<complex%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<complex%> and %<_Decimal128%> in " "declaration specifiers")); else if (specs->typespec_word == cts_fract) error_at (loc, ("both %<complex%> and %<_Fract%> in " "declaration specifiers")); else if (specs->typespec_word == cts_accum) error_at (loc, ("both %<complex%> and %<_Accum%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<complex%> and %<_Sat%> in " "declaration specifiers")); else { specs->complex_p = true; specs->locations[cdw_complex] = loc; } break; case RID_SAT: dupe = specs->saturating_p; pedwarn (loc, OPT_Wpedantic, "ISO C does not support saturating types"); if (specs->typespec_word == cts_int_n) { error_at (loc, ("both %<_Sat%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); } else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<_Sat%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<_Sat%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<_Sat%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_char) error_at (loc, ("both %<_Sat%> and %<char%> in " "declaration specifiers")); else if (specs->typespec_word == cts_int) error_at (loc, ("both %<_Sat%> and %<int%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<_Sat%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<_Sat%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<_Sat%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<_Sat%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<_Sat%> and %<_Decimal128%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<_Sat%> and %<complex%> in " "declaration specifiers")); else { specs->saturating_p = true; specs->locations[cdw_saturating] = loc; } break; default: gcc_unreachable (); } if (dupe) error_at (loc, "duplicate %qE", type); return specs; } else { /* "void", "_Bool", "char", "int", "float", "double", "_Decimal32", "__intN", "_Decimal64", "_Decimal128", "_Fract", "_Accum" or "__auto_type". 
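(Sketch, not from the original source -- this guard is what rejects, e.g.,

       int double x;

   with "two or more data types in declaration specifiers", because "int"
   has already set typespec_word by the time "double" is folded in.)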
*/ if (specs->typespec_word != cts_none) { error_at (loc, "two or more data types in declaration specifiers"); return specs; } switch (i) { case RID_AUTO_TYPE: if (specs->long_p) error_at (loc, ("both %<long%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<__auto_type%> in " "declaration specifiers")); else { specs->typespec_word = cts_auto_type; specs->locations[cdw_typespec] = loc; } return specs; case RID_INT_N_0: case RID_INT_N_1: case RID_INT_N_2: case RID_INT_N_3: specs->int_n_idx = i - RID_INT_N_0; if (!in_system_header_at (input_location)) pedwarn (loc, OPT_Wpedantic, "ISO C does not support %<__int%d%> types", int_n_data[specs->int_n_idx].bitsize); if (specs->long_p) error_at (loc, ("both %<__int%d%> and %<long%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->short_p) error_at (loc, ("both %<__int%d%> and %<short%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (! int_n_enabled_p [specs->int_n_idx]) error_at (loc, "%<__int%d%> is not supported on this target", int_n_data[specs->int_n_idx].bitsize); else { specs->typespec_word = cts_int_n; specs->locations[cdw_typespec] = loc; } return specs; case RID_VOID: if (specs->long_p) error_at (loc, ("both %<long%> and %<void%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<void%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<void%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<void%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<void%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<void%> in " "declaration specifiers")); else { specs->typespec_word = cts_void; specs->locations[cdw_typespec] = loc; } return specs; case RID_BOOL: if (!in_system_header_at (loc)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support boolean types"); if (specs->long_p) error_at (loc, ("both %<long%> and %<_Bool%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<_Bool%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<_Bool%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<_Bool%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<_Bool%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<_Bool%> in " "declaration specifiers")); else { specs->typespec_word = cts_bool; specs->locations[cdw_typespec] = loc; } return specs; case RID_CHAR: if (specs->long_p) error_at (loc, ("both %<long%> and %<char%> in " "declaration 
specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<char%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<char%> in " "declaration specifiers")); else { specs->typespec_word = cts_char; specs->locations[cdw_typespec] = loc; } return specs; case RID_INT: if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<int%> in " "declaration specifiers")); else { specs->typespec_word = cts_int; specs->locations[cdw_typespec] = loc; } return specs; case RID_FLOAT: if (specs->long_p) error_at (loc, ("both %<long%> and %<float%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<float%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<float%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<float%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<float%> in " "declaration specifiers")); else { specs->typespec_word = cts_float; specs->locations[cdw_typespec] = loc; } return specs; case RID_DOUBLE: if (specs->long_long_p) error_at (loc, ("both %<long long%> and %<double%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<double%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<double%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<double%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<double%> in " "declaration specifiers")); else { specs->typespec_word = cts_double; specs->locations[cdw_typespec] = loc; } return specs; case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: { const char *str; if (i == RID_DFLOAT32) str = "_Decimal32"; else if (i == RID_DFLOAT64) str = "_Decimal64"; else str = "_Decimal128"; if (specs->long_long_p) error_at (loc, ("both %<long long%> and %<%s%> in " "declaration specifiers"), str); if (specs->long_p) error_at (loc, ("both %<long%> and %<%s%> in " "declaration specifiers"), str); else if (specs->short_p) error_at (loc, ("both %<short%> and %<%s%> in " "declaration specifiers"), str); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<%s%> in " "declaration specifiers"), str); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<%s%> in " "declaration specifiers"), str); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<%s%> in " "declaration specifiers"), str); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<%s%> in " "declaration specifiers"), str); else if (i == RID_DFLOAT32) specs->typespec_word = cts_dfloat32; else if (i == RID_DFLOAT64) specs->typespec_word = cts_dfloat64; else specs->typespec_word = cts_dfloat128; specs->locations[cdw_typespec] = loc; } if (!targetm.decimal_float_supported_p ()) error_at (loc, ("decimal floating point not supported " "for this target")); pedwarn (loc, OPT_Wpedantic, "ISO C does not support decimal floating point"); return specs; case RID_FRACT: case RID_ACCUM: { const char *str; if (i == RID_FRACT) str = "_Fract"; else str = "_Accum"; if (specs->complex_p) error_at (loc, ("both %<complex%> and %<%s%> in " "declaration specifiers"), str); else if (i == RID_FRACT) specs->typespec_word = cts_fract; else specs->typespec_word = cts_accum; specs->locations[cdw_typespec] = loc; } if 
(!targetm.fixed_point_supported_p ()) error_at (loc, "fixed-point types not supported for this target"); pedwarn (loc, OPT_Wpedantic, "ISO C does not support fixed-point types"); return specs; default: /* ObjC reserved word "id", handled below. */ break; } } } /* Now we have a typedef (a TYPE_DECL node), an identifier (some form of ObjC type, cases such as "int" and "long" being handled above), a TYPE (struct, union, enum and typeof specifiers) or an ERROR_MARK. In none of these cases may there have previously been any type specifiers. */ if (specs->type || specs->typespec_word != cts_none || specs->long_p || specs->short_p || specs->signed_p || specs->unsigned_p || specs->complex_p) error_at (loc, "two or more data types in declaration specifiers"); else if (TREE_CODE (type) == TYPE_DECL) { if (TREE_TYPE (type) == error_mark_node) ; /* Allow the type to default to int to avoid cascading errors. */ else { specs->type = TREE_TYPE (type); specs->decl_attr = DECL_ATTRIBUTES (type); specs->typedef_p = true; specs->explicit_signed_p = C_TYPEDEF_EXPLICITLY_SIGNED (type); specs->locations[cdw_typedef] = loc; /* If this typedef name is defined in a struct, then a C++ lookup would return a different value. */ if (warn_cxx_compat && I_SYMBOL_BINDING (DECL_NAME (type))->in_struct) warning_at (loc, OPT_Wc___compat, "C++ lookup of %qD would return a field, not a type", type); /* If we are parsing a struct, record that a struct field used a typedef. */ if (warn_cxx_compat && struct_parse_info != NULL) struct_parse_info->typedefs_seen.safe_push (type); } } else if (TREE_CODE (type) == IDENTIFIER_NODE) { tree t = lookup_name (type); if (!t || TREE_CODE (t) != TYPE_DECL) error_at (loc, "%qE fails to be a typedef or built in type", type); else if (TREE_TYPE (t) == error_mark_node) ; else { specs->type = TREE_TYPE (t); specs->locations[cdw_typespec] = loc; } } else { if (TREE_CODE (type) != ERROR_MARK && spec.kind == ctsk_typeof) { specs->typedef_p = true; specs->locations[cdw_typedef] = loc; if (spec.expr) { if (specs->expr) specs->expr = build2 (COMPOUND_EXPR, TREE_TYPE (spec.expr), specs->expr, spec.expr); else specs->expr = spec.expr; specs->expr_const_operands &= spec.expr_const_operands; } } specs->type = type; } return specs; } /* Add the storage class specifier or function specifier SCSPEC to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_scspec (source_location loc, struct c_declspecs *specs, tree scspec) { enum rid i; enum c_storage_class n = csc_none; bool dupe = false; specs->declspecs_seen_p = true; gcc_assert (TREE_CODE (scspec) == IDENTIFIER_NODE && C_IS_RESERVED_WORD (scspec)); i = C_RID_CODE (scspec); if (specs->non_sc_seen_p) warning (OPT_Wold_style_declaration, "%qE is not at beginning of declaration", scspec); switch (i) { case RID_INLINE: /* C99 permits duplicate inline. Although of doubtful utility, it seems simplest to permit it in gnu89 mode as well, as there is also little utility in maintaining this as a difference between gnu89 and C99 inline. */ dupe = false; specs->inline_p = true; specs->locations[cdw_inline] = loc; break; case RID_NORETURN: /* Duplicate _Noreturn is permitted. 
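(Aside, assumptions mine -- C11 lets a function specifier appear more
   than once with the same meaning as appearing once, so a declaration
   such as

       _Noreturn _Noreturn void die (void);

   passes through without a duplicate diagnostic, mirroring the inline
   case above.)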
*/ dupe = false; specs->noreturn_p = true; specs->locations[cdw_noreturn] = loc; break; case RID_THREAD: dupe = specs->thread_p; if (specs->storage_class == csc_auto) error ("%qE used with %<auto%>", scspec); else if (specs->storage_class == csc_register) error ("%qE used with %<register%>", scspec); else if (specs->storage_class == csc_typedef) error ("%qE used with %<typedef%>", scspec); else { specs->thread_p = true; specs->thread_gnu_p = (strcmp (IDENTIFIER_POINTER (scspec), "__thread") == 0); /* A diagnostic is not required for the use of this identifier in the implementation namespace; only diagnose it for the C11 spelling because of existing code using the other spelling. */ if (!specs->thread_gnu_p) { if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 does not support %qE", scspec); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 does not support %qE", scspec); } specs->locations[cdw_thread] = loc; } break; case RID_AUTO: n = csc_auto; break; case RID_EXTERN: n = csc_extern; /* Diagnose "__thread extern". */ if (specs->thread_p && specs->thread_gnu_p) error ("%<__thread%> before %<extern%>"); break; case RID_REGISTER: n = csc_register; break; case RID_STATIC: n = csc_static; /* Diagnose "__thread static". */ if (specs->thread_p && specs->thread_gnu_p) error ("%<__thread%> before %<static%>"); break; case RID_TYPEDEF: n = csc_typedef; break; default: gcc_unreachable (); } if (n != csc_none && n == specs->storage_class) dupe = true; if (dupe) { if (i == RID_THREAD) error ("duplicate %<_Thread_local%> or %<__thread%>"); else error ("duplicate %qE", scspec); } if (n != csc_none) { if (specs->storage_class != csc_none && n != specs->storage_class) { error ("multiple storage classes in declaration specifiers"); } else { specs->storage_class = n; specs->locations[cdw_storage_class] = loc; if (n != csc_extern && n != csc_static && specs->thread_p) { error ("%qs used with %qE", specs->thread_gnu_p ? "__thread" : "_Thread_local", scspec); specs->thread_p = false; } } } return specs; } /* Add the attributes ATTRS to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_attrs (source_location loc, struct c_declspecs *specs, tree attrs) { specs->attrs = chainon (attrs, specs->attrs); specs->locations[cdw_attributes] = loc; specs->declspecs_seen_p = true; return specs; } /* Add an _Alignas specifier (expression ALIGN, or type whose alignment is ALIGN) to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_alignas (source_location loc, struct c_declspecs *specs, tree align) { int align_log; specs->alignas_p = true; specs->locations[cdw_alignas] = loc; if (align == error_mark_node) return specs; align_log = check_user_alignment (align, true); if (align_log > specs->align_log) specs->align_log = align_log; return specs; } /* Combine "long", "short", "signed", "unsigned" and "_Complex" type specifiers with any other type specifier to determine the resulting type. This is where ISO C checks on complex types are made, since "_Complex long" is a prefix of the valid ISO C type "_Complex long double". */ struct c_declspecs * finish_declspecs (struct c_declspecs *specs) { /* If a type was specified as a whole, we have no modifiers and are done. */ if (specs->type != NULL_TREE) { gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); /* Set a dummy type. 
*/ if (TREE_CODE (specs->type) == ERROR_MARK) specs->type = integer_type_node; return specs; } /* If none of "void", "_Bool", "char", "int", "float" or "double" has been specified, treat it as "int" unless "_Complex" is present and there are no other specifiers. If we just have "_Complex", it is equivalent to "_Complex double", but e.g. "_Complex short" is equivalent to "_Complex short int". */ if (specs->typespec_word == cts_none) { if (specs->saturating_p) { error_at (specs->locations[cdw_saturating], "%<_Sat%> is used without %<_Fract%> or %<_Accum%>"); if (!targetm.fixed_point_supported_p ()) error_at (specs->locations[cdw_saturating], "fixed-point types not supported for this target"); specs->typespec_word = cts_fract; } else if (specs->long_p || specs->short_p || specs->signed_p || specs->unsigned_p) { specs->typespec_word = cts_int; } else if (specs->complex_p) { specs->typespec_word = cts_double; pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support plain %<complex%> meaning " "%<double complex%>"); } else { specs->typespec_word = cts_int; specs->default_int_p = true; /* We don't diagnose this here because grokdeclarator will give more specific diagnostics according to whether it is a function definition. */ } } /* If "signed" was specified, record this to distinguish "int" and "signed int" in the case of a bit-field with -funsigned-bitfields. */ specs->explicit_signed_p = specs->signed_p; /* Now compute the actual type. */ switch (specs->typespec_word) { case cts_auto_type: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); /* Type to be filled in later. */ break; case cts_void: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); specs->type = void_type_node; break; case cts_bool: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); specs->type = boolean_type_node; break; case cts_char: gcc_assert (!specs->long_p && !specs->short_p); gcc_assert (!(specs->signed_p && specs->unsigned_p)); if (specs->signed_p) specs->type = signed_char_type_node; else if (specs->unsigned_p) specs->type = unsigned_char_type_node; else specs->type = char_type_node; if (specs->complex_p) { pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support complex integer types"); specs->type = build_complex_type (specs->type); } break; case cts_int_n: gcc_assert (!specs->long_p && !specs->short_p && !specs->long_long_p); gcc_assert (!(specs->signed_p && specs->unsigned_p)); specs->type = (specs->unsigned_p ? int_n_trees[specs->int_n_idx].unsigned_type : int_n_trees[specs->int_n_idx].signed_type); if (specs->complex_p) { pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support complex integer types"); specs->type = build_complex_type (specs->type); } break; case cts_int: gcc_assert (!(specs->long_p && specs->short_p)); gcc_assert (!(specs->signed_p && specs->unsigned_p)); if (specs->long_long_p) specs->type = (specs->unsigned_p ? long_long_unsigned_type_node : long_long_integer_type_node); else if (specs->long_p) specs->type = (specs->unsigned_p ? long_unsigned_type_node : long_integer_type_node); else if (specs->short_p) specs->type = (specs->unsigned_p ? short_unsigned_type_node : short_integer_type_node); else specs->type = (specs->unsigned_p ? 
unsigned_type_node : integer_type_node); if (specs->complex_p) { pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support complex integer types"); specs->type = build_complex_type (specs->type); } break; case cts_float: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p); specs->type = (specs->complex_p ? complex_float_type_node : float_type_node); break; case cts_double: gcc_assert (!specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p); if (specs->long_p) { specs->type = (specs->complex_p ? complex_long_double_type_node : long_double_type_node); } else { specs->type = (specs->complex_p ? complex_double_type_node : double_type_node); } break; case cts_dfloat32: case cts_dfloat64: case cts_dfloat128: gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); if (specs->typespec_word == cts_dfloat32) specs->type = dfloat32_type_node; else if (specs->typespec_word == cts_dfloat64) specs->type = dfloat64_type_node; else specs->type = dfloat128_type_node; break; case cts_fract: gcc_assert (!specs->complex_p); if (!targetm.fixed_point_supported_p ()) specs->type = integer_type_node; else if (specs->saturating_p) { if (specs->long_long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_long_fract_type_node : sat_long_long_fract_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_fract_type_node : sat_long_fract_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? sat_unsigned_short_fract_type_node : sat_short_fract_type_node; else specs->type = specs->unsigned_p ? sat_unsigned_fract_type_node : sat_fract_type_node; } else { if (specs->long_long_p) specs->type = specs->unsigned_p ? unsigned_long_long_fract_type_node : long_long_fract_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? unsigned_long_fract_type_node : long_fract_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? unsigned_short_fract_type_node : short_fract_type_node; else specs->type = specs->unsigned_p ? unsigned_fract_type_node : fract_type_node; } break; case cts_accum: gcc_assert (!specs->complex_p); if (!targetm.fixed_point_supported_p ()) specs->type = integer_type_node; else if (specs->saturating_p) { if (specs->long_long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_long_accum_type_node : sat_long_long_accum_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_accum_type_node : sat_long_accum_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? sat_unsigned_short_accum_type_node : sat_short_accum_type_node; else specs->type = specs->unsigned_p ? sat_unsigned_accum_type_node : sat_accum_type_node; } else { if (specs->long_long_p) specs->type = specs->unsigned_p ? unsigned_long_long_accum_type_node : long_long_accum_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? unsigned_long_accum_type_node : long_accum_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? unsigned_short_accum_type_node : short_accum_type_node; else specs->type = specs->unsigned_p ? unsigned_accum_type_node : accum_type_node; } break; default: gcc_unreachable (); } return specs; } /* A subroutine of c_write_global_declarations. Perform final processing on one file scope's declarations (or the external scope's declarations), GLOBALS. 
*/
*/ static void c_write_global_declarations_1 (tree globals) { tree decl; bool reconsider; /* Process the decls in the order they were written. */ for (decl = globals; decl; decl = DECL_CHAIN (decl)) { /* Check for used but undefined static functions using the C standard's definition of "used", and set TREE_NO_WARNING so that check_global_declarations doesn't repeat the check. */ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl) == 0 && DECL_EXTERNAL (decl) && !TREE_PUBLIC (decl) && C_DECL_USED (decl)) { pedwarn (input_location, 0, "%q+F used but never defined", decl); TREE_NO_WARNING (decl) = 1; } wrapup_global_declaration_1 (decl); } do { reconsider = false; for (decl = globals; decl; decl = DECL_CHAIN (decl)) reconsider |= wrapup_global_declaration_2 (decl); } while (reconsider); for (decl = globals; decl; decl = DECL_CHAIN (decl)) check_global_declaration_1 (decl); } /* A subroutine of c_write_global_declarations Emit debug information for each of the declarations in GLOBALS. */ static void c_write_global_declarations_2 (tree globals) { tree decl; for (decl = globals; decl ; decl = DECL_CHAIN (decl)) debug_hooks->global_decl (decl); } /* Callback to collect a source_ref from a DECL. */ static void collect_source_ref_cb (tree decl) { if (!DECL_IS_BUILTIN (decl)) collect_source_ref (LOCATION_FILE (decl_sloc (decl, false))); } /* Preserve the external declarations scope across a garbage collect. */ static GTY(()) tree ext_block; /* Collect all references relevant to SOURCE_FILE. */ static void collect_all_refs (const char *source_file) { tree t; unsigned i; FOR_EACH_VEC_ELT (*all_translation_units, i, t) collect_ada_nodes (BLOCK_VARS (DECL_INITIAL (t)), source_file); collect_ada_nodes (BLOCK_VARS (ext_block), source_file); } /* Iterate over all global declarations and call CALLBACK. */ static void for_each_global_decl (void (*callback) (tree decl)) { tree t; tree decls; tree decl; unsigned i; FOR_EACH_VEC_ELT (*all_translation_units, i, t) { decls = DECL_INITIAL (t); for (decl = BLOCK_VARS (decls); decl; decl = TREE_CHAIN (decl)) callback (decl); } for (decl = BLOCK_VARS (ext_block); decl; decl = TREE_CHAIN (decl)) callback (decl); } void c_write_global_declarations (void) { tree t; unsigned i; /* We don't want to do this if generating a PCH. */ if (pch_file) return; timevar_start (TV_PHASE_DEFERRED); /* Do the Objective-C stuff. This is where all the Objective-C module stuff gets generated (symtab, class/protocol/selector lists etc). */ if (c_dialect_objc ()) objc_write_global_declarations (); /* Close the external scope. */ ext_block = pop_scope (); external_scope = 0; gcc_assert (!current_scope); /* Handle -fdump-ada-spec[-slim]. */ if (flag_dump_ada_spec || flag_dump_ada_spec_slim) { /* Build a table of files to generate specs for */ if (flag_dump_ada_spec_slim) collect_source_ref (main_input_filename); else for_each_global_decl (collect_source_ref_cb); dump_ada_specs (collect_all_refs, NULL); } if (ext_block) { tree tmp = BLOCK_VARS (ext_block); int flags; FILE * stream = dump_begin (TDI_tu, &flags); if (stream && tmp) { dump_node (tmp, flags & ~TDF_SLIM, stream); dump_end (TDI_tu, stream); } } /* Process all file scopes in this compilation, and the external_scope, through wrapup_global_declarations and check_global_declarations. 
*/
*/ FOR_EACH_VEC_ELT (*all_translation_units, i, t) c_write_global_declarations_1 (BLOCK_VARS (DECL_INITIAL (t))); c_write_global_declarations_1 (BLOCK_VARS (ext_block)); timevar_stop (TV_PHASE_DEFERRED); timevar_start (TV_PHASE_OPT_GEN); /* We're done parsing; proceed to optimize and emit assembly. FIXME: shouldn't be the front end's responsibility to call this. */ symtab->finalize_compilation_unit (); timevar_stop (TV_PHASE_OPT_GEN); timevar_start (TV_PHASE_DBGINFO); /* After cgraph has had a chance to emit everything that's going to be emitted, output debug information for globals. */ if (!seen_error ()) { timevar_push (TV_SYMOUT); FOR_EACH_VEC_ELT (*all_translation_units, i, t) c_write_global_declarations_2 (BLOCK_VARS (DECL_INITIAL (t))); c_write_global_declarations_2 (BLOCK_VARS (ext_block)); timevar_pop (TV_SYMOUT); } ext_block = NULL; timevar_stop (TV_PHASE_DBGINFO); } /* Register reserved keyword WORD as qualifier for address space AS. */ void c_register_addr_space (const char *word, addr_space_t as) { int rid = RID_FIRST_ADDR_SPACE + as; tree id; /* Address space qualifiers are only supported in C with GNU extensions enabled. */ if (c_dialect_objc () || flag_no_asm) return; id = get_identifier (word); C_SET_RID_CODE (id, rid); C_IS_RESERVED_WORD (id) = 1; ridpointers [rid] = id; } /* Return the identifier to look up for omp declare reduction. */ tree c_omp_reduction_id (enum tree_code reduction_code, tree reduction_id) { const char *p = NULL; switch (reduction_code) { case PLUS_EXPR: p = "+"; break; case MULT_EXPR: p = "*"; break; case MINUS_EXPR: p = "-"; break; case BIT_AND_EXPR: p = "&"; break; case BIT_XOR_EXPR: p = "^"; break; case BIT_IOR_EXPR: p = "|"; break; case TRUTH_ANDIF_EXPR: p = "&&"; break; case TRUTH_ORIF_EXPR: p = "||"; break; case MIN_EXPR: p = "min"; break; case MAX_EXPR: p = "max"; break; default: break; } if (p == NULL) { if (TREE_CODE (reduction_id) != IDENTIFIER_NODE) return error_mark_node; p = IDENTIFIER_POINTER (reduction_id); } const char prefix[] = "omp declare reduction "; size_t lenp = sizeof (prefix); size_t len = strlen (p); char *name = XALLOCAVEC (char, lenp + len); memcpy (name, prefix, lenp - 1); memcpy (name + lenp - 1, p, len + 1); return get_identifier (name); } /* Look up REDUCTION_ID in the current scope, or create an artificial VAR_DECL, bind it into the current scope and return it. */ tree c_omp_reduction_decl (tree reduction_id) { struct c_binding *b = I_SYMBOL_BINDING (reduction_id); if (b != NULL && B_IN_CURRENT_SCOPE (b)) return b->decl; tree decl = build_decl (BUILTINS_LOCATION, VAR_DECL, reduction_id, integer_type_node); DECL_ARTIFICIAL (decl) = 1; DECL_EXTERNAL (decl) = 1; TREE_STATIC (decl) = 1; TREE_PUBLIC (decl) = 0; bind (reduction_id, decl, current_scope, true, false, BUILTINS_LOCATION); return decl; } /* Look up REDUCTION_ID in the first scope where it has an entry for TYPE.
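   A minimal sketch of how the three helpers above fit together (hypothetical driver, not part of this file):

     tree id = c_omp_reduction_id (PLUS_EXPR, NULL_TREE);
         (yields the identifier "omp declare reduction +")
     tree decl = c_omp_reduction_decl (id);
         (artificial VAR_DECL bound invisibly in the current scope)
     tree reduction = c_omp_reduction_lookup (id, integer_type_node);
         (error_mark_node until a reduction for int has been registered
          on some binding's DECL_INITIAL list of (type, reduction) pairs)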
*/ tree c_omp_reduction_lookup (tree reduction_id, tree type) { struct c_binding *b = I_SYMBOL_BINDING (reduction_id); while (b) { tree t; for (t = DECL_INITIAL (b->decl); t; t = TREE_CHAIN (t)) if (comptypes (TREE_PURPOSE (t), type)) return TREE_VALUE (t); b = b->shadowed; } return error_mark_node; } /* Helper function called via walk_tree, to diagnose invalid #pragma omp declare reduction combiner or initializer. */ tree c_check_omp_declare_reduction_r (tree *tp, int *, void *data) { tree *vars = (tree *) data; if (SSA_VAR_P (*tp) && !DECL_ARTIFICIAL (*tp) && *tp != vars[0] && *tp != vars[1]) { location_t loc = DECL_SOURCE_LOCATION (vars[0]); if (strcmp (IDENTIFIER_POINTER (DECL_NAME (vars[0])), "omp_out") == 0) error_at (loc, "%<#pragma omp declare reduction%> combiner refers to " "variable %qD which is not %<omp_out%> nor %<omp_in%>", *tp); else error_at (loc, "%<#pragma omp declare reduction%> initializer refers " "to variable %qD which is not %<omp_priv%> nor " "%<omp_orig%>", *tp); return *tp; } return NULL_TREE; } #include "gt-c-c-decl.h"
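/* A minimal sketch (hypothetical helper, not part of this file) of how the
   walk_tree callback above is typically driven: VARS carries the two decls
   that may legitimately appear in EXPR, and any other user-visible variable
   is diagnosed by the callback and returned.  */

static bool
c_omp_expr_uses_only (tree expr, tree allowed0, tree allowed1)
{
  tree vars[2] = { allowed0, allowed1 };
  /* walk_tree returns the first offending node, or NULL_TREE if clean.  */
  return walk_tree (&expr, c_check_omp_declare_reduction_r, vars, NULL)
         == NULL_TREE;
}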
/* Process declarations and symbol lookup for C front end. Also constructs types; the standard scalar types at initialization, and structure, union, array and enum types when they are declared. */ /* ??? not all decl nodes are given the most useful possible line numbers. For example, the CONST_DECLs for enum values. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "input.h" #include "tm.h" #include "intl.h" #include "hash-set.h" #include "vec.h" #include "symtab.h" #include "input.h" #include "alias.h" #include "double-int.h" #include "machmode.h" #include "inchash.h" #include "tree.h" #include "fold-const.h" #include "print-tree.h" #include "stor-layout.h" #include "varasm.h" #include "attribs.h" #include "stringpool.h" #include "tree-inline.h" #include "flags.h" #include "hashtab.h" #include "hash-set.h" #include "vec.h" #include "machmode.h" #include "hard-reg-set.h" #include "function.h" #include "c-tree.h" #include "toplev.h" #include "tm_p.h" #include "cpplib.h" #include "target.h" #include "debug.h" #include "opts.h" #include "timevar.h" #include "c-family/c-common.h" #include "c-family/c-objc.h" #include "c-family/c-pragma.h" #include "c-family/c-ubsan.h" #include "c-lang.h" #include "langhooks.h" #include "tree-iterator.h" #include "diagnostic-core.h" #include "dumpfile.h" #include "hash-map.h" #include "is-a.h" #include "plugin-api.h" #include "ipa-ref.h" #include "cgraph.h" #include "hash-table.h" #include "langhooks-def.h" #include "plugin.h" #include "c-family/c-ada-spec.h" #include "cilk.h" #include "builtins.h" /* In grokdeclarator, distinguish syntactic contexts of declarators. */ enum decl_context { NORMAL, /* Ordinary declaration */ FUNCDEF, /* Function definition */ PARM, /* Declaration of parm before function body */ FIELD, /* Declaration inside struct or union */ TYPENAME}; /* Typename (inside cast or sizeof) */ /* States indicating how grokdeclarator() should handle declspecs marked with __attribute__((deprecated)). An object declared as __attribute__((deprecated)) suppresses warnings of uses of other deprecated items. */ enum deprecated_states { DEPRECATED_NORMAL, DEPRECATED_SUPPRESS }; /* Nonzero if we have seen an invalid cross reference to a struct, union, or enum, but not yet printed the message. */ tree pending_invalid_xref; /* File and line to appear in the eventual error message. */ location_t pending_invalid_xref_location; /* The file and line that the prototype came from if this is an old-style definition; used for diagnostics in store_parm_decls_oldstyle. */ static location_t current_function_prototype_locus; /* Whether this prototype was built-in. */ static bool current_function_prototype_built_in; /* The argument type information of this prototype. */ static tree current_function_prototype_arg_types; /* The argument information structure for the function currently being defined. */ static struct c_arg_info *current_function_arg_info; /* The obstack on which parser and related data structures, which are not live beyond their top-level declaration or definition, are allocated. */ struct obstack parser_obstack; /* The current statement tree. */ static GTY(()) struct stmt_tree_s c_stmt_tree; /* State saving variables. */ tree c_break_label; tree c_cont_label; /* A list of decls to be made automatically visible in each file scope. */ static GTY(()) tree visible_builtins; /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. 
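   For example (illustrative only),

     int f (void) { return 1; }

   sets this flag when the "return 1;" is parsed, while the companion flags below track plain "return;" statements and calls to noreturn functions.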
*/ int current_function_returns_value; /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ int current_function_returns_null; /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. */ int current_function_returns_abnormally; /* Set to nonzero by `grokdeclarator' for a function whose return type is defaulted, if warnings for this are desired. */ static int warn_about_return_type; /* Nonzero when the current toplevel function contains a declaration of a nested function which is never defined. */ static bool undef_nested_function; /* If non-zero, implicit "omp declare target" attribute is added into the attribute lists. */ int current_omp_declare_target_attribute; /* Each c_binding structure describes one binding of an identifier to a decl. All the decls in a scope - irrespective of namespace - are chained together by the ->prev field, which (as the name implies) runs in reverse order. All the decls in a given namespace bound to a given identifier are chained by the ->shadowed field, which runs from inner to outer scopes. The ->decl field usually points to a DECL node, but there are two exceptions. In the namespace of type tags, the bound entity is a RECORD_TYPE, UNION_TYPE, or ENUMERAL_TYPE node. If an undeclared identifier is encountered, it is bound to error_mark_node to suppress further errors about that identifier in the current function. The ->u.type field stores the type of the declaration in this scope; if NULL, the type is the type of the ->decl field. This is only of relevance for objects with external or internal linkage which may be redeclared in inner scopes, forming composite types that only persist for the duration of those scopes. In the external scope, this stores the composite of all the types declared for this object, visible or not. The ->inner_comp field (used only at file scope) stores whether an incomplete array type at file scope was completed at an inner scope to an array size other than 1. The ->u.label field is used for labels. It points to a structure which stores additional information used for warnings. The depth field is copied from the scope structure that holds this decl. It is used to preserve the proper ordering of the ->shadowed field (see bind()) and also for a handful of special-case checks. Finally, the invisible bit is true for a decl which should be ignored for purposes of normal name lookup, and the nested bit is true for a decl that's been bound a second time in an inner scope; in all such cases, the binding in the outer scope will have its invisible bit true. 
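   As a concrete (illustrative) example of the ->shadowed chains:

     int i;              file scope, binding depth 1
     void f (void)
     {
       int i;            depth 2; this binding shadows the depth-1 one
       {
         long i;         depth 3; shadows the depth-2 binding
       }                 popping the scope restores the depth-2 binding
     }

   The symbol binding of the identifier "i" always names the innermost visible binding, and each binding's ->shadowed field leads outward toward file scope.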
*/ struct GTY((chain_next ("%h.prev"))) c_binding { union GTY(()) { /* first so GTY desc can use decl */ tree GTY((tag ("0"))) type; /* the type in this scope */ struct c_label_vars * GTY((tag ("1"))) label; /* for warnings */ } GTY((desc ("TREE_CODE (%0.decl) == LABEL_DECL"))) u; tree decl; /* the decl bound */ tree id; /* the identifier it's bound to */ struct c_binding *prev; /* the previous decl in this scope */ struct c_binding *shadowed; /* the innermost decl shadowed by this one */ unsigned int depth : 28; /* depth of this scope */ BOOL_BITFIELD invisible : 1; /* normal lookup should ignore this binding */ BOOL_BITFIELD nested : 1; /* do not set DECL_CONTEXT when popping */ BOOL_BITFIELD inner_comp : 1; /* incomplete array completed in inner scope */ BOOL_BITFIELD in_struct : 1; /* currently defined as struct field */ location_t locus; /* location for nested bindings */ }; #define B_IN_SCOPE(b1, b2) ((b1)->depth == (b2)->depth) #define B_IN_CURRENT_SCOPE(b) ((b)->depth == current_scope->depth) #define B_IN_FILE_SCOPE(b) ((b)->depth == 1 /*file_scope->depth*/) #define B_IN_EXTERNAL_SCOPE(b) ((b)->depth == 0 /*external_scope->depth*/) /* Each C symbol points to three linked lists of c_binding structures. These describe the values of the identifier in the three different namespaces defined by the language. */ struct GTY(()) lang_identifier { struct c_common_identifier common_id; struct c_binding *symbol_binding; /* vars, funcs, constants, typedefs */ struct c_binding *tag_binding; /* struct/union/enum tags */ struct c_binding *label_binding; /* labels */ }; /* Validate c-lang.c's assumptions. */ extern char C_SIZEOF_STRUCT_LANG_IDENTIFIER_isnt_accurate [(sizeof(struct lang_identifier) == C_SIZEOF_STRUCT_LANG_IDENTIFIER) ? 1 : -1]; /* The binding oracle; see c-tree.h. */ void (*c_binding_oracle) (enum c_oracle_request, tree identifier); /* This flag is set on an identifier if we have previously asked the binding oracle for this identifier's symbol binding. */ #define I_SYMBOL_CHECKED(node) \ (TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (node))) static inline struct c_binding* * i_symbol_binding (tree node) { struct lang_identifier *lid = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node); if (lid->symbol_binding == NULL && c_binding_oracle != NULL && !I_SYMBOL_CHECKED (node)) { /* Set the "checked" flag first, to avoid infinite recursion when the binding oracle calls back into gcc. */ I_SYMBOL_CHECKED (node) = 1; c_binding_oracle (C_ORACLE_SYMBOL, node); } return &lid->symbol_binding; } #define I_SYMBOL_BINDING(node) (*i_symbol_binding (node)) #define I_SYMBOL_DECL(node) \ (I_SYMBOL_BINDING(node) ? I_SYMBOL_BINDING(node)->decl : 0) /* This flag is set on an identifier if we have previously asked the binding oracle for this identifier's tag binding. */ #define I_TAG_CHECKED(node) \ (TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (node))) static inline struct c_binding ** i_tag_binding (tree node) { struct lang_identifier *lid = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node); if (lid->tag_binding == NULL && c_binding_oracle != NULL && !I_TAG_CHECKED (node)) { /* Set the "checked" flag first, to avoid infinite recursion when the binding oracle calls back into gcc. */ I_TAG_CHECKED (node) = 1; c_binding_oracle (C_ORACLE_TAG, node); } return &lid->tag_binding; } #define I_TAG_BINDING(node) (*i_tag_binding (node)) #define I_TAG_DECL(node) \ (I_TAG_BINDING(node) ? 
I_TAG_BINDING(node)->decl : 0) /* This flag is set on an identifier if we have previously asked the binding oracle for this identifier's label binding. */ #define I_LABEL_CHECKED(node) \ (TREE_LANG_FLAG_6 (IDENTIFIER_NODE_CHECK (node))) static inline struct c_binding ** i_label_binding (tree node) { struct lang_identifier *lid = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node); if (lid->label_binding == NULL && c_binding_oracle != NULL && !I_LABEL_CHECKED (node)) { /* Set the "checked" flag first, to avoid infinite recursion when the binding oracle calls back into gcc. */ I_LABEL_CHECKED (node) = 1; c_binding_oracle (C_ORACLE_LABEL, node); } return &lid->label_binding; } #define I_LABEL_BINDING(node) (*i_label_binding (node)) #define I_LABEL_DECL(node) \ (I_LABEL_BINDING(node) ? I_LABEL_BINDING(node)->decl : 0) /* The resulting tree type. */ union GTY((desc ("TREE_CODE (&%h.generic) == IDENTIFIER_NODE"), chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node { union tree_node GTY ((tag ("0"), desc ("tree_node_structure (&%h)"))) generic; struct lang_identifier GTY ((tag ("1"))) identifier; }; /* Track bindings and other things that matter for goto warnings. For efficiency, we do not gather all the decls at the point of definition. Instead, we point into the bindings structure. As scopes are popped, we update these structures and gather the decls that matter at that time. */ struct GTY(()) c_spot_bindings { /* The currently open scope which holds bindings defined when the label was defined or the goto statement was found. */ struct c_scope *scope; /* The bindings in the scope field which were defined at the point of the label or goto. This lets us look at older or newer bindings in the scope, as appropriate. */ struct c_binding *bindings_in_scope; /* The number of statement expressions that have started since this label or goto statement was defined. This is zero if we are at the same statement expression level. It is positive if we are in a statement expression started since this spot. It is negative if this spot was in a statement expression and we have left it. */ int stmt_exprs; /* Whether we started in a statement expression but are no longer in it. This is set to true if stmt_exprs ever goes negative. */ bool left_stmt_expr; }; /* This structure is used to keep track of bindings seen when a goto statement is defined. This is only used if we see the goto statement before we see the label. */ struct GTY(()) c_goto_bindings { /* The location of the goto statement. */ location_t loc; /* The bindings of the goto statement. */ struct c_spot_bindings goto_bindings; }; typedef struct c_goto_bindings *c_goto_bindings_p; /* The additional information we keep track of for a label binding. These fields are updated as scopes are popped. */ struct GTY(()) c_label_vars { /* The shadowed c_label_vars, when one label shadows another (which can only happen using a __label__ declaration). */ struct c_label_vars *shadowed; /* The bindings when the label was defined. */ struct c_spot_bindings label_bindings; /* A list of decls that we care about: decls about which we should warn if a goto branches to this label from later in the function. Decls are added to this list as scopes are popped. We only add the decls that matter. */ vec<tree, va_gc> *decls_in_scope; /* A list of goto statements to this label. This is only used for goto statements seen before the label was defined, so that we can issue appropriate warnings for them. 
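   For instance (illustrative only), in

     void f (void)
     {
       goto skip;        recorded in this vector: label not yet seen
       {
         int x = 42;     initialization bypassed by the jump
       skip: ;
       }
     }

   the goto is entered here when parsed, and once the label binding is finally seen, -Wjump-misses-init can warn that the jump skips the initialization of "x".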
*/ vec<c_goto_bindings_p, va_gc> *gotos; }; /* Each c_scope structure describes the complete contents of one scope. Four scopes are distinguished specially: the innermost or current scope, the innermost function scope, the file scope (always the second to outermost) and the outermost or external scope. Most declarations are recorded in the current scope. All normal label declarations are recorded in the innermost function scope, as are bindings of undeclared identifiers to error_mark_node. (GCC permits nested functions as an extension, hence the 'innermost' qualifier.) Explicitly declared labels (using the __label__ extension) appear in the current scope. Being in the file scope (current_scope == file_scope) causes special behavior in several places below. Also, under some conditions the Objective-C front end records declarations in the file scope even though that isn't the current scope. All declarations with external linkage are recorded in the external scope, even if they aren't visible there; this models the fact that such declarations are visible to the entire program, and (with a bit of cleverness, see pushdecl) allows diagnosis of some violations of C99 6.2.2p7 and 6.2.7p2: If, within the same translation unit, the same identifier appears with both internal and external linkage, the behavior is undefined. All declarations that refer to the same object or function shall have compatible type; otherwise, the behavior is undefined. Initially only the built-in declarations, which describe compiler intrinsic functions plus a subset of the standard library, are in this scope. The order of the blocks list matters, and it is frequently appended to. To avoid having to walk all the way to the end of the list on each insertion, or reverse the list later, we maintain a pointer to the last list entry. (FIXME: It should be feasible to use a reversed list here.) The bindings list is strictly in reverse order of declarations; pop_scope relies on this. */ struct GTY((chain_next ("%h.outer"))) c_scope { /* The scope containing this one. */ struct c_scope *outer; /* The next outermost function scope. */ struct c_scope *outer_function; /* All bindings in this scope. */ struct c_binding *bindings; /* For each scope (except the global one), a chain of BLOCK nodes for all the scopes that were entered and exited one level down. */ tree blocks; tree blocks_last; /* The depth of this scope. Used to keep the ->shadowed chain of bindings sorted innermost to outermost. */ unsigned int depth : 28; /* True if we are currently filling this scope with parameter declarations. */ BOOL_BITFIELD parm_flag : 1; /* True if we saw [*] in this scope. Used to give an error message if one appears in a function definition. */ BOOL_BITFIELD had_vla_unspec : 1; /* True if we already complained about forward parameter decls in this scope. This prevents double warnings on foo (int a; int b; ...). */ BOOL_BITFIELD warned_forward_parm_decls : 1; /* True if this is the outermost block scope of a function body. This scope contains the parameters, the local variables declared in the outermost block, and all the labels (except those in nested functions, or declared at block scope with __label__). */ BOOL_BITFIELD function_body : 1; /* True means make a BLOCK for this scope no matter what. */ BOOL_BITFIELD keep : 1; /* True means that an unsuffixed float constant is _Decimal64. */ BOOL_BITFIELD float_const_decimal64 : 1; /* True if this scope has any label bindings.
This is used to speed up searching for labels when popping scopes, particularly since labels are normally only found at function scope. */ BOOL_BITFIELD has_label_bindings : 1; /* True if we should issue a warning if a goto statement crosses any of the bindings. We still need to check the list of bindings to find the specific ones we need to warn about. This is true if decl_jump_unsafe would return true for any of the bindings. This is used to avoid looping over all the bindings unnecessarily. */ BOOL_BITFIELD has_jump_unsafe_decl : 1; }; /* The scope currently in effect. */ static GTY(()) struct c_scope *current_scope; /* The innermost function scope. Ordinary (not explicitly declared) labels, bindings to error_mark_node, and the lazily-created bindings of __func__ and its friends get this scope. */ static GTY(()) struct c_scope *current_function_scope; /* The C file scope. This is reset for each input translation unit. */ static GTY(()) struct c_scope *file_scope; /* The outermost scope. This is used for all declarations with external linkage, and only these, hence the name. */ static GTY(()) struct c_scope *external_scope; /* A chain of c_scope structures awaiting reuse. */ static GTY((deletable)) struct c_scope *scope_freelist; /* A chain of c_binding structures awaiting reuse. */ static GTY((deletable)) struct c_binding *binding_freelist; /* Append VAR to LIST in scope SCOPE. */ #define SCOPE_LIST_APPEND(scope, list, decl) do { \ struct c_scope *s_ = (scope); \ tree d_ = (decl); \ if (s_->list##_last) \ BLOCK_CHAIN (s_->list##_last) = d_; \ else \ s_->list = d_; \ s_->list##_last = d_; \ } while (0) /* Concatenate FROM in scope FSCOPE onto TO in scope TSCOPE. */ #define SCOPE_LIST_CONCAT(tscope, to, fscope, from) do { \ struct c_scope *t_ = (tscope); \ struct c_scope *f_ = (fscope); \ if (t_->to##_last) \ BLOCK_CHAIN (t_->to##_last) = f_->from; \ else \ t_->to = f_->from; \ t_->to##_last = f_->from##_last; \ } while (0) /* A c_inline_static structure stores details of a static identifier referenced in a definition of a function that may be an inline definition if no subsequent declaration of that function uses "extern" or does not use "inline". */ struct GTY((chain_next ("%h.next"))) c_inline_static { /* The location for a diagnostic. */ location_t location; /* The function that may be an inline definition. */ tree function; /* The object or function referenced. */ tree static_decl; /* What sort of reference this is. */ enum c_inline_static_type type; /* The next such structure or NULL. */ struct c_inline_static *next; }; /* List of static identifiers used or referenced in functions that may be inline definitions. */ static GTY(()) struct c_inline_static *c_inline_statics; /* True means unconditionally make a BLOCK for the next scope pushed. */ static bool keep_next_level_flag; /* True means the next call to push_scope will be the outermost scope of a function body, so do not push a new scope, merely cease expecting parameter decls. */ static bool next_is_function_body; /* A vector of pointers to c_binding structures. */ typedef struct c_binding *c_binding_ptr; /* Information that we keep for a struct or union while it is being parsed. */ struct c_struct_parse_info { /* If warn_cxx_compat, a list of types defined within this struct. */ vec<tree> struct_types; /* If warn_cxx_compat, a list of field names which have bindings, and which are defined in this struct, but which are not defined in any enclosing struct. This is used to clear the in_struct field of the c_bindings structure. 
*/ vec<c_binding_ptr> fields; /* If warn_cxx_compat, a list of typedef names used when defining fields in this struct. */ vec<tree> typedefs_seen; }; /* Information for the struct or union currently being parsed, or NULL if not parsing a struct or union. */ static struct c_struct_parse_info *struct_parse_info; /* Forward declarations. */ static tree lookup_name_in_scope (tree, struct c_scope *); static tree c_make_fname_decl (location_t, tree, int); static tree grokdeclarator (const struct c_declarator *, struct c_declspecs *, enum decl_context, bool, tree *, tree *, tree *, bool *, enum deprecated_states); static tree grokparms (struct c_arg_info *, bool); static void layout_array_type (tree); static void warn_defaults_to (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); /* T is a statement. Add it to the statement-tree. This is the C/ObjC version--C++ has a slightly different version of this function. */ tree add_stmt (tree t) { enum tree_code code = TREE_CODE (t); if (CAN_HAVE_LOCATION_P (t) && code != LABEL_EXPR) { if (!EXPR_HAS_LOCATION (t)) SET_EXPR_LOCATION (t, input_location); } if (code == LABEL_EXPR || code == CASE_LABEL_EXPR) STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1; /* Add T to the statement-tree. Non-side-effect statements need to be recorded during statement expressions. */ if (!building_stmt_list_p ()) push_stmt_list (); append_to_statement_list_force (t, &cur_stmt_list); return t; } /* Build a pointer type using the default pointer mode. */ static tree c_build_pointer_type (tree to_type) { addr_space_t as = to_type == error_mark_node? ADDR_SPACE_GENERIC : TYPE_ADDR_SPACE (to_type); machine_mode pointer_mode; if (as != ADDR_SPACE_GENERIC || c_default_pointer_mode == VOIDmode) pointer_mode = targetm.addr_space.pointer_mode (as); else pointer_mode = c_default_pointer_mode; return build_pointer_type_for_mode (to_type, pointer_mode, false); } /* Return true if we will want to say something if a goto statement crosses DECL. */ static bool decl_jump_unsafe (tree decl) { if (error_operand_p (decl)) return false; /* Always warn about crossing variably modified types. */ if ((TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == TYPE_DECL) && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE)) return true; /* Otherwise, only warn if -Wgoto-misses-init and this is an initialized automatic decl. */ if (warn_jump_misses_init && TREE_CODE (decl) == VAR_DECL && !TREE_STATIC (decl) && DECL_INITIAL (decl) != NULL_TREE) return true; return false; } void c_print_identifier (FILE *file, tree node, int indent) { void (*save) (enum c_oracle_request, tree identifier); /* Temporarily hide any binding oracle. Without this, calls to debug_tree from the debugger will end up calling into the oracle, making for a confusing debug session. As the oracle isn't needed here for normal operation, it's simplest to suppress it. */ save = c_binding_oracle; c_binding_oracle = NULL; print_node (file, "symbol", I_SYMBOL_DECL (node), indent + 4); print_node (file, "tag", I_TAG_DECL (node), indent + 4); print_node (file, "label", I_LABEL_DECL (node), indent + 4); if (C_IS_RESERVED_WORD (node) && C_RID_CODE (node) != RID_CXX_COMPAT_WARN) { tree rid = ridpointers[C_RID_CODE (node)]; indent_to (file, indent + 4); fprintf (file, "rid " HOST_PTR_PRINTF " \"%s\"", (void *) rid, IDENTIFIER_POINTER (rid)); } c_binding_oracle = save; } /* Establish a binding between NAME, an IDENTIFIER_NODE, and DECL, which may be any of several kinds of DECL or TYPE or error_mark_node, in the scope SCOPE. 
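   A typical call (sketch only, mirroring the uses elsewhere in this file) binds a freshly parsed decl into the current scope:

     bind (DECL_NAME (decl), decl, current_scope,
           false, false, DECL_SOURCE_LOCATION (decl));

   where the two booleans are INVISIBLE and NESTED respectively.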
*/ static void bind (tree name, tree decl, struct c_scope *scope, bool invisible, bool nested, location_t locus) { struct c_binding *b, **here; if (binding_freelist) { b = binding_freelist; binding_freelist = b->prev; } else b = ggc_alloc<c_binding> (); b->shadowed = 0; b->decl = decl; b->id = name; b->depth = scope->depth; b->invisible = invisible; b->nested = nested; b->inner_comp = 0; b->in_struct = 0; b->locus = locus; b->u.type = NULL; b->prev = scope->bindings; scope->bindings = b; if (decl_jump_unsafe (decl)) scope->has_jump_unsafe_decl = 1; if (!name) return; switch (TREE_CODE (decl)) { case LABEL_DECL: here = &I_LABEL_BINDING (name); break; case ENUMERAL_TYPE: case UNION_TYPE: case RECORD_TYPE: here = &I_TAG_BINDING (name); break; case VAR_DECL: case FUNCTION_DECL: case TYPE_DECL: case CONST_DECL: case PARM_DECL: case ERROR_MARK: here = &I_SYMBOL_BINDING (name); break; default: gcc_unreachable (); } /* Locate the appropriate place in the chain of shadowed decls to insert this binding. Normally, scope == current_scope and this does nothing. */ while (*here && (*here)->depth > scope->depth) here = &(*here)->shadowed; b->shadowed = *here; *here = b; } /* Clear the binding structure B, stick it on the binding_freelist, and return the former value of b->prev. This is used by pop_scope and get_parm_info to iterate destructively over all the bindings from a given scope. */ static struct c_binding * free_binding_and_advance (struct c_binding *b) { struct c_binding *prev = b->prev; memset (b, 0, sizeof (struct c_binding)); b->prev = binding_freelist; binding_freelist = b; return prev; } /* Bind a label. Like bind, but skip fields which aren't used for labels, and add the LABEL_VARS value. */ static void bind_label (tree name, tree label, struct c_scope *scope, struct c_label_vars *label_vars) { struct c_binding *b; bind (name, label, scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); scope->has_label_bindings = true; b = scope->bindings; gcc_assert (b->decl == label); label_vars->shadowed = b->u.label; b->u.label = label_vars; } /* Hook called at end of compilation to assume 1 elt for a file-scope tentative array defn that wasn't complete before. */ void c_finish_incomplete_decl (tree decl) { if (TREE_CODE (decl) == VAR_DECL) { tree type = TREE_TYPE (decl); if (type != error_mark_node && TREE_CODE (type) == ARRAY_TYPE && !DECL_EXTERNAL (decl) && TYPE_DOMAIN (type) == 0) { warning_at (DECL_SOURCE_LOCATION (decl), 0, "array %q+D assumed to have one element", decl); complete_array_type (&TREE_TYPE (decl), NULL_TREE, true); relayout_decl (decl); } } } /* Record that inline function FUNC contains a reference (location LOC) to static DECL (file-scope or function-local according to TYPE). */ void record_inline_static (location_t loc, tree func, tree decl, enum c_inline_static_type type) { c_inline_static *csi = ggc_alloc<c_inline_static> (); csi->location = loc; csi->function = func; csi->static_decl = decl; csi->type = type; csi->next = c_inline_statics; c_inline_statics = csi; } /* Check for references to static declarations in inline functions at the end of the translation unit and diagnose them if the functions are still inline definitions. 
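   For example (illustrative only), under C99 inline semantics

     static int counter;
     inline int bump (void) { return counter++; }

   is diagnosed here if the translation unit never makes "bump" an external definition, because an inline definition may not reference an identifier with internal linkage (C99 6.7.4p3).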
*/ static void check_inline_statics (void) { struct c_inline_static *csi; for (csi = c_inline_statics; csi; csi = csi->next) { if (DECL_EXTERNAL (csi->function)) switch (csi->type) { case csi_internal: pedwarn (csi->location, 0, "%qD is static but used in inline function %qD " "which is not static", csi->static_decl, csi->function); break; case csi_modifiable: pedwarn (csi->location, 0, "%q+D is static but declared in inline function %qD " "which is not static", csi->static_decl, csi->function); break; default: gcc_unreachable (); } } c_inline_statics = NULL; } /* Fill in a c_spot_bindings structure. If DEFINING is true, set it for the current state, otherwise set it to uninitialized. */ static void set_spot_bindings (struct c_spot_bindings *p, bool defining) { if (defining) { p->scope = current_scope; p->bindings_in_scope = current_scope->bindings; } else { p->scope = NULL; p->bindings_in_scope = NULL; } p->stmt_exprs = 0; p->left_stmt_expr = false; } /* Update spot bindings P as we pop out of SCOPE. Return true if we should push decls for a label. */ static bool update_spot_bindings (struct c_scope *scope, struct c_spot_bindings *p) { if (p->scope != scope) { /* This label or goto is defined in some other scope, or it is a label which is not yet defined. There is nothing to update. */ return false; } /* Adjust the spot bindings to refer to the bindings already defined in the enclosing scope. */ p->scope = scope->outer; p->bindings_in_scope = p->scope->bindings; return true; } /* The Objective-C front-end often needs to determine the current scope. */ void * objc_get_current_scope (void) { return current_scope; } /* The following function is used only by Objective-C. It needs to live here because it accesses the innards of c_scope. */ void objc_mark_locals_volatile (void *enclosing_blk) { struct c_scope *scope; struct c_binding *b; for (scope = current_scope; scope && scope != enclosing_blk; scope = scope->outer) { for (b = scope->bindings; b; b = b->prev) objc_volatilize_decl (b->decl); /* Do not climb up past the current function. */ if (scope->function_body) break; } } /* Return true if we are in the global binding level. */ bool global_bindings_p (void) { return current_scope == file_scope; } void keep_next_level (void) { keep_next_level_flag = true; } /* Set the flag for the FLOAT_CONST_DECIMAL64 pragma being ON. */ void set_float_const_decimal64 (void) { current_scope->float_const_decimal64 = true; } /* Clear the flag for the FLOAT_CONST_DECIMAL64 pragma. */ void clear_float_const_decimal64 (void) { current_scope->float_const_decimal64 = false; } /* Return nonzero if an unsuffixed float constant is _Decimal64. */ bool float_const_decimal64_p (void) { return current_scope->float_const_decimal64; } /* Identify this scope as currently being filled with parameters. */ void declare_parm_level (void) { current_scope->parm_flag = true; } void push_scope (void) { if (next_is_function_body) { /* This is the transition from the parameters to the top level of the function body. These are the same scope (C99 6.2.1p4,6) so we do not push another scope structure. next_is_function_body is set only by store_parm_decls, which in turn is called when and only when we are about to encounter the opening curly brace for the function body. The outermost block of a function always gets a BLOCK node, because the debugging output routines expect that each function has at least one BLOCK. 
*/ current_scope->parm_flag = false; current_scope->function_body = true; current_scope->keep = true; current_scope->outer_function = current_function_scope; current_function_scope = current_scope; keep_next_level_flag = false; next_is_function_body = false; /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes. */ if (current_scope->outer) current_scope->float_const_decimal64 = current_scope->outer->float_const_decimal64; else current_scope->float_const_decimal64 = false; } else { struct c_scope *scope; if (scope_freelist) { scope = scope_freelist; scope_freelist = scope->outer; } else scope = ggc_cleared_alloc<c_scope> (); /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes. */ if (current_scope) scope->float_const_decimal64 = current_scope->float_const_decimal64; else scope->float_const_decimal64 = false; scope->keep = keep_next_level_flag; scope->outer = current_scope; scope->depth = current_scope ? (current_scope->depth + 1) : 0; /* Check for scope depth overflow. Unlikely (2^28 == 268,435,456) but possible. */ if (current_scope && scope->depth == 0) { scope->depth--; sorry ("GCC supports only %u nested scopes", scope->depth); } current_scope = scope; keep_next_level_flag = false; } } /* This is called when we are leaving SCOPE. For each label defined in SCOPE, add any appropriate decls to its decls_in_scope fields. These are the decls whose initialization will be skipped by a goto later in the function. */ static void update_label_decls (struct c_scope *scope) { struct c_scope *s; s = scope; while (s != NULL) { if (s->has_label_bindings) { struct c_binding *b; for (b = s->bindings; b != NULL; b = b->prev) { struct c_label_vars *label_vars; struct c_binding *b1; bool hjud; unsigned int ix; struct c_goto_bindings *g; if (TREE_CODE (b->decl) != LABEL_DECL) continue; label_vars = b->u.label; b1 = label_vars->label_bindings.bindings_in_scope; if (label_vars->label_bindings.scope == NULL) hjud = false; else hjud = label_vars->label_bindings.scope->has_jump_unsafe_decl; if (update_spot_bindings (scope, &label_vars->label_bindings)) { /* This label is defined in this scope. */ if (hjud) { for (; b1 != NULL; b1 = b1->prev) { /* A goto from later in the function to this label will never see the initialization of B1, if any. Save it to issue a warning if needed. */ if (decl_jump_unsafe (b1->decl)) vec_safe_push(label_vars->decls_in_scope, b1->decl); } } } /* Update the bindings of any goto statements associated with this label. */ FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) update_spot_bindings (scope, &g->goto_bindings); } } /* Don't search beyond the current function. */ if (s == current_function_scope) break; s = s->outer; } } /* Set the TYPE_CONTEXT of all of TYPE's variants to CONTEXT. */ static void set_type_context (tree type, tree context) { for (type = TYPE_MAIN_VARIANT (type); type; type = TYPE_NEXT_VARIANT (type)) TYPE_CONTEXT (type) = context; } /* Exit a scope. Restore the state of the identifier-decl mappings that were in effect when this scope was entered. Return a BLOCK node containing all the DECLs in this scope that are of interest to debug info generation. */ tree pop_scope (void) { struct c_scope *scope = current_scope; tree block, context, p; struct c_binding *b; bool functionbody = scope->function_body; bool keep = functionbody || scope->keep || scope->bindings; update_label_decls (scope); /* If appropriate, create a BLOCK to record the decls for the life of this function. 
*/ block = 0; if (keep) { block = make_node (BLOCK); BLOCK_SUBBLOCKS (block) = scope->blocks; TREE_USED (block) = 1; /* In each subblock, record that this is its superior. */ for (p = scope->blocks; p; p = BLOCK_CHAIN (p)) BLOCK_SUPERCONTEXT (p) = block; BLOCK_VARS (block) = 0; } /* The TYPE_CONTEXTs for all of the tagged types belonging to this scope must be set so that they point to the appropriate construct, i.e. either to the current FUNCTION_DECL node, or else to the BLOCK node we just constructed. Note that for tagged types whose scope is just the formal parameter list for some function type specification, we can't properly set their TYPE_CONTEXTs here, because we don't have a pointer to the appropriate FUNCTION_TYPE node readily available to us. For those cases, the TYPE_CONTEXTs of the relevant tagged type nodes get set in `grokdeclarator' as soon as we have created the FUNCTION_TYPE node which will represent the "scope" for these "parameter list local" tagged types. */ if (scope->function_body) context = current_function_decl; else if (scope == file_scope) { tree file_decl = build_translation_unit_decl (NULL_TREE); context = file_decl; debug_hooks->register_main_translation_unit (file_decl); } else context = block; /* Clear all bindings in this scope. */ for (b = scope->bindings; b; b = free_binding_and_advance (b)) { p = b->decl; switch (TREE_CODE (p)) { case LABEL_DECL: /* Warnings for unused labels, errors for undefined labels. */ if (TREE_USED (p) && !DECL_INITIAL (p)) { error ("label %q+D used but not defined", p); DECL_INITIAL (p) = error_mark_node; } else warn_for_unused_label (p); /* Labels go in BLOCK_VARS. */ DECL_CHAIN (p) = BLOCK_VARS (block); BLOCK_VARS (block) = p; gcc_assert (I_LABEL_BINDING (b->id) == b); I_LABEL_BINDING (b->id) = b->shadowed; /* Also pop back to the shadowed label_vars. */ release_tree_vector (b->u.label->decls_in_scope); b->u.label = b->u.label->shadowed; break; case ENUMERAL_TYPE: case UNION_TYPE: case RECORD_TYPE: set_type_context (p, context); /* Types may not have tag-names, in which case the type appears in the bindings list with b->id NULL. */ if (b->id) { gcc_assert (I_TAG_BINDING (b->id) == b); I_TAG_BINDING (b->id) = b->shadowed; } break; case FUNCTION_DECL: /* Propagate TREE_ADDRESSABLE from nested functions to their containing functions. */ if (!TREE_ASM_WRITTEN (p) && DECL_INITIAL (p) != 0 && TREE_ADDRESSABLE (p) && DECL_ABSTRACT_ORIGIN (p) != 0 && DECL_ABSTRACT_ORIGIN (p) != p) TREE_ADDRESSABLE (DECL_ABSTRACT_ORIGIN (p)) = 1; if (!DECL_EXTERNAL (p) && !DECL_INITIAL (p) && scope != file_scope && scope != external_scope) { error ("nested function %q+D declared but never defined", p); undef_nested_function = true; } else if (DECL_DECLARED_INLINE_P (p) && TREE_PUBLIC (p) && !DECL_INITIAL (p)) { /* C99 6.7.4p6: "a function with external linkage... declared with an inline function specifier ... shall also be defined in the same translation unit." */ if (!flag_gnu89_inline && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (p)) && scope != external_scope) pedwarn (input_location, 0, "inline function %q+D declared but never defined", p); DECL_EXTERNAL (p) = 1; } goto common_symbol; case VAR_DECL: /* Warnings for unused variables. 
*/ if ((!TREE_USED (p) || !DECL_READ_P (p)) && !TREE_NO_WARNING (p) && !DECL_IN_SYSTEM_HEADER (p) && DECL_NAME (p) && !DECL_ARTIFICIAL (p) && scope != file_scope && scope != external_scope) { if (!TREE_USED (p)) warning (OPT_Wunused_variable, "unused variable %q+D", p); else if (DECL_CONTEXT (p) == current_function_decl) warning_at (DECL_SOURCE_LOCATION (p), OPT_Wunused_but_set_variable, "variable %qD set but not used", p); } if (b->inner_comp) { error ("type of array %q+D completed incompatibly with" " implicit initialization", p); } /* Fall through. */ case TYPE_DECL: case CONST_DECL: common_symbol: /* All of these go in BLOCK_VARS, but only if this is the binding in the home scope. */ if (!b->nested) { DECL_CHAIN (p) = BLOCK_VARS (block); BLOCK_VARS (block) = p; } else if (VAR_OR_FUNCTION_DECL_P (p) && scope != file_scope) { /* For block local externs add a special DECL_EXTERNAL decl for debug info generation. */ tree extp = copy_node (p); DECL_EXTERNAL (extp) = 1; TREE_STATIC (extp) = 0; TREE_PUBLIC (extp) = 1; DECL_INITIAL (extp) = NULL_TREE; DECL_LANG_SPECIFIC (extp) = NULL; DECL_CONTEXT (extp) = current_function_decl; if (TREE_CODE (p) == FUNCTION_DECL) { DECL_RESULT (extp) = NULL_TREE; DECL_SAVED_TREE (extp) = NULL_TREE; DECL_STRUCT_FUNCTION (extp) = NULL; } if (b->locus != UNKNOWN_LOCATION) DECL_SOURCE_LOCATION (extp) = b->locus; DECL_CHAIN (extp) = BLOCK_VARS (block); BLOCK_VARS (block) = extp; } /* If this is the file scope set DECL_CONTEXT of each decl to the TRANSLATION_UNIT_DECL. This makes same_translation_unit_p work. */ if (scope == file_scope) { DECL_CONTEXT (p) = context; if (TREE_CODE (p) == TYPE_DECL && TREE_TYPE (p) != error_mark_node) set_type_context (TREE_TYPE (p), context); } /* Fall through. */ /* Parameters go in DECL_ARGUMENTS, not BLOCK_VARS, and have already been put there by store_parm_decls. Unused- parameter warnings are handled by function.c. error_mark_node obviously does not go in BLOCK_VARS and does not get unused-variable warnings. */ case PARM_DECL: case ERROR_MARK: /* It is possible for a decl not to have a name. We get here with b->id NULL in this case. */ if (b->id) { gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; if (b->shadowed && b->shadowed->u.type) TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type; } break; default: gcc_unreachable (); } } /* Dispose of the block that we just made inside some higher level. */ if ((scope->function_body || scope == file_scope) && context) { DECL_INITIAL (context) = block; BLOCK_SUPERCONTEXT (block) = context; } else if (scope->outer) { if (block) SCOPE_LIST_APPEND (scope->outer, blocks, block); /* If we did not make a block for the scope just exited, any blocks made for inner scopes must be carried forward so they will later become subblocks of something else. */ else if (scope->blocks) SCOPE_LIST_CONCAT (scope->outer, blocks, scope, blocks); } /* Pop the current scope, and free the structure for reuse. 
*/ current_scope = scope->outer; if (scope->function_body) current_function_scope = scope->outer_function; memset (scope, 0, sizeof (struct c_scope)); scope->outer = scope_freelist; scope_freelist = scope; return block; } void push_file_scope (void) { tree decl; if (file_scope) return; push_scope (); file_scope = current_scope; start_fname_decls (); for (decl = visible_builtins; decl; decl = DECL_CHAIN (decl)) bind (DECL_NAME (decl), decl, file_scope, /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl)); } void pop_file_scope (void) { /* In case there were missing closebraces, get us back to the global binding level. */ while (current_scope != file_scope) pop_scope (); /* __FUNCTION__ is defined at file scope (""). This call may not be necessary as my tests indicate it still works without it. */ finish_fname_decls (); check_inline_statics (); /* This is the point to write out a PCH if we're doing that. In that case we do not want to do anything else. */ if (pch_file) { c_common_write_pch (); return; } /* Pop off the file scope and close this translation unit. */ pop_scope (); file_scope = 0; maybe_apply_pending_pragma_weaks (); } /* Adjust the bindings for the start of a statement expression. */ void c_bindings_start_stmt_expr (struct c_spot_bindings* switch_bindings) { struct c_scope *scope; for (scope = current_scope; scope != NULL; scope = scope->outer) { struct c_binding *b; if (!scope->has_label_bindings) continue; for (b = scope->bindings; b != NULL; b = b->prev) { struct c_label_vars *label_vars; unsigned int ix; struct c_goto_bindings *g; if (TREE_CODE (b->decl) != LABEL_DECL) continue; label_vars = b->u.label; ++label_vars->label_bindings.stmt_exprs; FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) ++g->goto_bindings.stmt_exprs; } } if (switch_bindings != NULL) ++switch_bindings->stmt_exprs; } /* Adjust the bindings for the end of a statement expression. */ void c_bindings_end_stmt_expr (struct c_spot_bindings *switch_bindings) { struct c_scope *scope; for (scope = current_scope; scope != NULL; scope = scope->outer) { struct c_binding *b; if (!scope->has_label_bindings) continue; for (b = scope->bindings; b != NULL; b = b->prev) { struct c_label_vars *label_vars; unsigned int ix; struct c_goto_bindings *g; if (TREE_CODE (b->decl) != LABEL_DECL) continue; label_vars = b->u.label; --label_vars->label_bindings.stmt_exprs; if (label_vars->label_bindings.stmt_exprs < 0) { label_vars->label_bindings.left_stmt_expr = true; label_vars->label_bindings.stmt_exprs = 0; } FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) { --g->goto_bindings.stmt_exprs; if (g->goto_bindings.stmt_exprs < 0) { g->goto_bindings.left_stmt_expr = true; g->goto_bindings.stmt_exprs = 0; } } } } if (switch_bindings != NULL) { --switch_bindings->stmt_exprs; gcc_assert (switch_bindings->stmt_exprs >= 0); } } /* Push a definition or a declaration of struct, union or enum tag "name". "type" should be the type node. We assume that the tag "name" is not already defined, and has a location of LOC. Note that the definition may really be just a forward reference. In that case, the TYPE_SIZE will be zero. */ static void pushtag (location_t loc, tree name, tree type) { /* Record the identifier as the type's name if it has none. */ if (name && !TYPE_NAME (type)) TYPE_NAME (type) = name; bind (name, type, current_scope, /*invisible=*/false, /*nested=*/false, loc); /* Create a fake NULL-named TYPE_DECL node whose TREE_TYPE will be the tagged type we just added to the current scope. 
This fake NULL-named TYPE_DECL node helps dwarfout.c to know when it needs to output a representation of a tagged type, and it also gives us a convenient place to record the "scope start" address for the tagged type. */ TYPE_STUB_DECL (type) = pushdecl (build_decl (loc, TYPE_DECL, NULL_TREE, type)); /* An approximation for now, so we can tell this is a function-scope tag. This will be updated in pop_scope. */ TYPE_CONTEXT (type) = DECL_CONTEXT (TYPE_STUB_DECL (type)); if (warn_cxx_compat && name != NULL_TREE) { struct c_binding *b = I_SYMBOL_BINDING (name); if (b != NULL && b->decl != NULL_TREE && TREE_CODE (b->decl) == TYPE_DECL && (B_IN_CURRENT_SCOPE (b) || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b))) && (TYPE_MAIN_VARIANT (TREE_TYPE (b->decl)) != TYPE_MAIN_VARIANT (type))) { warning_at (loc, OPT_Wc___compat, ("using %qD as both a typedef and a tag is " "invalid in C++"), b->decl); if (b->locus != UNKNOWN_LOCATION) inform (b->locus, "originally defined here"); } } } /* An exported interface to pushtag. This is used by the gdb plugin's binding oracle to introduce a new tag binding. */ void c_pushtag (location_t loc, tree name, tree type) { pushtag (loc, name, type); } /* An exported interface to bind a declaration. LOC is the location to use. DECL is the declaration to bind. The decl's name is used to determine how it is bound. If DECL is a VAR_DECL, then IS_GLOBAL determines whether the decl is put into the global (file and external) scope or the current function's scope; if DECL is not a VAR_DECL then it is always put into the file scope. */ void c_bind (location_t loc, tree decl, bool is_global) { struct c_scope *scope; bool nested = false; if (TREE_CODE (decl) != VAR_DECL || current_function_scope == NULL) { /* Types and functions are always considered to be global. */ scope = file_scope; DECL_EXTERNAL (decl) = 1; TREE_PUBLIC (decl) = 1; } else if (is_global) { /* Also bind it into the external scope. */ bind (DECL_NAME (decl), decl, external_scope, true, false, loc); nested = true; scope = file_scope; DECL_EXTERNAL (decl) = 1; TREE_PUBLIC (decl) = 1; } else { DECL_CONTEXT (decl) = current_function_decl; TREE_PUBLIC (decl) = 0; scope = current_function_scope; } bind (DECL_NAME (decl), decl, scope, false, nested, loc); } /* Subroutine of diagnose_mismatched_decls. Allow harmless mismatches in return and argument types provided that the type modes match. This function returns a unified type given a suitable match, and 0 otherwise. */ static tree match_builtin_function_types (tree newtype, tree oldtype) { tree newrettype, oldrettype; tree newargs, oldargs; tree trytype, tryargs; /* Accept the return type of the new declaration if same modes. */ oldrettype = TREE_TYPE (oldtype); newrettype = TREE_TYPE (newtype); if (TYPE_MODE (oldrettype) != TYPE_MODE (newrettype)) return 0; oldargs = TYPE_ARG_TYPES (oldtype); newargs = TYPE_ARG_TYPES (newtype); tryargs = newargs; while (oldargs || newargs) { if (!oldargs || !newargs || !TREE_VALUE (oldargs) || !TREE_VALUE (newargs) || TYPE_MODE (TREE_VALUE (oldargs)) != TYPE_MODE (TREE_VALUE (newargs))) return 0; oldargs = TREE_CHAIN (oldargs); newargs = TREE_CHAIN (newargs); } trytype = build_function_type (newrettype, tryargs); return build_type_attribute_variant (trytype, TYPE_ATTRIBUTES (oldtype)); } /* Subroutine of diagnose_mismatched_decls. Check for function type mismatch involving an empty arglist vs a nonempty one and give clearer diagnostics.
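   The classic case (illustrative only) is

     void f ();           empty parameter list, no prototype
     void f (float x);    float undergoes default promotion to double

   where the two types can never be compatible (C11 6.7.6.3p15), and the extra inform below spells out why.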
*/ static void diagnose_arglist_conflict (tree newdecl, tree olddecl, tree newtype, tree oldtype) { tree t; if (TREE_CODE (olddecl) != FUNCTION_DECL || !comptypes (TREE_TYPE (oldtype), TREE_TYPE (newtype)) || !((!prototype_p (oldtype) && DECL_INITIAL (olddecl) == 0) || (!prototype_p (newtype) && DECL_INITIAL (newdecl) == 0))) return; t = TYPE_ARG_TYPES (oldtype); if (t == 0) t = TYPE_ARG_TYPES (newtype); for (; t; t = TREE_CHAIN (t)) { tree type = TREE_VALUE (t); if (TREE_CHAIN (t) == 0 && TYPE_MAIN_VARIANT (type) != void_type_node) { inform (input_location, "a parameter list with an ellipsis can%'t match " "an empty parameter name list declaration"); break; } if (c_type_promotes_to (type) != type) { inform (input_location, "an argument type that has a default promotion can%'t match " "an empty parameter name list declaration"); break; } } } /* Another subroutine of diagnose_mismatched_decls. OLDDECL is an old-style function definition, NEWDECL is a prototype declaration. Diagnose inconsistencies in the argument list. Returns TRUE if the prototype is compatible, FALSE if not. */ static bool validate_proto_after_old_defn (tree newdecl, tree newtype, tree oldtype) { tree newargs, oldargs; int i; #define END_OF_ARGLIST(t) ((t) == void_type_node) oldargs = TYPE_ACTUAL_ARG_TYPES (oldtype); newargs = TYPE_ARG_TYPES (newtype); i = 1; for (;;) { tree oldargtype = TREE_VALUE (oldargs); tree newargtype = TREE_VALUE (newargs); if (oldargtype == error_mark_node || newargtype == error_mark_node) return false; oldargtype = (TYPE_ATOMIC (oldargtype) ? c_build_qualified_type (TYPE_MAIN_VARIANT (oldargtype), TYPE_QUAL_ATOMIC) : TYPE_MAIN_VARIANT (oldargtype)); newargtype = (TYPE_ATOMIC (newargtype) ? c_build_qualified_type (TYPE_MAIN_VARIANT (newargtype), TYPE_QUAL_ATOMIC) : TYPE_MAIN_VARIANT (newargtype)); if (END_OF_ARGLIST (oldargtype) && END_OF_ARGLIST (newargtype)) break; /* Reaching the end of just one list means the two decls don't agree on the number of arguments. */ if (END_OF_ARGLIST (oldargtype)) { error ("prototype for %q+D declares more arguments " "than previous old-style definition", newdecl); return false; } else if (END_OF_ARGLIST (newargtype)) { error ("prototype for %q+D declares fewer arguments " "than previous old-style definition", newdecl); return false; } /* Type for passing arg must be consistent with that declared for the arg. */ else if (!comptypes (oldargtype, newargtype)) { error ("prototype for %q+D declares argument %d" " with incompatible type", newdecl, i); return false; } oldargs = TREE_CHAIN (oldargs); newargs = TREE_CHAIN (newargs); i++; } /* If we get here, no errors were found, but do issue a warning for this poor-style construct. */ warning (0, "prototype for %q+D follows non-prototype definition", newdecl); return true; #undef END_OF_ARGLIST } /* Subroutine of diagnose_mismatched_decls. Report the location of DECL, first in a pair of mismatched declarations, using the diagnostic function DIAG. */ static void locate_old_decl (tree decl) { if (TREE_CODE (decl) == FUNCTION_DECL && DECL_BUILT_IN (decl) && !C_DECL_DECLARED_BUILTIN (decl)) ; else if (DECL_INITIAL (decl)) inform (input_location, "previous definition of %q+D was here", decl); else if (C_DECL_IMPLICIT (decl)) inform (input_location, "previous implicit declaration of %q+D was here", decl); else inform (input_location, "previous declaration of %q+D was here", decl); } /* Subroutine of duplicate_decls. Compare NEWDECL to OLDDECL. 
Returns true if the caller should proceed to merge the two, false if OLDDECL should simply be discarded. As a side effect, issues all necessary diagnostics for invalid or poor-style combinations. If it returns true, writes the types of NEWDECL and OLDDECL to *NEWTYPEP and *OLDTYPEP - these may have been adjusted from TREE_TYPE (NEWDECL, OLDDECL) respectively. */ static bool diagnose_mismatched_decls (tree newdecl, tree olddecl, tree *newtypep, tree *oldtypep) { tree newtype, oldtype; bool pedwarned = false; bool warned = false; bool retval = true; #define DECL_EXTERN_INLINE(DECL) (DECL_DECLARED_INLINE_P (DECL) \ && DECL_EXTERNAL (DECL)) /* If we have error_mark_node for either decl or type, just discard the previous decl - we're in an error cascade already. */ if (olddecl == error_mark_node || newdecl == error_mark_node) return false; *oldtypep = oldtype = TREE_TYPE (olddecl); *newtypep = newtype = TREE_TYPE (newdecl); if (oldtype == error_mark_node || newtype == error_mark_node) return false; /* Two different categories of symbol altogether. This is an error unless OLDDECL is a builtin. OLDDECL will be discarded in any case. */ if (TREE_CODE (olddecl) != TREE_CODE (newdecl)) { if (!(TREE_CODE (olddecl) == FUNCTION_DECL && DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl))) { error ("%q+D redeclared as different kind of symbol", newdecl); locate_old_decl (olddecl); } else if (TREE_PUBLIC (newdecl)) warning (0, "built-in function %q+D declared as non-function", newdecl); else warning (OPT_Wshadow, "declaration of %q+D shadows " "a built-in function", newdecl); return false; } /* Enumerators have no linkage, so may only be declared once in a given scope. */ if (TREE_CODE (olddecl) == CONST_DECL) { error ("redeclaration of enumerator %q+D", newdecl); locate_old_decl (olddecl); return false; } if (!comptypes (oldtype, newtype)) { if (TREE_CODE (olddecl) == FUNCTION_DECL && DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl)) { /* Accept harmless mismatch in function types. This is for the ffs and fprintf builtins. */ tree trytype = match_builtin_function_types (newtype, oldtype); if (trytype && comptypes (newtype, trytype)) *oldtypep = oldtype = trytype; else { /* If types don't match for a built-in, throw away the built-in. No point in calling locate_old_decl here, it won't print anything. */ warning (0, "conflicting types for built-in function %q+D", newdecl); return false; } } else if (TREE_CODE (olddecl) == FUNCTION_DECL && DECL_IS_BUILTIN (olddecl)) { /* A conflicting function declaration for a predeclared function that isn't actually built in. Objective C uses these. The new declaration silently overrides everything but the volatility (i.e. noreturn) indication. See also below. FIXME: Make Objective C use normal builtins. */ TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl); return false; } /* Permit void foo (...) to match int foo (...) if the latter is the definition and implicit int was used. See c-torture/compile/920625-2.c. */ else if (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == void_type_node && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == integer_type_node && C_FUNCTION_IMPLICIT_INT (newdecl) && !DECL_INITIAL (olddecl)) { pedwarned = pedwarn (input_location, 0, "conflicting types for %q+D", newdecl); /* Make sure we keep void as the return type. */ TREE_TYPE (newdecl) = *newtypep = newtype = oldtype; C_FUNCTION_IMPLICIT_INT (newdecl) = 0; } /* Permit void foo (...) 
to match an earlier call to foo (...) with no declared type (thus, implicitly int). */ else if (TREE_CODE (newdecl) == FUNCTION_DECL && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == void_type_node && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == integer_type_node && C_DECL_IMPLICIT (olddecl) && !DECL_INITIAL (olddecl)) { pedwarned = pedwarn (input_location, 0, "conflicting types for %q+D", newdecl); /* Make sure we keep void as the return type. */ TREE_TYPE (olddecl) = *oldtypep = oldtype = newtype; } else { int new_quals = TYPE_QUALS (newtype); int old_quals = TYPE_QUALS (oldtype); if (new_quals != old_quals) { addr_space_t new_addr = DECODE_QUAL_ADDR_SPACE (new_quals); addr_space_t old_addr = DECODE_QUAL_ADDR_SPACE (old_quals); if (new_addr != old_addr) { if (ADDR_SPACE_GENERIC_P (new_addr)) error ("conflicting named address spaces (generic vs %s) " "for %q+D", c_addr_space_name (old_addr), newdecl); else if (ADDR_SPACE_GENERIC_P (old_addr)) error ("conflicting named address spaces (%s vs generic) " "for %q+D", c_addr_space_name (new_addr), newdecl); else error ("conflicting named address spaces (%s vs %s) " "for %q+D", c_addr_space_name (new_addr), c_addr_space_name (old_addr), newdecl); } if (CLEAR_QUAL_ADDR_SPACE (new_quals) != CLEAR_QUAL_ADDR_SPACE (old_quals)) error ("conflicting type qualifiers for %q+D", newdecl); } else error ("conflicting types for %q+D", newdecl); diagnose_arglist_conflict (newdecl, olddecl, newtype, oldtype); locate_old_decl (olddecl); return false; } } /* Redeclaration of a type is a constraint violation (6.7.2.3p1), but silently ignore the redeclaration if either is in a system header. (Conflicting redeclarations were handled above.) This is allowed for C11 if the types are the same, not just compatible. */ if (TREE_CODE (newdecl) == TYPE_DECL) { bool types_different = false; int comptypes_result; comptypes_result = comptypes_check_different_types (oldtype, newtype, &types_different); if (comptypes_result != 1 || types_different) { error ("redefinition of typedef %q+D with different type", newdecl); locate_old_decl (olddecl); return false; } if (DECL_IN_SYSTEM_HEADER (newdecl) || DECL_IN_SYSTEM_HEADER (olddecl) || TREE_NO_WARNING (newdecl) || TREE_NO_WARNING (olddecl)) return true; /* Allow OLDDECL to continue in use. */ if (variably_modified_type_p (newtype, NULL)) { error ("redefinition of typedef %q+D with variably modified type", newdecl); locate_old_decl (olddecl); } else if (pedwarn_c99 (input_location, OPT_Wpedantic, "redefinition of typedef %q+D", newdecl)) locate_old_decl (olddecl); return true; } /* Function declarations can either be 'static' or 'extern' (no qualifier is equivalent to 'extern' - C99 6.2.2p5) and therefore can never conflict with each other on account of linkage (6.2.2p4). Multiple definitions are not allowed (6.9p3,5) but gnu89 mode permits two definitions if one is 'extern inline' and one is not. The non- extern-inline definition supersedes the extern-inline definition. */ else if (TREE_CODE (newdecl) == FUNCTION_DECL) { /* If you declare a built-in function name as static, or define the built-in with an old-style definition (so we can't validate the argument list) the built-in definition is overridden, but optionally warn this was a bad choice of name. 
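For illustration, assuming <math.h> has not been included, a made-up
   translation unit containing only

       static double cos (double x) { return 1.0; }

   declares cos with internal linkage; the new decl is not TREE_PUBLIC,
   so the built-in cos is discarded below and only the -Wshadow warning
   is given.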
*/ if (DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl) && (!TREE_PUBLIC (newdecl) || (DECL_INITIAL (newdecl) && !prototype_p (TREE_TYPE (newdecl))))) { warning (OPT_Wshadow, "declaration of %q+D shadows " "a built-in function", newdecl); /* Discard the old built-in function. */ return false; } if (DECL_INITIAL (newdecl)) { if (DECL_INITIAL (olddecl)) { /* If both decls are in the same TU and the new declaration isn't overriding an extern inline reject the new decl. In c99, no overriding is allowed in the same translation unit. */ if ((!DECL_EXTERN_INLINE (olddecl) || DECL_EXTERN_INLINE (newdecl) || (!flag_gnu89_inline && (!DECL_DECLARED_INLINE_P (olddecl) || !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (olddecl))) && (!DECL_DECLARED_INLINE_P (newdecl) || !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl)))) ) && same_translation_unit_p (newdecl, olddecl)) { error ("redefinition of %q+D", newdecl); locate_old_decl (olddecl); return false; } } } /* If we have a prototype after an old-style function definition, the argument types must be checked specially. */ else if (DECL_INITIAL (olddecl) && !prototype_p (oldtype) && prototype_p (newtype) && TYPE_ACTUAL_ARG_TYPES (oldtype) && !validate_proto_after_old_defn (newdecl, newtype, oldtype)) { locate_old_decl (olddecl); return false; } /* A non-static declaration (even an "extern") followed by a static declaration is undefined behavior per C99 6.2.2p3-5,7. The same is true for a static forward declaration at block scope followed by a non-static declaration/definition at file scope. Static followed by non-static at the same scope is not undefined behavior, and is the most convenient way to get some effects (see e.g. what unwind-dw2-fde-glibc.c does to the definition of _Unwind_Find_FDE in unwind-dw2-fde.c), but we do diagnose it if -Wtraditional. */ if (TREE_PUBLIC (olddecl) && !TREE_PUBLIC (newdecl)) { /* Two exceptions to the rule. If olddecl is an extern inline, or a predeclared function that isn't actually built in, newdecl silently overrides olddecl. The latter occur only in Objective C; see also above. (FIXME: Make Objective C use normal builtins.) */ if (!DECL_IS_BUILTIN (olddecl) && !DECL_EXTERN_INLINE (olddecl)) { error ("static declaration of %q+D follows " "non-static declaration", newdecl); locate_old_decl (olddecl); } return false; } else if (TREE_PUBLIC (newdecl) && !TREE_PUBLIC (olddecl)) { if (DECL_CONTEXT (olddecl)) { error ("non-static declaration of %q+D follows " "static declaration", newdecl); locate_old_decl (olddecl); return false; } else if (warn_traditional) { warned |= warning (OPT_Wtraditional, "non-static declaration of %q+D " "follows static declaration", newdecl); } } /* Make sure gnu_inline attribute is either not present, or present on all inline decls. */ if (DECL_DECLARED_INLINE_P (olddecl) && DECL_DECLARED_INLINE_P (newdecl)) { bool newa = lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl)) != NULL; bool olda = lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (olddecl)) != NULL; if (newa != olda) { error_at (input_location, "%<gnu_inline%> attribute present on %q+D", newa ? newdecl : olddecl); error_at (DECL_SOURCE_LOCATION (newa ? olddecl : newdecl), "but not here"); } } } else if (TREE_CODE (newdecl) == VAR_DECL) { /* Only variables can be thread-local, and all declarations must agree on this property. */ if (C_DECL_THREADPRIVATE_P (olddecl) && !DECL_THREAD_LOCAL_P (newdecl)) { /* Nothing to check. 
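(The usual way to reach this, for illustration and assuming the OpenMP
   pragma:

       int v;
       #pragma omp threadprivate (v)
       extern int v;

   where the final declaration is NEWDECL.)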
Since OLDDECL is marked threadprivate and NEWDECL does not have a thread-local attribute, we will merge the threadprivate attribute into NEWDECL. */ ; } else if (DECL_THREAD_LOCAL_P (newdecl) != DECL_THREAD_LOCAL_P (olddecl)) { if (DECL_THREAD_LOCAL_P (newdecl)) error ("thread-local declaration of %q+D follows " "non-thread-local declaration", newdecl); else error ("non-thread-local declaration of %q+D follows " "thread-local declaration", newdecl); locate_old_decl (olddecl); return false; } /* Multiple initialized definitions are not allowed (6.9p3,5). */ if (DECL_INITIAL (newdecl) && DECL_INITIAL (olddecl)) { error ("redefinition of %q+D", newdecl); locate_old_decl (olddecl); return false; } /* Objects declared at file scope: if the first declaration had external linkage (even if it was an external reference) the second must have external linkage as well, or the behavior is undefined. If the first declaration had internal linkage, then the second must too, or else be an external reference (in which case the composite declaration still has internal linkage). As for function declarations, we warn about the static-then- extern case only for -Wtraditional. See generally 6.2.2p3-5,7. */ if (DECL_FILE_SCOPE_P (newdecl) && TREE_PUBLIC (newdecl) != TREE_PUBLIC (olddecl)) { if (DECL_EXTERNAL (newdecl)) { if (!DECL_FILE_SCOPE_P (olddecl)) { error ("extern declaration of %q+D follows " "declaration with no linkage", newdecl); locate_old_decl (olddecl); return false; } else if (warn_traditional) { warned |= warning (OPT_Wtraditional, "non-static declaration of %q+D " "follows static declaration", newdecl); } } else { if (TREE_PUBLIC (newdecl)) error ("non-static declaration of %q+D follows " "static declaration", newdecl); else error ("static declaration of %q+D follows " "non-static declaration", newdecl); locate_old_decl (olddecl); return false; } } /* Two objects with the same name declared at the same block scope must both be external references (6.7p3). */ else if (!DECL_FILE_SCOPE_P (newdecl)) { if (DECL_EXTERNAL (newdecl)) { /* Extern with initializer at block scope, which will already have received an error. */ } else if (DECL_EXTERNAL (olddecl)) { error ("declaration of %q+D with no linkage follows " "extern declaration", newdecl); locate_old_decl (olddecl); } else { error ("redeclaration of %q+D with no linkage", newdecl); locate_old_decl (olddecl); } return false; } /* C++ does not permit a decl to appear multiple times at file scope. */ if (warn_cxx_compat && DECL_FILE_SCOPE_P (newdecl) && !DECL_EXTERNAL (newdecl) && !DECL_EXTERNAL (olddecl)) warned |= warning_at (DECL_SOURCE_LOCATION (newdecl), OPT_Wc___compat, ("duplicate declaration of %qD is " "invalid in C++"), newdecl); } /* warnings */ /* All decls must agree on a visibility. */ if (CODE_CONTAINS_STRUCT (TREE_CODE (newdecl), TS_DECL_WITH_VIS) && DECL_VISIBILITY_SPECIFIED (newdecl) && DECL_VISIBILITY_SPECIFIED (olddecl) && DECL_VISIBILITY (newdecl) != DECL_VISIBILITY (olddecl)) { warned |= warning (0, "redeclaration of %q+D with different visibility " "(old visibility preserved)", newdecl); } if (TREE_CODE (newdecl) == FUNCTION_DECL) { /* Diagnose inline __attribute__ ((noinline)) which is silly. 
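For illustration, with a made-up name:

       inline int f (int);
       int f (int) __attribute__ ((noinline));

   Either ordering, and likewise the noinline/always_inline and hot/cold
   pairs, is caught by the -Wattributes warnings below.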
*/ if (DECL_DECLARED_INLINE_P (newdecl) && lookup_attribute ("noinline", DECL_ATTRIBUTES (olddecl))) warned |= warning (OPT_Wattributes, "inline declaration of %qD follows " "declaration with attribute noinline", newdecl); else if (DECL_DECLARED_INLINE_P (olddecl) && lookup_attribute ("noinline", DECL_ATTRIBUTES (newdecl))) warned |= warning (OPT_Wattributes, "declaration of %q+D with attribute " "noinline follows inline declaration ", newdecl); else if (lookup_attribute ("noinline", DECL_ATTRIBUTES (newdecl)) && lookup_attribute ("always_inline", DECL_ATTRIBUTES (olddecl))) warned |= warning (OPT_Wattributes, "declaration of %q+D with attribute " "%qs follows declaration with attribute %qs", newdecl, "noinline", "always_inline"); else if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (newdecl)) && lookup_attribute ("noinline", DECL_ATTRIBUTES (olddecl))) warned |= warning (OPT_Wattributes, "declaration of %q+D with attribute " "%qs follows declaration with attribute %qs", newdecl, "always_inline", "noinline"); else if (lookup_attribute ("cold", DECL_ATTRIBUTES (newdecl)) && lookup_attribute ("hot", DECL_ATTRIBUTES (olddecl))) warned |= warning (OPT_Wattributes, "declaration of %q+D with attribute %qs follows " "declaration with attribute %qs", newdecl, "cold", "hot"); else if (lookup_attribute ("hot", DECL_ATTRIBUTES (newdecl)) && lookup_attribute ("cold", DECL_ATTRIBUTES (olddecl))) warned |= warning (OPT_Wattributes, "declaration of %q+D with attribute %qs follows " "declaration with attribute %qs", newdecl, "hot", "cold"); } else /* PARM_DECL, VAR_DECL */ { /* Redeclaration of a parameter is a constraint violation (this is not explicitly stated, but follows from C99 6.7p3 [no more than one declaration of the same identifier with no linkage in the same scope, except type tags] and 6.2.2p6 [parameters have no linkage]). We must check for a forward parameter declaration, indicated by TREE_ASM_WRITTEN on the old declaration - this is an extension, the mandatory diagnostic for which is handled by mark_forward_parm_decls. */ if (TREE_CODE (newdecl) == PARM_DECL && (!TREE_ASM_WRITTEN (olddecl) || TREE_ASM_WRITTEN (newdecl))) { error ("redefinition of parameter %q+D", newdecl); locate_old_decl (olddecl); return false; } } /* Optional warning for completely redundant decls. */ if (!warned && !pedwarned && warn_redundant_decls /* Don't warn about a function declaration followed by a definition. */ && !(TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl)) /* Don't warn about redundant redeclarations of builtins. */ && !(TREE_CODE (newdecl) == FUNCTION_DECL && !DECL_BUILT_IN (newdecl) && DECL_BUILT_IN (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl)) /* Don't warn about an extern followed by a definition. */ && !(DECL_EXTERNAL (olddecl) && !DECL_EXTERNAL (newdecl)) /* Don't warn about forward parameter decls. */ && !(TREE_CODE (newdecl) == PARM_DECL && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl)) /* Don't warn about a variable definition following a declaration. */ && !(TREE_CODE (newdecl) == VAR_DECL && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl))) { warned = warning (OPT_Wredundant_decls, "redundant redeclaration of %q+D", newdecl); } /* Report location of previous decl/defn. */ if (warned || pedwarned) locate_old_decl (olddecl); #undef DECL_EXTERN_INLINE return retval; } /* Subroutine of duplicate_decls. NEWDECL has been found to be consistent with OLDDECL, but carries new information. Merge the new information into OLDDECL. 
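For example, with a made-up name, after

       int f ();
       int f (int x);

   the composite type int f (int) computed below becomes the type of
   both decls.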
This function issues no diagnostics. */ static void merge_decls (tree newdecl, tree olddecl, tree newtype, tree oldtype) { bool new_is_definition = (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) != 0); bool new_is_prototype = (TREE_CODE (newdecl) == FUNCTION_DECL && prototype_p (TREE_TYPE (newdecl))); bool old_is_prototype = (TREE_CODE (olddecl) == FUNCTION_DECL && prototype_p (TREE_TYPE (olddecl))); /* For real parm decl following a forward decl, rechain the old decl in its new location and clear TREE_ASM_WRITTEN (it's not a forward decl anymore). */ if (TREE_CODE (newdecl) == PARM_DECL && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl)) { struct c_binding *b, **here; for (here = &current_scope->bindings; *here; here = &(*here)->prev) if ((*here)->decl == olddecl) goto found; gcc_unreachable (); found: b = *here; *here = b->prev; b->prev = current_scope->bindings; current_scope->bindings = b; TREE_ASM_WRITTEN (olddecl) = 0; } DECL_ATTRIBUTES (newdecl) = targetm.merge_decl_attributes (olddecl, newdecl); /* For typedefs use the old type, as the new type's DECL_NAME points at newdecl, which will be ggc_freed. */ if (TREE_CODE (newdecl) == TYPE_DECL) { /* But NEWTYPE might have an attribute, honor that. */ tree tem = newtype; newtype = oldtype; if (TYPE_USER_ALIGN (tem)) { if (TYPE_ALIGN (tem) > TYPE_ALIGN (newtype)) TYPE_ALIGN (newtype) = TYPE_ALIGN (tem); TYPE_USER_ALIGN (newtype) = true; } /* And remove the new type from the variants list. */ if (TYPE_NAME (TREE_TYPE (newdecl)) == newdecl) { tree remove = TREE_TYPE (newdecl); for (tree t = TYPE_MAIN_VARIANT (remove); ; t = TYPE_NEXT_VARIANT (t)) if (TYPE_NEXT_VARIANT (t) == remove) { TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (remove); break; } } } /* Merge the data types specified in the two decls. */ TREE_TYPE (newdecl) = TREE_TYPE (olddecl) = composite_type (newtype, oldtype); /* Lay the type out, unless already done. */ if (!comptypes (oldtype, TREE_TYPE (newdecl))) { if (TREE_TYPE (newdecl) != error_mark_node) layout_type (TREE_TYPE (newdecl)); if (TREE_CODE (newdecl) != FUNCTION_DECL && TREE_CODE (newdecl) != TYPE_DECL && TREE_CODE (newdecl) != CONST_DECL) layout_decl (newdecl, 0); } else { /* Since the type is OLDDECL's, make OLDDECL's size go with. */ DECL_SIZE (newdecl) = DECL_SIZE (olddecl); DECL_SIZE_UNIT (newdecl) = DECL_SIZE_UNIT (olddecl); DECL_MODE (newdecl) = DECL_MODE (olddecl); if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl)) { DECL_ALIGN (newdecl) = DECL_ALIGN (olddecl); DECL_USER_ALIGN (newdecl) |= DECL_USER_ALIGN (olddecl); } } /* Keep the old rtl since we can safely use it. */ if (HAS_RTL_P (olddecl)) COPY_DECL_RTL (olddecl, newdecl); /* Merge the type qualifiers. */ if (TREE_READONLY (newdecl)) TREE_READONLY (olddecl) = 1; if (TREE_THIS_VOLATILE (newdecl)) TREE_THIS_VOLATILE (olddecl) = 1; /* Merge deprecatedness. */ if (TREE_DEPRECATED (newdecl)) TREE_DEPRECATED (olddecl) = 1; /* If a decl is in a system header and the other isn't, keep the one on the system header. Otherwise, keep source location of definition rather than declaration and of prototype rather than non-prototype unless that prototype is built-in. 
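For illustration, with a made-up name, given

       int g (void) { return 0; }
       int g (void);

   the second decl inherits the definition's source location.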
*/ if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS) && DECL_IN_SYSTEM_HEADER (olddecl) && !DECL_IN_SYSTEM_HEADER (newdecl) ) DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl); else if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS) && DECL_IN_SYSTEM_HEADER (newdecl) && !DECL_IN_SYSTEM_HEADER (olddecl)) DECL_SOURCE_LOCATION (olddecl) = DECL_SOURCE_LOCATION (newdecl); else if ((DECL_INITIAL (newdecl) == 0 && DECL_INITIAL (olddecl) != 0) || (old_is_prototype && !new_is_prototype && !C_DECL_BUILTIN_PROTOTYPE (olddecl))) DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl); /* Merge the initialization information. */ if (DECL_INITIAL (newdecl) == 0) DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl); /* Merge the threadprivate attribute. */ if (TREE_CODE (olddecl) == VAR_DECL && C_DECL_THREADPRIVATE_P (olddecl)) C_DECL_THREADPRIVATE_P (newdecl) = 1; if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)) { /* Copy the assembler name. Currently, it can only be defined in the prototype. */ COPY_DECL_ASSEMBLER_NAME (olddecl, newdecl); /* Use visibility of whichever declaration had it specified */ if (DECL_VISIBILITY_SPECIFIED (olddecl)) { DECL_VISIBILITY (newdecl) = DECL_VISIBILITY (olddecl); DECL_VISIBILITY_SPECIFIED (newdecl) = 1; } if (TREE_CODE (newdecl) == FUNCTION_DECL) { DECL_STATIC_CONSTRUCTOR(newdecl) |= DECL_STATIC_CONSTRUCTOR(olddecl); DECL_STATIC_DESTRUCTOR (newdecl) |= DECL_STATIC_DESTRUCTOR (olddecl); DECL_NO_LIMIT_STACK (newdecl) |= DECL_NO_LIMIT_STACK (olddecl); DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (newdecl) |= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (olddecl); TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl); DECL_IS_MALLOC (newdecl) |= DECL_IS_MALLOC (olddecl); DECL_IS_OPERATOR_NEW (newdecl) |= DECL_IS_OPERATOR_NEW (olddecl); TREE_READONLY (newdecl) |= TREE_READONLY (olddecl); DECL_PURE_P (newdecl) |= DECL_PURE_P (olddecl); DECL_IS_NOVOPS (newdecl) |= DECL_IS_NOVOPS (olddecl); } /* Merge the storage class information. */ merge_weak (newdecl, olddecl); /* For functions, static overrides non-static. */ if (TREE_CODE (newdecl) == FUNCTION_DECL) { TREE_PUBLIC (newdecl) &= TREE_PUBLIC (olddecl); /* This is since we don't automatically copy the attributes of NEWDECL into OLDDECL. */ TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl); /* If this clears `static', clear it in the identifier too. */ if (!TREE_PUBLIC (olddecl)) TREE_PUBLIC (DECL_NAME (olddecl)) = 0; } } /* In c99, 'extern' declaration before (or after) 'inline' means this function is not DECL_EXTERNAL, unless 'gnu_inline' attribute is present. */ if (TREE_CODE (newdecl) == FUNCTION_DECL && !flag_gnu89_inline && (DECL_DECLARED_INLINE_P (newdecl) || DECL_DECLARED_INLINE_P (olddecl)) && (!DECL_DECLARED_INLINE_P (newdecl) || !DECL_DECLARED_INLINE_P (olddecl) || !DECL_EXTERNAL (olddecl)) && DECL_EXTERNAL (newdecl) && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl)) && !current_function_decl) DECL_EXTERNAL (newdecl) = 0; /* An inline definition following a static declaration is not DECL_EXTERNAL. */ if (new_is_definition && (DECL_DECLARED_INLINE_P (newdecl) || DECL_DECLARED_INLINE_P (olddecl)) && !TREE_PUBLIC (olddecl)) DECL_EXTERNAL (newdecl) = 0; if (DECL_EXTERNAL (newdecl)) { TREE_STATIC (newdecl) = TREE_STATIC (olddecl); DECL_EXTERNAL (newdecl) = DECL_EXTERNAL (olddecl); /* An extern decl does not override previous storage class. 
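For illustration, with a made-up name:

       static int counter;
       extern int counter;

   the second declaration keeps internal linkage (C99 6.2.2p4), which is
   why TREE_PUBLIC is copied from OLDDECL below.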
*/ TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl); if (!DECL_EXTERNAL (newdecl)) { DECL_CONTEXT (newdecl) = DECL_CONTEXT (olddecl); DECL_COMMON (newdecl) = DECL_COMMON (olddecl); } } else { TREE_STATIC (olddecl) = TREE_STATIC (newdecl); TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl); } if (TREE_CODE (newdecl) == FUNCTION_DECL) { /* If we're redefining a function previously defined as extern inline, make sure we emit debug info for the inline before we throw it away, in case it was inlined into a function that hasn't been written out yet. */ if (new_is_definition && DECL_INITIAL (olddecl)) /* The new defn must not be inline. */ DECL_UNINLINABLE (newdecl) = 1; else { /* If either decl says `inline', this fn is inline, unless its definition was passed already. */ if (DECL_DECLARED_INLINE_P (newdecl) || DECL_DECLARED_INLINE_P (olddecl)) DECL_DECLARED_INLINE_P (newdecl) = 1; DECL_UNINLINABLE (newdecl) = DECL_UNINLINABLE (olddecl) = (DECL_UNINLINABLE (newdecl) || DECL_UNINLINABLE (olddecl)); DECL_DISREGARD_INLINE_LIMITS (newdecl) = DECL_DISREGARD_INLINE_LIMITS (olddecl) = (DECL_DISREGARD_INLINE_LIMITS (newdecl) || DECL_DISREGARD_INLINE_LIMITS (olddecl)); } if (DECL_BUILT_IN (olddecl)) { /* If redeclaring a builtin function, it stays built in. But it gets tagged as having been declared. */ DECL_BUILT_IN_CLASS (newdecl) = DECL_BUILT_IN_CLASS (olddecl); DECL_FUNCTION_CODE (newdecl) = DECL_FUNCTION_CODE (olddecl); C_DECL_DECLARED_BUILTIN (newdecl) = 1; if (new_is_prototype) { C_DECL_BUILTIN_PROTOTYPE (newdecl) = 0; if (DECL_BUILT_IN_CLASS (newdecl) == BUILT_IN_NORMAL) { enum built_in_function fncode = DECL_FUNCTION_CODE (newdecl); switch (fncode) { /* If a compatible prototype of these builtin functions is seen, assume the runtime implements it with the expected semantics. */ case BUILT_IN_STPCPY: if (builtin_decl_explicit_p (fncode)) set_builtin_decl_implicit_p (fncode, true); break; default: if (builtin_decl_explicit_p (fncode)) set_builtin_decl_declared_p (fncode, true); break; } } } else C_DECL_BUILTIN_PROTOTYPE (newdecl) = C_DECL_BUILTIN_PROTOTYPE (olddecl); } /* Preserve function specific target and optimization options */ if (DECL_FUNCTION_SPECIFIC_TARGET (olddecl) && !DECL_FUNCTION_SPECIFIC_TARGET (newdecl)) DECL_FUNCTION_SPECIFIC_TARGET (newdecl) = DECL_FUNCTION_SPECIFIC_TARGET (olddecl); if (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl) && !DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl)) DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl) = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl); /* Also preserve various other info from the definition. */ if (!new_is_definition) { tree t; DECL_RESULT (newdecl) = DECL_RESULT (olddecl); DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl); DECL_STRUCT_FUNCTION (newdecl) = DECL_STRUCT_FUNCTION (olddecl); DECL_SAVED_TREE (newdecl) = DECL_SAVED_TREE (olddecl); DECL_ARGUMENTS (newdecl) = copy_list (DECL_ARGUMENTS (olddecl)); for (t = DECL_ARGUMENTS (newdecl); t ; t = DECL_CHAIN (t)) DECL_CONTEXT (t) = newdecl; /* See if we've got a function to instantiate from. */ if (DECL_SAVED_TREE (olddecl)) DECL_ABSTRACT_ORIGIN (newdecl) = DECL_ABSTRACT_ORIGIN (olddecl); } } /* Merge the USED information. 
*/ if (TREE_USED (olddecl)) TREE_USED (newdecl) = 1; else if (TREE_USED (newdecl)) TREE_USED (olddecl) = 1; if (TREE_CODE (olddecl) == VAR_DECL || TREE_CODE (olddecl) == PARM_DECL) DECL_READ_P (newdecl) |= DECL_READ_P (olddecl); if (DECL_PRESERVE_P (olddecl)) DECL_PRESERVE_P (newdecl) = 1; else if (DECL_PRESERVE_P (newdecl)) DECL_PRESERVE_P (olddecl) = 1; /* Copy most of the decl-specific fields of NEWDECL into OLDDECL. But preserve OLDDECL's DECL_UID, DECL_CONTEXT and DECL_ARGUMENTS (if appropriate). */ { unsigned olddecl_uid = DECL_UID (olddecl); tree olddecl_context = DECL_CONTEXT (olddecl); tree olddecl_arguments = NULL; if (TREE_CODE (olddecl) == FUNCTION_DECL) olddecl_arguments = DECL_ARGUMENTS (olddecl); memcpy ((char *) olddecl + sizeof (struct tree_common), (char *) newdecl + sizeof (struct tree_common), sizeof (struct tree_decl_common) - sizeof (struct tree_common)); DECL_USER_ALIGN (olddecl) = DECL_USER_ALIGN (newdecl); switch (TREE_CODE (olddecl)) { case FUNCTION_DECL: case VAR_DECL: { struct symtab_node *snode = olddecl->decl_with_vis.symtab_node; memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common)); olddecl->decl_with_vis.symtab_node = snode; if ((DECL_EXTERNAL (olddecl) || TREE_PUBLIC (olddecl) || TREE_STATIC (olddecl)) && DECL_SECTION_NAME (newdecl) != NULL) set_decl_section_name (olddecl, DECL_SECTION_NAME (newdecl)); /* This isn't quite correct for something like int __thread x attribute ((tls_model ("local-exec"))); extern int __thread x; as we'll lose the "local-exec" model. */ if (TREE_CODE (olddecl) == VAR_DECL && DECL_THREAD_LOCAL_P (newdecl)) set_decl_tls_model (olddecl, DECL_TLS_MODEL (newdecl)); break; } case FIELD_DECL: case PARM_DECL: case LABEL_DECL: case RESULT_DECL: case CONST_DECL: case TYPE_DECL: memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common)); break; default: memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), sizeof (struct tree_decl_non_common) - sizeof (struct tree_decl_common)); } DECL_UID (olddecl) = olddecl_uid; DECL_CONTEXT (olddecl) = olddecl_context; if (TREE_CODE (olddecl) == FUNCTION_DECL) DECL_ARGUMENTS (olddecl) = olddecl_arguments; } /* If OLDDECL had its DECL_RTL instantiated, re-invoke make_decl_rtl so that encode_section_info has a chance to look at the new decl flags and attributes. */ if (DECL_RTL_SET_P (olddecl) && (TREE_CODE (olddecl) == FUNCTION_DECL || (TREE_CODE (olddecl) == VAR_DECL && TREE_STATIC (olddecl)))) make_decl_rtl (olddecl); } /* Handle when a new declaration NEWDECL has the same name as an old one OLDDECL in the same binding contour. Prints an error message if appropriate. If safely possible, alter OLDDECL to look like NEWDECL, and return true. Otherwise, return false. */ static bool duplicate_decls (tree newdecl, tree olddecl) { tree newtype = NULL, oldtype = NULL; if (!diagnose_mismatched_decls (newdecl, olddecl, &newtype, &oldtype)) { /* Avoid `unused variable' and other warnings for OLDDECL. */ TREE_NO_WARNING (olddecl) = 1; return false; } merge_decls (newdecl, olddecl, newtype, oldtype); /* The NEWDECL will no longer be needed. Before releasing the node, be sure to remove function from symbol table that might have been inserted there to record comdat group. 
However, be sure not to free DECL_STRUCT_FUNCTION, because that structure is shared between NEWDECL and OLDDECL. */ if (TREE_CODE (newdecl) == FUNCTION_DECL) DECL_STRUCT_FUNCTION (newdecl) = NULL; if (TREE_CODE (newdecl) == FUNCTION_DECL || TREE_CODE (newdecl) == VAR_DECL) { struct symtab_node *snode = symtab_node::get (newdecl); if (snode) snode->remove (); } ggc_free (newdecl); return true; } /* Check whether decl-node NEW_DECL shadows an existing declaration. */ static void warn_if_shadowing (tree new_decl) { struct c_binding *b; /* Shadow warnings wanted? */ if (!warn_shadow /* No shadow warnings for internally generated vars. */ || DECL_IS_BUILTIN (new_decl) /* No shadow warnings for vars made for inlining. */ || DECL_FROM_INLINE (new_decl)) return; /* Is anything being shadowed? Invisible decls do not count. */ for (b = I_SYMBOL_BINDING (DECL_NAME (new_decl)); b; b = b->shadowed) if (b->decl && b->decl != new_decl && !b->invisible && (b->decl == error_mark_node || diagnostic_report_warnings_p (global_dc, DECL_SOURCE_LOCATION (b->decl)))) { tree old_decl = b->decl; bool warned = false; if (old_decl == error_mark_node) { warning (OPT_Wshadow, "declaration of %q+D shadows previous " "non-variable", new_decl); break; } else if (TREE_CODE (old_decl) == PARM_DECL) warned = warning (OPT_Wshadow, "declaration of %q+D shadows a parameter", new_decl); else if (DECL_FILE_SCOPE_P (old_decl)) { /* Do not warn if a variable shadows a function, unless the variable is a function or a pointer-to-function. */ if (TREE_CODE (old_decl) == FUNCTION_DECL && TREE_CODE (new_decl) != FUNCTION_DECL && !FUNCTION_POINTER_TYPE_P (TREE_TYPE (new_decl))) continue; warned = warning_at (DECL_SOURCE_LOCATION (new_decl), OPT_Wshadow, "declaration of %qD shadows a global " "declaration", new_decl); } else if (TREE_CODE (old_decl) == FUNCTION_DECL && DECL_BUILT_IN (old_decl)) { warning (OPT_Wshadow, "declaration of %q+D shadows " "a built-in function", new_decl); break; } else warned = warning (OPT_Wshadow, "declaration of %q+D shadows a " "previous local", new_decl); if (warned) inform (DECL_SOURCE_LOCATION (old_decl), "shadowed declaration is here"); break; } } /* Record a decl-node X as belonging to the current lexical scope. Check for errors (such as an incompatible declaration for the same name already seen in the same scope). Returns either X or an old decl for the same name. If an old decl is returned, it may have been smashed to agree with what X says. */ tree pushdecl (tree x) { tree name = DECL_NAME (x); struct c_scope *scope = current_scope; struct c_binding *b; bool nested = false; location_t locus = DECL_SOURCE_LOCATION (x); /* Must set DECL_CONTEXT for everything not at file scope or DECL_FILE_SCOPE_P won't work. Local externs don't count unless they have initializers (which generate code). */ if (current_function_decl && ((TREE_CODE (x) != FUNCTION_DECL && TREE_CODE (x) != VAR_DECL) || DECL_INITIAL (x) || !DECL_EXTERNAL (x))) DECL_CONTEXT (x) = current_function_decl; /* Anonymous decls are just inserted in the scope. */ if (!name) { bind (name, x, scope, /*invisible=*/false, /*nested=*/false, locus); return x; } /* First, see if there is another declaration with the same name in the current scope. If there is, duplicate_decls may do all the work for us. If duplicate_decls returns false, that indicates two incompatible decls in the same scope; we are to silently replace the old one (duplicate_decls has issued all appropriate diagnostics).
In particular, we should not consider possible duplicates in the external scope, or shadowing. */ b = I_SYMBOL_BINDING (name); if (b && B_IN_SCOPE (b, scope)) { struct c_binding *b_ext, *b_use; tree type = TREE_TYPE (x); tree visdecl = b->decl; tree vistype = TREE_TYPE (visdecl); if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE && COMPLETE_TYPE_P (TREE_TYPE (x))) b->inner_comp = false; b_use = b; b_ext = b; /* If this is an external linkage declaration, we should check for compatibility with the type in the external scope before setting the type at this scope based on the visible information only. */ if (TREE_PUBLIC (x) && TREE_PUBLIC (visdecl)) { while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext)) b_ext = b_ext->shadowed; if (b_ext) { b_use = b_ext; if (b_use->u.type) TREE_TYPE (b_use->decl) = b_use->u.type; } } if (duplicate_decls (x, b_use->decl)) { if (b_use != b) { /* Save the updated type in the external scope and restore the proper type for this scope. */ tree thistype; if (comptypes (vistype, type)) thistype = composite_type (vistype, type); else thistype = TREE_TYPE (b_use->decl); b_use->u.type = TREE_TYPE (b_use->decl); if (TREE_CODE (b_use->decl) == FUNCTION_DECL && DECL_BUILT_IN (b_use->decl)) thistype = build_type_attribute_variant (thistype, TYPE_ATTRIBUTES (b_use->u.type)); TREE_TYPE (b_use->decl) = thistype; } return b_use->decl; } else goto skip_external_and_shadow_checks; } /* All declarations with external linkage, and all external references, go in the external scope, no matter what scope is current. However, the binding in that scope is ignored for purposes of normal name lookup. A separate binding structure is created in the requested scope; this governs the normal visibility of the symbol. The binding in the externals scope is used exclusively for detecting duplicate declarations of the same object, no matter what scope they are in; this is what we do here. (C99 6.2.7p2: All declarations that refer to the same object or function shall have compatible type; otherwise, the behavior is undefined.) */ if (DECL_EXTERNAL (x) || scope == file_scope) { tree type = TREE_TYPE (x); tree vistype = 0; tree visdecl = 0; bool type_saved = false; if (b && !B_IN_EXTERNAL_SCOPE (b) && (TREE_CODE (b->decl) == FUNCTION_DECL || TREE_CODE (b->decl) == VAR_DECL) && DECL_FILE_SCOPE_P (b->decl)) { visdecl = b->decl; vistype = TREE_TYPE (visdecl); } if (scope != file_scope && !DECL_IN_SYSTEM_HEADER (x)) warning (OPT_Wnested_externs, "nested extern declaration of %qD", x); while (b && !B_IN_EXTERNAL_SCOPE (b)) { /* If this decl might be modified, save its type. This is done here rather than when the decl is first bound because the type may change after first binding, through being completed or through attributes being added. If we encounter multiple such decls, only the first should have its type saved; the others will already have had their proper types saved and the types will not have changed as their scopes will not have been re-entered. 
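For illustration, with made-up names:

       extern int a[];
       void f (void) { extern int a[10]; }

   the file-scope type of a is saved before the block-scope declaration
   completes it, so the composite of only the visible types can still be
   computed afterwards.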
*/ if (DECL_P (b->decl) && DECL_FILE_SCOPE_P (b->decl) && !type_saved) { b->u.type = TREE_TYPE (b->decl); type_saved = true; } if (B_IN_FILE_SCOPE (b) && TREE_CODE (b->decl) == VAR_DECL && TREE_STATIC (b->decl) && TREE_CODE (TREE_TYPE (b->decl)) == ARRAY_TYPE && !TYPE_DOMAIN (TREE_TYPE (b->decl)) && TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) && !integer_zerop (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))) { /* Array type completed in inner scope, which should be diagnosed if the completion does not have size 1 and it does not get completed in the file scope. */ b->inner_comp = true; } b = b->shadowed; } /* If a matching external declaration has been found, set its type to the composite of all the types of that declaration. After the consistency checks, it will be reset to the composite of the visible types only. */ if (b && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl)) && b->u.type) TREE_TYPE (b->decl) = b->u.type; /* The point of the same_translation_unit_p check here is, we want to detect a duplicate decl for a construct like foo() { extern bar(); } ... static bar(); but not if they are in different translation units. In any case, the static does not go in the externals scope. */ if (b && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl)) && duplicate_decls (x, b->decl)) { tree thistype; if (vistype) { if (comptypes (vistype, type)) thistype = composite_type (vistype, type); else thistype = TREE_TYPE (b->decl); } else thistype = type; b->u.type = TREE_TYPE (b->decl); if (TREE_CODE (b->decl) == FUNCTION_DECL && DECL_BUILT_IN (b->decl)) thistype = build_type_attribute_variant (thistype, TYPE_ATTRIBUTES (b->u.type)); TREE_TYPE (b->decl) = thistype; bind (name, b->decl, scope, /*invisible=*/false, /*nested=*/true, locus); return b->decl; } else if (TREE_PUBLIC (x)) { if (visdecl && !b && duplicate_decls (x, visdecl)) { /* An external declaration at block scope referring to a visible entity with internal linkage. The composite type will already be correct for this scope, so we just need to fall through to make the declaration in this scope. */ nested = true; x = visdecl; } else { bind (name, x, external_scope, /*invisible=*/true, /*nested=*/false, locus); nested = true; } } } if (TREE_CODE (x) != PARM_DECL) warn_if_shadowing (x); skip_external_and_shadow_checks: if (TREE_CODE (x) == TYPE_DECL) { /* So this is a typedef, set its underlying type. */ set_underlying_type (x); /* If X is a typedef defined in the current function, record it for the purpose of implementing the -Wunused-local-typedefs warning. */ record_locally_defined_typedef (x); } bind (name, x, scope, /*invisible=*/false, nested, locus); /* If x's type is incomplete because it's based on a structure or union which has not yet been fully declared, attach it to that structure or union type, so we can go back and complete the variable declaration later, if the structure or union gets fully declared. If the input is erroneous, we can have error_mark in the type slot (e.g. "f(void a, ...)") - that doesn't count as an incomplete type. 
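A typical case, with made-up names:

       struct bar;
       struct bar b;
       struct bar { int i; };

   b is attached to the incomplete structure type and laid out once the
   tag is completed.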
*/ if (TREE_TYPE (x) != error_mark_node && !COMPLETE_TYPE_P (TREE_TYPE (x))) { tree element = TREE_TYPE (x); while (TREE_CODE (element) == ARRAY_TYPE) element = TREE_TYPE (element); element = TYPE_MAIN_VARIANT (element); if ((TREE_CODE (element) == RECORD_TYPE || TREE_CODE (element) == UNION_TYPE) && (TREE_CODE (x) != TYPE_DECL || TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE) && !COMPLETE_TYPE_P (element)) C_TYPE_INCOMPLETE_VARS (element) = tree_cons (NULL_TREE, x, C_TYPE_INCOMPLETE_VARS (element)); } return x; } /* Record X as belonging to file scope. This is used only internally by the Objective-C front end, and is limited to its needs. duplicate_decls is not called; if there is any preexisting decl for this identifier, it is an ICE. */ tree pushdecl_top_level (tree x) { tree name; bool nested = false; gcc_assert (TREE_CODE (x) == VAR_DECL || TREE_CODE (x) == CONST_DECL); name = DECL_NAME (x); gcc_assert (TREE_CODE (x) == CONST_DECL || !I_SYMBOL_BINDING (name)); if (TREE_PUBLIC (x)) { bind (name, x, external_scope, /*invisible=*/true, /*nested=*/false, UNKNOWN_LOCATION); nested = true; } if (file_scope) bind (name, x, file_scope, /*invisible=*/false, nested, UNKNOWN_LOCATION); return x; } static void implicit_decl_warning (location_t loc, tree id, tree olddecl) { if (warn_implicit_function_declaration) { bool warned; if (flag_isoc99) warned = pedwarn (loc, OPT_Wimplicit_function_declaration, "implicit declaration of function %qE", id); else warned = warning_at (loc, OPT_Wimplicit_function_declaration, G_("implicit declaration of function %qE"), id); if (olddecl && warned) locate_old_decl (olddecl); } } /* This function represents mapping of a function code FCODE to its respective header. */ static const char * header_for_builtin_fn (enum built_in_function fcode) { switch (fcode) { CASE_FLT_FN (BUILT_IN_ACOS): CASE_FLT_FN (BUILT_IN_ACOSH): CASE_FLT_FN (BUILT_IN_ASIN): CASE_FLT_FN (BUILT_IN_ASINH): CASE_FLT_FN (BUILT_IN_ATAN): CASE_FLT_FN (BUILT_IN_ATANH): CASE_FLT_FN (BUILT_IN_ATAN2): CASE_FLT_FN (BUILT_IN_CBRT): CASE_FLT_FN (BUILT_IN_CEIL): CASE_FLT_FN (BUILT_IN_COPYSIGN): CASE_FLT_FN (BUILT_IN_COS): CASE_FLT_FN (BUILT_IN_COSH): CASE_FLT_FN (BUILT_IN_ERF): CASE_FLT_FN (BUILT_IN_ERFC): CASE_FLT_FN (BUILT_IN_EXP): CASE_FLT_FN (BUILT_IN_EXP2): CASE_FLT_FN (BUILT_IN_EXPM1): CASE_FLT_FN (BUILT_IN_FABS): CASE_FLT_FN (BUILT_IN_FDIM): CASE_FLT_FN (BUILT_IN_FLOOR): CASE_FLT_FN (BUILT_IN_FMA): CASE_FLT_FN (BUILT_IN_FMAX): CASE_FLT_FN (BUILT_IN_FMIN): CASE_FLT_FN (BUILT_IN_FMOD): CASE_FLT_FN (BUILT_IN_FREXP): CASE_FLT_FN (BUILT_IN_HYPOT): CASE_FLT_FN (BUILT_IN_ILOGB): CASE_FLT_FN (BUILT_IN_LDEXP): CASE_FLT_FN (BUILT_IN_LGAMMA): CASE_FLT_FN (BUILT_IN_LLRINT): CASE_FLT_FN (BUILT_IN_LLROUND): CASE_FLT_FN (BUILT_IN_LOG): CASE_FLT_FN (BUILT_IN_LOG10): CASE_FLT_FN (BUILT_IN_LOG1P): CASE_FLT_FN (BUILT_IN_LOG2): CASE_FLT_FN (BUILT_IN_LOGB): CASE_FLT_FN (BUILT_IN_LRINT): CASE_FLT_FN (BUILT_IN_LROUND): CASE_FLT_FN (BUILT_IN_MODF): CASE_FLT_FN (BUILT_IN_NAN): CASE_FLT_FN (BUILT_IN_NEARBYINT): CASE_FLT_FN (BUILT_IN_NEXTAFTER): CASE_FLT_FN (BUILT_IN_NEXTTOWARD): CASE_FLT_FN (BUILT_IN_POW): CASE_FLT_FN (BUILT_IN_REMAINDER): CASE_FLT_FN (BUILT_IN_REMQUO): CASE_FLT_FN (BUILT_IN_RINT): CASE_FLT_FN (BUILT_IN_ROUND): CASE_FLT_FN (BUILT_IN_SCALBLN): CASE_FLT_FN (BUILT_IN_SCALBN): CASE_FLT_FN (BUILT_IN_SIN): CASE_FLT_FN (BUILT_IN_SINH): CASE_FLT_FN (BUILT_IN_SINCOS): CASE_FLT_FN (BUILT_IN_SQRT): CASE_FLT_FN (BUILT_IN_TAN): CASE_FLT_FN (BUILT_IN_TANH): CASE_FLT_FN (BUILT_IN_TGAMMA): CASE_FLT_FN (BUILT_IN_TRUNC): case 
BUILT_IN_ISINF: case BUILT_IN_ISNAN: return "<math.h>"; CASE_FLT_FN (BUILT_IN_CABS): CASE_FLT_FN (BUILT_IN_CACOS): CASE_FLT_FN (BUILT_IN_CACOSH): CASE_FLT_FN (BUILT_IN_CARG): CASE_FLT_FN (BUILT_IN_CASIN): CASE_FLT_FN (BUILT_IN_CASINH): CASE_FLT_FN (BUILT_IN_CATAN): CASE_FLT_FN (BUILT_IN_CATANH): CASE_FLT_FN (BUILT_IN_CCOS): CASE_FLT_FN (BUILT_IN_CCOSH): CASE_FLT_FN (BUILT_IN_CEXP): CASE_FLT_FN (BUILT_IN_CIMAG): CASE_FLT_FN (BUILT_IN_CLOG): CASE_FLT_FN (BUILT_IN_CONJ): CASE_FLT_FN (BUILT_IN_CPOW): CASE_FLT_FN (BUILT_IN_CPROJ): CASE_FLT_FN (BUILT_IN_CREAL): CASE_FLT_FN (BUILT_IN_CSIN): CASE_FLT_FN (BUILT_IN_CSINH): CASE_FLT_FN (BUILT_IN_CSQRT): CASE_FLT_FN (BUILT_IN_CTAN): CASE_FLT_FN (BUILT_IN_CTANH): return "<complex.h>"; case BUILT_IN_MEMCHR: case BUILT_IN_MEMCMP: case BUILT_IN_MEMCPY: case BUILT_IN_MEMMOVE: case BUILT_IN_MEMSET: case BUILT_IN_STRCAT: case BUILT_IN_STRCHR: case BUILT_IN_STRCMP: case BUILT_IN_STRCPY: case BUILT_IN_STRCSPN: case BUILT_IN_STRLEN: case BUILT_IN_STRNCAT: case BUILT_IN_STRNCMP: case BUILT_IN_STRNCPY: case BUILT_IN_STRPBRK: case BUILT_IN_STRRCHR: case BUILT_IN_STRSPN: case BUILT_IN_STRSTR: return "<string.h>"; case BUILT_IN_FPRINTF: case BUILT_IN_PUTC: case BUILT_IN_FPUTC: case BUILT_IN_FPUTS: case BUILT_IN_FSCANF: case BUILT_IN_FWRITE: case BUILT_IN_PRINTF: case BUILT_IN_PUTCHAR: case BUILT_IN_PUTS: case BUILT_IN_SCANF: case BUILT_IN_SNPRINTF: case BUILT_IN_SPRINTF: case BUILT_IN_SSCANF: case BUILT_IN_VFPRINTF: case BUILT_IN_VFSCANF: case BUILT_IN_VPRINTF: case BUILT_IN_VSCANF: case BUILT_IN_VSNPRINTF: case BUILT_IN_VSPRINTF: case BUILT_IN_VSSCANF: return "<stdio.h>"; case BUILT_IN_ISALNUM: case BUILT_IN_ISALPHA: case BUILT_IN_ISBLANK: case BUILT_IN_ISCNTRL: case BUILT_IN_ISDIGIT: case BUILT_IN_ISGRAPH: case BUILT_IN_ISLOWER: case BUILT_IN_ISPRINT: case BUILT_IN_ISPUNCT: case BUILT_IN_ISSPACE: case BUILT_IN_ISUPPER: case BUILT_IN_ISXDIGIT: case BUILT_IN_TOLOWER: case BUILT_IN_TOUPPER: return "<ctype.h>"; case BUILT_IN_ISWALNUM: case BUILT_IN_ISWALPHA: case BUILT_IN_ISWBLANK: case BUILT_IN_ISWCNTRL: case BUILT_IN_ISWDIGIT: case BUILT_IN_ISWGRAPH: case BUILT_IN_ISWLOWER: case BUILT_IN_ISWPRINT: case BUILT_IN_ISWPUNCT: case BUILT_IN_ISWSPACE: case BUILT_IN_ISWUPPER: case BUILT_IN_ISWXDIGIT: case BUILT_IN_TOWLOWER: case BUILT_IN_TOWUPPER: return "<wctype.h>"; case BUILT_IN_ABORT: case BUILT_IN_ABS: case BUILT_IN_CALLOC: case BUILT_IN_EXIT: case BUILT_IN_FREE: case BUILT_IN_LABS: case BUILT_IN_LLABS: case BUILT_IN_MALLOC: case BUILT_IN_REALLOC: case BUILT_IN__EXIT2: case BUILT_IN_ALIGNED_ALLOC: return "<stdlib.h>"; case BUILT_IN_IMAXABS: return "<inttypes.h>"; case BUILT_IN_STRFTIME: return "<time.h>"; default: return NULL; } } /* Generate an implicit declaration for identifier FUNCTIONID at LOC as a function of type int (). */ tree implicitly_declare (location_t loc, tree functionid) { struct c_binding *b; tree decl = 0; tree asmspec_tree; for (b = I_SYMBOL_BINDING (functionid); b; b = b->shadowed) { if (B_IN_SCOPE (b, external_scope)) { decl = b->decl; break; } } if (decl) { if (decl == error_mark_node) return decl; /* FIXME: Objective-C has weird not-really-builtin functions which are supposed to be visible automatically. They wind up in the external scope because they're pushed before the file scope gets created. Catch this here and rebind them into the file scope. 
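(For ordinary C the interesting path is below: for illustration, calling
   fabs with no declaration in scope produces an implicit int fabs (),
   which conflicts with the built-in, so we warn and, via
   header_for_builtin_fn, hint that <math.h> should be included.)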
*/ if (!DECL_BUILT_IN (decl) && DECL_IS_BUILTIN (decl)) { bind (functionid, decl, file_scope, /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl)); return decl; } else { tree newtype = default_function_type; if (b->u.type) TREE_TYPE (decl) = b->u.type; /* Implicit declaration of a function already declared (somehow) in a different scope, or as a built-in. If this is the first time this has happened, warn; then recycle the old declaration but with the new type. */ if (!C_DECL_IMPLICIT (decl)) { implicit_decl_warning (loc, functionid, decl); C_DECL_IMPLICIT (decl) = 1; } if (DECL_BUILT_IN (decl)) { newtype = build_type_attribute_variant (newtype, TYPE_ATTRIBUTES (TREE_TYPE (decl))); if (!comptypes (newtype, TREE_TYPE (decl))) { bool warned = warning_at (loc, 0, "incompatible implicit " "declaration of built-in " "function %qD", decl); /* See if we can hint which header to include. */ const char *header = header_for_builtin_fn (DECL_FUNCTION_CODE (decl)); if (header != NULL && warned) inform (loc, "include %qs or provide a declaration of %qD", header, decl); newtype = TREE_TYPE (decl); } } else { if (!comptypes (newtype, TREE_TYPE (decl))) { error_at (loc, "incompatible implicit declaration of " "function %qD", decl); locate_old_decl (decl); } } b->u.type = TREE_TYPE (decl); TREE_TYPE (decl) = newtype; bind (functionid, decl, current_scope, /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl)); return decl; } } /* Not seen before. */ decl = build_decl (loc, FUNCTION_DECL, functionid, default_function_type); DECL_EXTERNAL (decl) = 1; TREE_PUBLIC (decl) = 1; C_DECL_IMPLICIT (decl) = 1; implicit_decl_warning (loc, functionid, 0); asmspec_tree = maybe_apply_renaming_pragma (decl, /*asmname=*/NULL); if (asmspec_tree) set_user_assembler_name (decl, TREE_STRING_POINTER (asmspec_tree)); /* C89 says implicit declarations are in the innermost block. So we record the decl in the standard fashion. */ decl = pushdecl (decl); /* No need to call objc_check_decl here - it's a function type. */ rest_of_decl_compilation (decl, 0, 0); /* Write a record describing this implicit function declaration to the prototypes file (if requested). */ gen_aux_info_record (decl, 0, 1, 0); /* Possibly apply some default attributes to this implicit declaration. */ decl_attributes (&decl, NULL_TREE, 0); return decl; } /* Issue an error message for a reference to an undeclared variable ID, including a reference to a builtin outside of function-call context. Establish a binding of the identifier to error_mark_node in an appropriate scope, which will suppress further errors for the same identifier. The error message should be given location LOC. */ void undeclared_variable (location_t loc, tree id) { static bool already = false; struct c_scope *scope; if (current_function_decl == 0) { error_at (loc, "%qE undeclared here (not in a function)", id); scope = current_scope; } else { if (!objc_diagnose_private_ivar (id)) error_at (loc, "%qE undeclared (first use in this function)", id); if (!already) { inform (loc, "each undeclared identifier is reported only" " once for each function it appears in"); already = true; } /* If we are parsing old-style parameter decls, current_function_decl will be nonnull but current_function_scope will be null. */ scope = current_function_scope ? 
current_function_scope : current_scope; } bind (id, error_mark_node, scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); } /* Subroutine of lookup_label, declare_label, define_label: construct a LABEL_DECL with all the proper frills. Also create a struct c_label_vars initialized for the current scope. */ static tree make_label (location_t location, tree name, bool defining, struct c_label_vars **p_label_vars) { tree label = build_decl (location, LABEL_DECL, name, void_type_node); DECL_CONTEXT (label) = current_function_decl; DECL_MODE (label) = VOIDmode; c_label_vars *label_vars = ggc_alloc<c_label_vars> (); label_vars->shadowed = NULL; set_spot_bindings (&label_vars->label_bindings, defining); label_vars->decls_in_scope = make_tree_vector (); label_vars->gotos = NULL; *p_label_vars = label_vars; return label; } /* Get the LABEL_DECL corresponding to identifier NAME as a label. Create one if none exists so far for the current function. This is called when a label is used in a goto expression or has its address taken. */ tree lookup_label (tree name) { tree label; struct c_label_vars *label_vars; if (current_function_scope == 0) { error ("label %qE referenced outside of any function", name); return 0; } /* Use a label already defined or ref'd with this name, but not if it is inherited from a containing function and wasn't declared using __label__. */ label = I_LABEL_DECL (name); if (label && (DECL_CONTEXT (label) == current_function_decl || C_DECLARED_LABEL_FLAG (label))) { /* If the label has only been declared, update its apparent location to point here, for better diagnostics if it turns out not to have been defined. */ if (DECL_INITIAL (label) == NULL_TREE) DECL_SOURCE_LOCATION (label) = input_location; return label; } /* No label binding for that identifier; make one. */ label = make_label (input_location, name, false, &label_vars); /* Ordinary labels go in the current function scope. */ bind_label (name, label, current_function_scope, label_vars); return label; } /* Issue a warning about DECL for a goto statement at GOTO_LOC going to LABEL. */ static void warn_about_goto (location_t goto_loc, tree label, tree decl) { if (variably_modified_type_p (TREE_TYPE (decl), NULL_TREE)) error_at (goto_loc, "jump into scope of identifier with variably modified type"); else warning_at (goto_loc, OPT_Wjump_misses_init, "jump skips variable initialization"); inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label); inform (DECL_SOURCE_LOCATION (decl), "%qD declared here", decl); } /* Look up a label because of a goto statement. This is like lookup_label, but also issues any appropriate warnings. */ tree lookup_label_for_goto (location_t loc, tree name) { tree label; struct c_label_vars *label_vars; unsigned int ix; tree decl; label = lookup_label (name); if (label == NULL_TREE) return NULL_TREE; /* If we are jumping to a different function, we can't issue any useful warnings. */ if (DECL_CONTEXT (label) != current_function_decl) { gcc_assert (C_DECLARED_LABEL_FLAG (label)); return label; } label_vars = I_LABEL_BINDING (name)->u.label; /* If the label has not yet been defined, then push this goto on a list for possible later warnings. */ if (label_vars->label_bindings.scope == NULL) { c_goto_bindings *g = ggc_alloc<c_goto_bindings> (); g->loc = loc; set_spot_bindings (&g->goto_bindings, true); vec_safe_push (label_vars->gotos, g); return label; } /* If there are any decls in label_vars->decls_in_scope, then this goto has missed the declaration of the decl. 
This happens for a case like int i = 1; lab: ... goto lab; Issue a warning or error. */ FOR_EACH_VEC_SAFE_ELT (label_vars->decls_in_scope, ix, decl) warn_about_goto (loc, label, decl); if (label_vars->label_bindings.left_stmt_expr) { error_at (loc, "jump into statement expression"); inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label); } return label; } /* Make a label named NAME in the current function, shadowing silently any that may be inherited from containing functions or containing scopes. This is called for __label__ declarations. */ tree declare_label (tree name) { struct c_binding *b = I_LABEL_BINDING (name); tree label; struct c_label_vars *label_vars; /* Check to make sure that the label hasn't already been declared at this scope */ if (b && B_IN_CURRENT_SCOPE (b)) { error ("duplicate label declaration %qE", name); locate_old_decl (b->decl); /* Just use the previous declaration. */ return b->decl; } label = make_label (input_location, name, false, &label_vars); C_DECLARED_LABEL_FLAG (label) = 1; /* Declared labels go in the current scope. */ bind_label (name, label, current_scope, label_vars); return label; } /* When we define a label, issue any appropriate warnings if there are any gotos earlier in the function which jump to this label. */ static void check_earlier_gotos (tree label, struct c_label_vars* label_vars) { unsigned int ix; struct c_goto_bindings *g; FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) { struct c_binding *b; struct c_scope *scope; /* We have a goto to this label. The goto is going forward. In g->scope, the goto is going to skip any binding which was defined after g->bindings_in_scope. */ if (g->goto_bindings.scope->has_jump_unsafe_decl) { for (b = g->goto_bindings.scope->bindings; b != g->goto_bindings.bindings_in_scope; b = b->prev) { if (decl_jump_unsafe (b->decl)) warn_about_goto (g->loc, label, b->decl); } } /* We also need to warn about decls defined in any scopes between the scope of the label and the scope of the goto. */ for (scope = label_vars->label_bindings.scope; scope != g->goto_bindings.scope; scope = scope->outer) { gcc_assert (scope != NULL); if (scope->has_jump_unsafe_decl) { if (scope == label_vars->label_bindings.scope) b = label_vars->label_bindings.bindings_in_scope; else b = scope->bindings; for (; b != NULL; b = b->prev) { if (decl_jump_unsafe (b->decl)) warn_about_goto (g->loc, label, b->decl); } } } if (g->goto_bindings.stmt_exprs > 0) { error_at (g->loc, "jump into statement expression"); inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label); } } /* Now that the label is defined, we will issue warnings about subsequent gotos to this label when we see them. */ vec_safe_truncate (label_vars->gotos, 0); label_vars->gotos = NULL; } /* Define a label, specifying the location in the source file. Return the LABEL_DECL node for the label, if the definition is valid. Otherwise return 0. */ tree define_label (location_t location, tree name) { /* Find any preexisting label with this name. It is an error if that label has already been defined in this function, or if there is a containing function with a declared label with the same name. 
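For illustration, with made-up names,

       void f (void) { lab: ; lab: return; }

   draws the duplicate-label error below plus a note at the first
   definition.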
*/ tree label = I_LABEL_DECL (name); if (label && ((DECL_CONTEXT (label) == current_function_decl && DECL_INITIAL (label) != 0) || (DECL_CONTEXT (label) != current_function_decl && C_DECLARED_LABEL_FLAG (label)))) { error_at (location, "duplicate label %qD", label); locate_old_decl (label); return 0; } else if (label && DECL_CONTEXT (label) == current_function_decl) { struct c_label_vars *label_vars = I_LABEL_BINDING (name)->u.label; /* The label has been used or declared already in this function, but not defined. Update its location to point to this definition. */ DECL_SOURCE_LOCATION (label) = location; set_spot_bindings (&label_vars->label_bindings, true); /* Issue warnings as required about any goto statements from earlier in the function. */ check_earlier_gotos (label, label_vars); } else { struct c_label_vars *label_vars; /* No label binding for that identifier; make one. */ label = make_label (location, name, true, &label_vars); /* Ordinary labels go in the current function scope. */ bind_label (name, label, current_function_scope, label_vars); } if (!in_system_header_at (input_location) && lookup_name (name)) warning_at (location, OPT_Wtraditional, "traditional C lacks a separate namespace " "for labels, identifier %qE conflicts", name); /* Mark label as having been defined. */ DECL_INITIAL (label) = error_mark_node; return label; } /* Get the bindings for a new switch statement. This is used to issue warnings as appropriate for jumps from the switch to case or default labels. */ struct c_spot_bindings * c_get_switch_bindings (void) { struct c_spot_bindings *switch_bindings; switch_bindings = XNEW (struct c_spot_bindings); set_spot_bindings (switch_bindings, true); return switch_bindings; } void c_release_switch_bindings (struct c_spot_bindings *bindings) { gcc_assert (bindings->stmt_exprs == 0 && !bindings->left_stmt_expr); XDELETE (bindings); } /* This is called at the point of a case or default label to issue warnings about decls as needed. It returns true if it found an error, not just a warning. */ bool c_check_switch_jump_warnings (struct c_spot_bindings *switch_bindings, location_t switch_loc, location_t case_loc) { bool saw_error; struct c_scope *scope; saw_error = false; for (scope = current_scope; scope != switch_bindings->scope; scope = scope->outer) { struct c_binding *b; gcc_assert (scope != NULL); if (!scope->has_jump_unsafe_decl) continue; for (b = scope->bindings; b != NULL; b = b->prev) { if (decl_jump_unsafe (b->decl)) { if (variably_modified_type_p (TREE_TYPE (b->decl), NULL_TREE)) { saw_error = true; error_at (case_loc, ("switch jumps into scope of identifier with " "variably modified type")); } else warning_at (case_loc, OPT_Wjump_misses_init, "switch jumps over variable initialization"); inform (switch_loc, "switch starts here"); inform (DECL_SOURCE_LOCATION (b->decl), "%qD declared here", b->decl); } } } if (switch_bindings->stmt_exprs > 0) { saw_error = true; error_at (case_loc, "switch jumps into statement expression"); inform (switch_loc, "switch starts here"); } return saw_error; } /* Given NAME, an IDENTIFIER_NODE, return the structure (or union or enum) definition for that name. If THISLEVEL_ONLY is nonzero, searches only the current_scope. CODE says which kind of type the caller wants; it is RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE. If PLOC is not NULL and this returns non-null, it sets *PLOC to the location where the tag was defined. If the wrong kind of type is found, an error is reported. 
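For illustration, with a made-up name:

       union foo { int i; };
       void f (void) { struct foo; }

   is valid, the inner declaration shadowing the union tag, while struct
   foo and union foo in a single scope conflict; pending_invalid_xref
   defers the error until it is known which case applies.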
*/ static tree lookup_tag (enum tree_code code, tree name, int thislevel_only, location_t *ploc) { struct c_binding *b = I_TAG_BINDING (name); int thislevel = 0; if (!b || !b->decl) return 0; /* We only care about whether it's in this level if thislevel_only was set or it might be a type clash. */ if (thislevel_only || TREE_CODE (b->decl) != code) { /* For our purposes, a tag in the external scope is the same as a tag in the file scope. (Primarily relevant to Objective-C and its builtin structure tags, which get pushed before the file scope is created.) */ if (B_IN_CURRENT_SCOPE (b) || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b))) thislevel = 1; } if (thislevel_only && !thislevel) return 0; if (TREE_CODE (b->decl) != code) { /* Definition isn't the kind we were looking for. */ pending_invalid_xref = name; pending_invalid_xref_location = input_location; /* If in the same binding level as a declaration as a tag of a different type, this must not be allowed to shadow that tag, so give the error immediately. (For example, "struct foo; union foo;" is invalid.) */ if (thislevel) pending_xref_error (); } if (ploc != NULL) *ploc = b->locus; return b->decl; } /* Print an error message now for a recent invalid struct, union or enum cross reference. We don't print them immediately because they are not invalid when used in the `struct foo;' construct for shadowing. */ void pending_xref_error (void) { if (pending_invalid_xref != 0) error_at (pending_invalid_xref_location, "%qE defined as wrong kind of tag", pending_invalid_xref); pending_invalid_xref = 0; } /* Look up NAME in the current scope and its superiors in the namespace of variables, functions and typedefs. Return a ..._DECL node of some kind representing its definition, or return 0 if it is undefined. */ tree lookup_name (tree name) { struct c_binding *b = I_SYMBOL_BINDING (name); if (b && !b->invisible) { maybe_record_typedef_use (b->decl); return b->decl; } return 0; } /* Similar to `lookup_name' but look only at the indicated scope. */ static tree lookup_name_in_scope (tree name, struct c_scope *scope) { struct c_binding *b; for (b = I_SYMBOL_BINDING (name); b; b = b->shadowed) if (B_IN_SCOPE (b, scope)) return b->decl; return 0; } /* Create the predefined scalar types of C, and some nodes representing standard constants (0, 1, (void *) 0). Initialize the global scope. Make definitions for built-in primitive functions. */ void c_init_decl_processing (void) { location_t save_loc = input_location; /* Initialize reserved words for parser. */ c_parse_init (); current_function_decl = 0; gcc_obstack_init (&parser_obstack); /* Make the externals scope. */ push_scope (); external_scope = current_scope; /* Declarations from c_common_nodes_and_builtins must not be associated with this input file, lest we get differences between using and not using preprocessed headers. */ input_location = BUILTINS_LOCATION; c_common_nodes_and_builtins (); /* In C, comparisons and TRUTH_* expressions have type int. */ truthvalue_type_node = integer_type_node; truthvalue_true_node = integer_one_node; truthvalue_false_node = integer_zero_node; /* Even in C99, which has a real boolean type. */ pushdecl (build_decl (UNKNOWN_LOCATION, TYPE_DECL, get_identifier ("_Bool"), boolean_type_node)); input_location = save_loc; make_fname_decl = c_make_fname_decl; start_fname_decls (); } /* Create the VAR_DECL at LOC for __FUNCTION__ etc. 
ID is the name to give the decl, NAME is the initialization string and TYPE_DEP indicates whether NAME depended on the type of the function. As we don't yet implement delayed emission of static data, we mark the decl as emitted so it is not placed in the output. Anything using it must therefore pull out the STRING_CST initializer directly. FIXME. */ static tree c_make_fname_decl (location_t loc, tree id, int type_dep) { const char *name = fname_as_string (type_dep); tree decl, type, init; size_t length = strlen (name); type = build_array_type (char_type_node, build_index_type (size_int (length))); type = c_build_qualified_type (type, TYPE_QUAL_CONST); decl = build_decl (loc, VAR_DECL, id, type); TREE_STATIC (decl) = 1; TREE_READONLY (decl) = 1; DECL_ARTIFICIAL (decl) = 1; init = build_string (length + 1, name); free (CONST_CAST (char *, name)); TREE_TYPE (init) = type; DECL_INITIAL (decl) = init; TREE_USED (decl) = 1; if (current_function_decl /* For invalid programs like this: void foo() const char* p = __FUNCTION__; the __FUNCTION__ is believed to appear in K&R style function parameter declarator. In that case we still don't have function_scope. */ && (!seen_error () || current_function_scope)) { DECL_CONTEXT (decl) = current_function_decl; bind (id, decl, current_function_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); } finish_decl (decl, loc, init, NULL_TREE, NULL_TREE); return decl; } tree c_builtin_function (tree decl) { tree type = TREE_TYPE (decl); tree id = DECL_NAME (decl); const char *name = IDENTIFIER_POINTER (id); C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type); /* Should never be called on a symbol with a preexisting meaning. */ gcc_assert (!I_SYMBOL_BINDING (id)); bind (id, decl, external_scope, /*invisible=*/true, /*nested=*/false, UNKNOWN_LOCATION); /* Builtins in the implementation namespace are made visible without needing to be explicitly declared. See push_file_scope. */ if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1]))) { DECL_CHAIN (decl) = visible_builtins; visible_builtins = decl; } return decl; } tree c_builtin_function_ext_scope (tree decl) { tree type = TREE_TYPE (decl); tree id = DECL_NAME (decl); const char *name = IDENTIFIER_POINTER (id); C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type); if (external_scope) bind (id, decl, external_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); /* Builtins in the implementation namespace are made visible without needing to be explicitly declared. See push_file_scope. */ if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1]))) { DECL_CHAIN (decl) = visible_builtins; visible_builtins = decl; } return decl; } /* Called when a declaration is seen that contains no names to declare. If its type is a reference to a structure, union or enum inherited from a containing scope, shadow that tag name for the current scope with a forward reference. If its type defines a new named structure or union or defines an enum, it is valid but we need not do anything here. Otherwise, it is an error. */ void shadow_tag (const struct c_declspecs *declspecs) { shadow_tag_warned (declspecs, 0); } /* WARNED is 1 if we have done a pedwarn, 2 if we have done a warning, but no pedwarn. 
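
   For instance, empty declarations like these are diagnosed here:

     int;                 // "useless type name in empty declaration"
     struct s { int i; };
     static struct s;     // storage class does not redeclare the tag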
*/ void shadow_tag_warned (const struct c_declspecs *declspecs, int warned) { bool found_tag = false; if (declspecs->type && !declspecs->default_int_p && !declspecs->typedef_p) { tree value = declspecs->type; enum tree_code code = TREE_CODE (value); if (code == RECORD_TYPE || code == UNION_TYPE || code == ENUMERAL_TYPE) /* Used to test also that TYPE_SIZE (value) != 0. That caused warning for `struct foo;' at top level in the file. */ { tree name = TYPE_NAME (value); tree t; found_tag = true; if (declspecs->restrict_p) { error ("invalid use of %<restrict%>"); warned = 1; } if (name == 0) { if (warned != 1 && code != ENUMERAL_TYPE) /* Empty unnamed enum OK */ { pedwarn (input_location, 0, "unnamed struct/union that defines no instances"); warned = 1; } } else if (declspecs->typespec_kind != ctsk_tagdef && declspecs->typespec_kind != ctsk_tagfirstref && declspecs->storage_class != csc_none) { if (warned != 1) pedwarn (input_location, 0, "empty declaration with storage class specifier " "does not redeclare tag"); warned = 1; pending_xref_error (); } else if (declspecs->typespec_kind != ctsk_tagdef && declspecs->typespec_kind != ctsk_tagfirstref && (declspecs->const_p || declspecs->volatile_p || declspecs->atomic_p || declspecs->restrict_p || declspecs->address_space)) { if (warned != 1) pedwarn (input_location, 0, "empty declaration with type qualifier " "does not redeclare tag"); warned = 1; pending_xref_error (); } else if (declspecs->typespec_kind != ctsk_tagdef && declspecs->typespec_kind != ctsk_tagfirstref && declspecs->alignas_p) { if (warned != 1) pedwarn (input_location, 0, "empty declaration with %<_Alignas%> " "does not redeclare tag"); warned = 1; pending_xref_error (); } else { pending_invalid_xref = 0; t = lookup_tag (code, name, 1, NULL); if (t == 0) { t = make_node (code); pushtag (input_location, name, t); } } } else { if (warned != 1 && !in_system_header_at (input_location)) { pedwarn (input_location, 0, "useless type name in empty declaration"); warned = 1; } } } else if (warned != 1 && !in_system_header_at (input_location) && declspecs->typedef_p) { pedwarn (input_location, 0, "useless type name in empty declaration"); warned = 1; } pending_invalid_xref = 0; if (declspecs->inline_p) { error ("%<inline%> in empty declaration"); warned = 1; } if (declspecs->noreturn_p) { error ("%<_Noreturn%> in empty declaration"); warned = 1; } if (current_scope == file_scope && declspecs->storage_class == csc_auto) { error ("%<auto%> in file-scope empty declaration"); warned = 1; } if (current_scope == file_scope && declspecs->storage_class == csc_register) { error ("%<register%> in file-scope empty declaration"); warned = 1; } if (!warned && !in_system_header_at (input_location) && declspecs->storage_class != csc_none) { warning (0, "useless storage class specifier in empty declaration"); warned = 2; } if (!warned && !in_system_header_at (input_location) && declspecs->thread_p) { warning (0, "useless %qs in empty declaration", declspecs->thread_gnu_p ? 
"__thread" : "_Thread_local"); warned = 2; } if (!warned && !in_system_header_at (input_location) && (declspecs->const_p || declspecs->volatile_p || declspecs->atomic_p || declspecs->restrict_p || declspecs->address_space)) { warning (0, "useless type qualifier in empty declaration"); warned = 2; } if (!warned && !in_system_header_at (input_location) && declspecs->alignas_p) { warning (0, "useless %<_Alignas%> in empty declaration"); warned = 2; } if (warned != 1) { if (!found_tag) pedwarn (input_location, 0, "empty declaration"); } } /* Return the qualifiers from SPECS as a bitwise OR of TYPE_QUAL_* bits. SPECS represents declaration specifiers that the grammar only permits to contain type qualifiers and attributes. */ int quals_from_declspecs (const struct c_declspecs *specs) { int quals = ((specs->const_p ? TYPE_QUAL_CONST : 0) | (specs->volatile_p ? TYPE_QUAL_VOLATILE : 0) | (specs->restrict_p ? TYPE_QUAL_RESTRICT : 0) | (specs->atomic_p ? TYPE_QUAL_ATOMIC : 0) | (ENCODE_QUAL_ADDR_SPACE (specs->address_space))); gcc_assert (!specs->type && !specs->decl_attr && specs->typespec_word == cts_none && specs->storage_class == csc_none && !specs->typedef_p && !specs->explicit_signed_p && !specs->deprecated_p && !specs->long_p && !specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p && !specs->inline_p && !specs->noreturn_p && !specs->thread_p); return quals; } /* Construct an array declarator. LOC is the location of the beginning of the array (usually the opening brace). EXPR is the expression inside [], or NULL_TREE. QUALS are the type qualifiers inside the [] (to be applied to the pointer to which a parameter array is converted). STATIC_P is true if "static" is inside the [], false otherwise. VLA_UNSPEC_P is true if the array is [*], a VLA of unspecified length which is nevertheless a complete type, false otherwise. The field for the contained declarator is left to be filled in by set_array_declarator_inner. */ struct c_declarator * build_array_declarator (location_t loc, tree expr, struct c_declspecs *quals, bool static_p, bool vla_unspec_p) { struct c_declarator *declarator = XOBNEW (&parser_obstack, struct c_declarator); declarator->id_loc = loc; declarator->kind = cdk_array; declarator->declarator = 0; declarator->u.array.dimen = expr; if (quals) { declarator->u.array.attrs = quals->attrs; declarator->u.array.quals = quals_from_declspecs (quals); } else { declarator->u.array.attrs = NULL_TREE; declarator->u.array.quals = 0; } declarator->u.array.static_p = static_p; declarator->u.array.vla_unspec_p = vla_unspec_p; if (static_p || quals != NULL) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support %<static%> or type " "qualifiers in parameter array declarators"); if (vla_unspec_p) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support %<[*]%> array declarators"); if (vla_unspec_p) { if (!current_scope->parm_flag) { /* C99 6.7.5.2p4 */ error_at (loc, "%<[*]%> not allowed in other than " "function prototype scope"); declarator->u.array.vla_unspec_p = false; return NULL; } current_scope->had_vla_unspec = true; } return declarator; } /* Set the contained declarator of an array declarator. DECL is the declarator, as constructed by build_array_declarator; INNER is what appears on the left of the []. */ struct c_declarator * set_array_declarator_inner (struct c_declarator *decl, struct c_declarator *inner) { decl->declarator = inner; return decl; } /* INIT is a constructor that forms DECL's initializer. 
If the final element initializes a flexible array field, add the size of that initializer to DECL's size. */ static void add_flexible_array_elts_to_size (tree decl, tree init) { tree elt, type; if (vec_safe_is_empty (CONSTRUCTOR_ELTS (init))) return; elt = CONSTRUCTOR_ELTS (init)->last ().value; type = TREE_TYPE (elt); if (TREE_CODE (type) == ARRAY_TYPE && TYPE_SIZE (type) == NULL_TREE && TYPE_DOMAIN (type) != NULL_TREE && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE) { complete_array_type (&type, elt, false); DECL_SIZE (decl) = size_binop (PLUS_EXPR, DECL_SIZE (decl), TYPE_SIZE (type)); DECL_SIZE_UNIT (decl) = size_binop (PLUS_EXPR, DECL_SIZE_UNIT (decl), TYPE_SIZE_UNIT (type)); } } /* Decode a "typename", such as "int **", returning a ..._TYPE node. Set *EXPR, if EXPR not NULL, to any expression to be evaluated before the type name, and set *EXPR_CONST_OPERANDS, if EXPR_CONST_OPERANDS not NULL, to indicate whether the type name may appear in a constant expression. */ tree groktypename (struct c_type_name *type_name, tree *expr, bool *expr_const_operands) { tree type; tree attrs = type_name->specs->attrs; type_name->specs->attrs = NULL_TREE; type = grokdeclarator (type_name->declarator, type_name->specs, TYPENAME, false, NULL, &attrs, expr, expr_const_operands, DEPRECATED_NORMAL); /* Apply attributes. */ decl_attributes (&type, attrs, 0); return type; } /* Wrapper for decl_attributes that adds some implicit attributes to VAR_DECLs or FUNCTION_DECLs. */ static tree c_decl_attributes (tree *node, tree attributes, int flags) { /* Add implicit "omp declare target" attribute if requested. */ if (current_omp_declare_target_attribute && ((TREE_CODE (*node) == VAR_DECL && (TREE_STATIC (*node) || DECL_EXTERNAL (*node))) || TREE_CODE (*node) == FUNCTION_DECL)) { if (TREE_CODE (*node) == VAR_DECL && ((DECL_CONTEXT (*node) && TREE_CODE (DECL_CONTEXT (*node)) == FUNCTION_DECL) || (current_function_decl && !DECL_EXTERNAL (*node)))) error ("%q+D in block scope inside of declare target directive", *node); else if (TREE_CODE (*node) == VAR_DECL && !lang_hooks.types.omp_mappable_type (TREE_TYPE (*node))) error ("%q+D in declare target directive does not have mappable type", *node); else attributes = tree_cons (get_identifier ("omp declare target"), NULL_TREE, attributes); } return decl_attributes (node, attributes, flags); } /* Decode a declarator in an ordinary declaration or data definition. This is called as soon as the type information and variable name have been parsed, before parsing the initializer if any. Here we create the ..._DECL node, fill in its type, and put it on the list of decls for the current context. The ..._DECL node is returned as the value. Exception: for arrays where the length is not specified, the type is left null, to be filled in by `finish_decl'. Function definitions do not come here; they go to start_function instead. However, external and forward declarations of functions do go through here. Structure field declarations are done by grokfield and not through here. */ tree start_decl (struct c_declarator *declarator, struct c_declspecs *declspecs, bool initialized, tree attributes) { tree decl; tree tem; tree expr = NULL_TREE; enum deprecated_states deprecated_state = DEPRECATED_NORMAL; /* An object declared as __attribute__((deprecated)) suppresses warnings of uses of other deprecated items. 
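
   For example, no deprecation warning is wanted for the use of old_t
   in:

     typedef int old_t __attribute__ ((deprecated));
     old_t x __attribute__ ((deprecated));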
*/ if (lookup_attribute ("deprecated", attributes)) deprecated_state = DEPRECATED_SUPPRESS; decl = grokdeclarator (declarator, declspecs, NORMAL, initialized, NULL, &attributes, &expr, NULL, deprecated_state); if (!decl || decl == error_mark_node) return NULL_TREE; if (expr) add_stmt (fold_convert (void_type_node, expr)); if (TREE_CODE (decl) != FUNCTION_DECL && MAIN_NAME_P (DECL_NAME (decl))) warning (OPT_Wmain, "%q+D is usually a function", decl); if (initialized) /* Is it valid for this decl to have an initializer at all? If not, set INITIALIZED to zero, which will indirectly tell 'finish_decl' to ignore the initializer once it is parsed. */ switch (TREE_CODE (decl)) { case TYPE_DECL: error ("typedef %qD is initialized (use __typeof__ instead)", decl); initialized = 0; break; case FUNCTION_DECL: error ("function %qD is initialized like a variable", decl); initialized = 0; break; case PARM_DECL: /* DECL_INITIAL in a PARM_DECL is really DECL_ARG_TYPE. */ error ("parameter %qD is initialized", decl); initialized = 0; break; default: /* Don't allow initializations for incomplete types except for arrays which might be completed by the initialization. */ /* This can happen if the array size is an undefined macro. We already gave a warning, so we don't need another one. */ if (TREE_TYPE (decl) == error_mark_node) initialized = 0; else if (COMPLETE_TYPE_P (TREE_TYPE (decl))) { /* A complete type is ok if size is fixed. */ if (TREE_CODE (TYPE_SIZE (TREE_TYPE (decl))) != INTEGER_CST || C_DECL_VARIABLE_SIZE (decl)) { error ("variable-sized object may not be initialized"); initialized = 0; } } else if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE) { error ("variable %qD has initializer but incomplete type", decl); initialized = 0; } else if (C_DECL_VARIABLE_SIZE (decl)) { /* Although C99 is unclear about whether incomplete arrays of VLAs themselves count as VLAs, it does not make sense to permit them to be initialized given that ordinary VLAs may not be initialized. */ error ("variable-sized object may not be initialized"); initialized = 0; } } if (initialized) { if (current_scope == file_scope) TREE_STATIC (decl) = 1; /* Tell 'pushdecl' this is an initialized decl even though we don't yet have the initializer expression. Also tell 'finish_decl' it may store the real initializer. */ DECL_INITIAL (decl) = error_mark_node; } /* If this is a function declaration, write a record describing it to the prototypes file (if requested). */ if (TREE_CODE (decl) == FUNCTION_DECL) gen_aux_info_record (decl, 0, 0, prototype_p (TREE_TYPE (decl))); /* ANSI specifies that a tentative definition which is not merged with a non-tentative definition behaves exactly like a definition with an initializer equal to zero. (Section 3.7.2) -fno-common gives strict ANSI behavior, though this tends to break a large body of code that grew up without this rule. Thread-local variables are never common, since there's no entrenched body of code to break, and it allows more efficient variable references in the presence of dynamic linking. */ if (TREE_CODE (decl) == VAR_DECL && !initialized && TREE_PUBLIC (decl) && !DECL_THREAD_LOCAL_P (decl) && !flag_no_common) DECL_COMMON (decl) = 1; /* Set attributes here so if duplicate decl, will have proper attributes. */ c_decl_attributes (&decl, attributes, 0); /* Handle gnu_inline attribute. 
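     For example, a GNU89-style inline definition such as:

       extern inline __attribute__ ((gnu_inline)) int f (int x)
       {
         return x;
       }

     stays inlinable without emitting an out-of-line definition here.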
*/ if (declspecs->inline_p && !flag_gnu89_inline && TREE_CODE (decl) == FUNCTION_DECL && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl)) || current_function_decl)) { if (declspecs->storage_class == csc_auto && current_scope != file_scope) ; else if (declspecs->storage_class != csc_static) DECL_EXTERNAL (decl) = !DECL_EXTERNAL (decl); } if (TREE_CODE (decl) == FUNCTION_DECL && targetm.calls.promote_prototypes (TREE_TYPE (decl))) { struct c_declarator *ce = declarator; if (ce->kind == cdk_pointer) ce = declarator->declarator; if (ce->kind == cdk_function) { tree args = ce->u.arg_info->parms; for (; args; args = DECL_CHAIN (args)) { tree type = TREE_TYPE (args); if (type && INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)) DECL_ARG_TYPE (args) = c_type_promotes_to (type); } } } if (TREE_CODE (decl) == FUNCTION_DECL && DECL_DECLARED_INLINE_P (decl) && DECL_UNINLINABLE (decl) && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl))) warning (OPT_Wattributes, "inline function %q+D given attribute noinline", decl); /* C99 6.7.4p3: An inline definition of a function with external linkage shall not contain a definition of a modifiable object with static storage duration... */ if (TREE_CODE (decl) == VAR_DECL && current_scope != file_scope && TREE_STATIC (decl) && !TREE_READONLY (decl) && DECL_DECLARED_INLINE_P (current_function_decl) && DECL_EXTERNAL (current_function_decl)) record_inline_static (input_location, current_function_decl, decl, csi_modifiable); if (c_dialect_objc () && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL)) objc_check_global_decl (decl); /* Add this decl to the current scope. TEM may equal DECL or it may be a previous decl of the same name. */ tem = pushdecl (decl); if (initialized && DECL_EXTERNAL (tem)) { DECL_EXTERNAL (tem) = 0; TREE_STATIC (tem) = 1; } return tem; } /* Subroutine of finish_decl. TYPE is the type of an uninitialized object DECL or the non-array element type if DECL is an uninitialized array. If that type has a const member, diagnose this. */ static void diagnose_uninitialized_cst_member (tree decl, tree type) { tree field; for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) { tree field_type; if (TREE_CODE (field) != FIELD_DECL) continue; field_type = strip_array_types (TREE_TYPE (field)); if (TYPE_QUALS (field_type) & TYPE_QUAL_CONST) { warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat, "uninitialized const member in %qT is invalid in C++", strip_array_types (TREE_TYPE (decl))); inform (DECL_SOURCE_LOCATION (field), "%qD should be initialized", field); } if (TREE_CODE (field_type) == RECORD_TYPE || TREE_CODE (field_type) == UNION_TYPE) diagnose_uninitialized_cst_member (decl, field_type); } } /* Finish processing of a declaration; install its initial value. If ORIGTYPE is not NULL_TREE, it is the original type of INIT. If the length of an array type is not known before, it must be determined now, from the initial value, or it is an error. INIT_LOC is the location of the initial value. */ void finish_decl (tree decl, location_t init_loc, tree init, tree origtype, tree asmspec_tree) { tree type; bool was_incomplete = (DECL_SIZE (decl) == 0); const char *asmspec = 0; /* If a name was specified, get the string. 
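     (The name comes from an asm label on the declaration, e.g.

       int counter asm ("hw_counter");

     in which case asmspec is "hw_counter".)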
*/ if ((TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == VAR_DECL) && DECL_FILE_SCOPE_P (decl)) asmspec_tree = maybe_apply_renaming_pragma (decl, asmspec_tree); if (asmspec_tree) asmspec = TREE_STRING_POINTER (asmspec_tree); if (TREE_CODE (decl) == VAR_DECL && TREE_STATIC (decl) && global_bindings_p ()) /* So decl is a global variable. Record the types it uses so that we can decide later to emit debug info for them. */ record_types_used_by_current_var_decl (decl); /* If `start_decl' didn't like having an initialization, ignore it now. */ if (init != 0 && DECL_INITIAL (decl) == 0) init = 0; /* Don't crash if parm is initialized. */ if (TREE_CODE (decl) == PARM_DECL) init = 0; if (init) store_init_value (init_loc, decl, init, origtype); if (c_dialect_objc () && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL || TREE_CODE (decl) == FIELD_DECL)) objc_check_decl (decl); type = TREE_TYPE (decl); /* Deduce size of array from initialization, if not already known. */ if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == 0 && TREE_CODE (decl) != TYPE_DECL) { bool do_default = (TREE_STATIC (decl) /* Even if pedantic, an external linkage array may have incomplete type at first. */ ? pedantic && !TREE_PUBLIC (decl) : !DECL_EXTERNAL (decl)); int failure = complete_array_type (&TREE_TYPE (decl), DECL_INITIAL (decl), do_default); /* Get the completed type made by complete_array_type. */ type = TREE_TYPE (decl); switch (failure) { case 1: error ("initializer fails to determine size of %q+D", decl); break; case 2: if (do_default) error ("array size missing in %q+D", decl); /* If a `static' var's size isn't known, make it extern as well as static, so it does not get allocated. If it is not `static', then do not mark extern; finish_incomplete_decl will give it a default size and it will get allocated. */ else if (!pedantic && TREE_STATIC (decl) && !TREE_PUBLIC (decl)) DECL_EXTERNAL (decl) = 1; break; case 3: error ("zero or negative size array %q+D", decl); break; case 0: /* For global variables, update the copy of the type that exists in the binding. */ if (TREE_PUBLIC (decl)) { struct c_binding *b_ext = I_SYMBOL_BINDING (DECL_NAME (decl)); while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext)) b_ext = b_ext->shadowed; if (b_ext) { if (b_ext->u.type && comptypes (b_ext->u.type, type)) b_ext->u.type = composite_type (b_ext->u.type, type); else b_ext->u.type = type; } } break; default: gcc_unreachable (); } if (DECL_INITIAL (decl)) TREE_TYPE (DECL_INITIAL (decl)) = type; relayout_decl (decl); } if (TREE_CODE (decl) == VAR_DECL) { if (init && TREE_CODE (init) == CONSTRUCTOR) add_flexible_array_elts_to_size (decl, init); if (DECL_SIZE (decl) == 0 && TREE_TYPE (decl) != error_mark_node && COMPLETE_TYPE_P (TREE_TYPE (decl))) layout_decl (decl, 0); if (DECL_SIZE (decl) == 0 /* Don't give an error if we already gave one earlier. */ && TREE_TYPE (decl) != error_mark_node && (TREE_STATIC (decl) /* A static variable with an incomplete type is an error if it is initialized. Also if it is not file scope. Otherwise, let it through, but if it is not `extern' then it may cause an error message later. */ ? (DECL_INITIAL (decl) != 0 || !DECL_FILE_SCOPE_P (decl)) /* An automatic variable with an incomplete type is an error. 
*/ : !DECL_EXTERNAL (decl))) { error ("storage size of %q+D isn%'t known", decl); TREE_TYPE (decl) = error_mark_node; } if ((DECL_EXTERNAL (decl) || TREE_STATIC (decl)) && DECL_SIZE (decl) != 0) { if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST) constant_expression_warning (DECL_SIZE (decl)); else { error ("storage size of %q+D isn%'t constant", decl); TREE_TYPE (decl) = error_mark_node; } } if (TREE_USED (type)) { TREE_USED (decl) = 1; DECL_READ_P (decl) = 1; } } /* If this is a function and an assembler name is specified, reset DECL_RTL so we can give it its new name. Also, update builtin_decl if it was a normal built-in. */ if (TREE_CODE (decl) == FUNCTION_DECL && asmspec) { if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL) set_builtin_user_assembler_name (decl, asmspec); set_user_assembler_name (decl, asmspec); } /* If #pragma weak was used, mark the decl weak now. */ maybe_apply_pragma_weak (decl); /* Output the assembler code and/or RTL code for variables and functions, unless the type is an undefined structure or union. If not, it will get done when the type is completed. */ if (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == FUNCTION_DECL) { /* Determine the ELF visibility. */ if (TREE_PUBLIC (decl)) c_determine_visibility (decl); /* This is a no-op in c-lang.c or something real in objc-act.c. */ if (c_dialect_objc ()) objc_check_decl (decl); if (asmspec) { /* If this is not a static variable, issue a warning. It doesn't make any sense to give an ASMSPEC for an ordinary, non-register local variable. Historically, GCC has accepted -- but ignored -- the ASMSPEC in this case. */ if (!DECL_FILE_SCOPE_P (decl) && TREE_CODE (decl) == VAR_DECL && !C_DECL_REGISTER (decl) && !TREE_STATIC (decl)) warning (0, "ignoring asm-specifier for non-static local " "variable %q+D", decl); else set_user_assembler_name (decl, asmspec); } if (DECL_FILE_SCOPE_P (decl)) { if (DECL_INITIAL (decl) == NULL_TREE || DECL_INITIAL (decl) == error_mark_node) /* Don't output anything when a tentative file-scope definition is seen. But at end of compilation, do output code for them. */ DECL_DEFER_OUTPUT (decl) = 1; if (asmspec && C_DECL_REGISTER (decl)) DECL_HARD_REGISTER (decl) = 1; rest_of_decl_compilation (decl, true, 0); } else { /* In conjunction with an ASMSPEC, the `register' keyword indicates that we should place the variable in a particular register. */ if (asmspec && C_DECL_REGISTER (decl)) { DECL_HARD_REGISTER (decl) = 1; /* This cannot be done for a structure with volatile fields, on which DECL_REGISTER will have been reset. */ if (!DECL_REGISTER (decl)) error ("cannot put object with volatile field into register"); } if (TREE_CODE (decl) != FUNCTION_DECL) { /* If we're building a variable sized type, and we might be reachable other than via the top of the current binding level, then create a new BIND_EXPR so that we deallocate the object at the right time. */ /* Note that DECL_SIZE can be null due to errors. */ if (DECL_SIZE (decl) && !TREE_CONSTANT (DECL_SIZE (decl)) && STATEMENT_LIST_HAS_LABEL (cur_stmt_list)) { tree bind; bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (bind) = 1; add_stmt (bind); BIND_EXPR_BODY (bind) = push_stmt_list (); } add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl)); } } if (!DECL_FILE_SCOPE_P (decl)) { /* Recompute the RTL of a local array now if it used to be an incomplete type. */ if (was_incomplete && !TREE_STATIC (decl) && !DECL_EXTERNAL (decl)) { /* If we used it already as memory, it must stay in memory. 
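     (Its address may already have been taken while the type was
     incomplete, so the decl cannot be promoted to a register now.)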
*/ TREE_ADDRESSABLE (decl) = TREE_USED (decl); /* If it's still incomplete now, no init will save it. */ if (DECL_SIZE (decl) == 0) DECL_INITIAL (decl) = 0; } } } if (TREE_CODE (decl) == TYPE_DECL) { if (!DECL_FILE_SCOPE_P (decl) && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE)) add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl)); rest_of_decl_compilation (decl, DECL_FILE_SCOPE_P (decl), 0); } /* Install a cleanup (aka destructor) if one was given. */ if (TREE_CODE (decl) == VAR_DECL && !TREE_STATIC (decl)) { tree attr = lookup_attribute ("cleanup", DECL_ATTRIBUTES (decl)); if (attr) { tree cleanup_id = TREE_VALUE (TREE_VALUE (attr)); tree cleanup_decl = lookup_name (cleanup_id); tree cleanup; vec<tree, va_gc> *v; /* Build "cleanup(&decl)" for the destructor. */ cleanup = build_unary_op (input_location, ADDR_EXPR, decl, 0); vec_alloc (v, 1); v->quick_push (cleanup); cleanup = c_build_function_call_vec (DECL_SOURCE_LOCATION (decl), vNULL, cleanup_decl, v, NULL); vec_free (v); /* Don't warn about decl unused; the cleanup uses it. */ TREE_USED (decl) = 1; TREE_USED (cleanup_decl) = 1; DECL_READ_P (decl) = 1; push_cleanup (decl, cleanup, false); } } if (warn_cxx_compat && TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl) && DECL_INITIAL (decl) == NULL_TREE) { type = strip_array_types (type); if (TREE_READONLY (decl)) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat, "uninitialized const %qD is invalid in C++", decl); else if ((TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE) && C_TYPE_FIELDS_READONLY (type)) diagnose_uninitialized_cst_member (decl, type); } invoke_plugin_callbacks (PLUGIN_FINISH_DECL, decl); } /* Given a parsed parameter declaration, decode it into a PARM_DECL. EXPR is NULL or a pointer to an expression that needs to be evaluated for the side effects of array size expressions in the parameters. */ tree grokparm (const struct c_parm *parm, tree *expr) { tree attrs = parm->attrs; tree decl = grokdeclarator (parm->declarator, parm->specs, PARM, false, NULL, &attrs, expr, NULL, DEPRECATED_NORMAL); decl_attributes (&decl, attrs, 0); return decl; } /* Given a parsed parameter declaration, decode it into a PARM_DECL and push that on the current scope. EXPR is a pointer to an expression that needs to be evaluated for the side effects of array size expressions in the parameters. */ void push_parm_decl (const struct c_parm *parm, tree *expr) { tree attrs = parm->attrs; tree decl; decl = grokdeclarator (parm->declarator, parm->specs, PARM, false, NULL, &attrs, expr, NULL, DEPRECATED_NORMAL); decl_attributes (&decl, attrs, 0); decl = pushdecl (decl); finish_decl (decl, input_location, NULL_TREE, NULL_TREE, NULL_TREE); } /* Mark all the parameter declarations to date as forward decls. Also diagnose use of this extension. */ void mark_forward_parm_decls (void) { struct c_binding *b; if (pedantic && !current_scope->warned_forward_parm_decls) { pedwarn (input_location, OPT_Wpedantic, "ISO C forbids forward parameter declarations"); current_scope->warned_forward_parm_decls = true; } for (b = current_scope->bindings; b; b = b->prev) if (TREE_CODE (b->decl) == PARM_DECL) TREE_ASM_WRITTEN (b->decl) = 1; } /* Build a COMPOUND_LITERAL_EXPR. TYPE is the type given in the compound literal, which may be an incomplete array type completed by the initializer; INIT is a CONSTRUCTOR at LOC that initializes the compound literal. NON_CONST is true if the initializers contain something that cannot occur in a constant expression. 
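
   For example, a C99 compound literal such as:

     int *p = (int []){ 2, 4 };

   creates an unnamed object (static at file scope, automatic within a
   block) whose decl is stored inside the COMPOUND_LITERAL_EXPR.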
*/ tree build_compound_literal (location_t loc, tree type, tree init, bool non_const) { /* We do not use start_decl here because we have a type, not a declarator; and do not use finish_decl because the decl should be stored inside the COMPOUND_LITERAL_EXPR rather than added elsewhere as a DECL_EXPR. */ tree decl; tree complit; tree stmt; if (type == error_mark_node || init == error_mark_node) return error_mark_node; decl = build_decl (loc, VAR_DECL, NULL_TREE, type); DECL_EXTERNAL (decl) = 0; TREE_PUBLIC (decl) = 0; TREE_STATIC (decl) = (current_scope == file_scope); DECL_CONTEXT (decl) = current_function_decl; TREE_USED (decl) = 1; DECL_READ_P (decl) = 1; TREE_TYPE (decl) = type; TREE_READONLY (decl) = (TYPE_READONLY (type) || (TREE_CODE (type) == ARRAY_TYPE && TYPE_READONLY (TREE_TYPE (type)))); store_init_value (loc, decl, init, NULL_TREE); if (TREE_CODE (type) == ARRAY_TYPE && !COMPLETE_TYPE_P (type)) { int failure = complete_array_type (&TREE_TYPE (decl), DECL_INITIAL (decl), true); /* If complete_array_type returns 3, it means that the initial value of the compound literal is empty. Allow it. */ gcc_assert (failure == 0 || failure == 3); type = TREE_TYPE (decl); TREE_TYPE (DECL_INITIAL (decl)) = type; } if (type == error_mark_node || !COMPLETE_TYPE_P (type)) { c_incomplete_type_error (NULL_TREE, type); return error_mark_node; } stmt = build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl); complit = build1 (COMPOUND_LITERAL_EXPR, type, stmt); TREE_SIDE_EFFECTS (complit) = 1; layout_decl (decl, 0); if (TREE_STATIC (decl)) { /* This decl needs a name for the assembler output. */ set_compound_literal_name (decl); DECL_DEFER_OUTPUT (decl) = 1; DECL_COMDAT (decl) = 1; DECL_ARTIFICIAL (decl) = 1; DECL_IGNORED_P (decl) = 1; pushdecl (decl); rest_of_decl_compilation (decl, 1, 0); } if (non_const) { complit = build2 (C_MAYBE_CONST_EXPR, type, NULL, complit); C_MAYBE_CONST_EXPR_NON_CONST (complit) = 1; } return complit; } /* Check the type of a compound literal. Here we just check that it is valid for C++. */ void check_compound_literal_type (location_t loc, struct c_type_name *type_name) { if (warn_cxx_compat && (type_name->specs->typespec_kind == ctsk_tagdef || type_name->specs->typespec_kind == ctsk_tagfirstref)) warning_at (loc, OPT_Wc___compat, "defining a type in a compound literal is invalid in C++"); } /* Determine whether TYPE is a structure with a flexible array member, or a union containing such a structure (possibly recursively). */ static bool flexible_array_type_p (tree type) { tree x; switch (TREE_CODE (type)) { case RECORD_TYPE: x = TYPE_FIELDS (type); if (x == NULL_TREE) return false; while (DECL_CHAIN (x) != NULL_TREE) x = DECL_CHAIN (x); if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE && TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE && TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE) return true; return false; case UNION_TYPE: for (x = TYPE_FIELDS (type); x != NULL_TREE; x = DECL_CHAIN (x)) { if (flexible_array_type_p (TREE_TYPE (x))) return true; } return false; default: return false; } } /* Performs sanity checks on the TYPE and WIDTH of the bit-field NAME, replacing with appropriate values if they are invalid. */ static void check_bitfield_type_and_width (tree *type, tree *width, tree orig_name) { tree type_mv; unsigned int max_width; unsigned HOST_WIDE_INT w; const char *name = (orig_name ? 
identifier_to_locale (IDENTIFIER_POINTER (orig_name)) : _("<anonymous>")); /* Detect and ignore out of range field width and process valid field widths. */ if (!INTEGRAL_TYPE_P (TREE_TYPE (*width))) { error ("bit-field %qs width not an integer constant", name); *width = integer_one_node; } else { if (TREE_CODE (*width) != INTEGER_CST) { *width = c_fully_fold (*width, false, NULL); if (TREE_CODE (*width) == INTEGER_CST) pedwarn (input_location, OPT_Wpedantic, "bit-field %qs width not an integer constant expression", name); } if (TREE_CODE (*width) != INTEGER_CST) { error ("bit-field %qs width not an integer constant", name); *width = integer_one_node; } constant_expression_warning (*width); if (tree_int_cst_sgn (*width) < 0) { error ("negative width in bit-field %qs", name); *width = integer_one_node; } else if (integer_zerop (*width) && orig_name) { error ("zero width for bit-field %qs", name); *width = integer_one_node; } } /* Detect invalid bit-field type. */ if (TREE_CODE (*type) != INTEGER_TYPE && TREE_CODE (*type) != BOOLEAN_TYPE && TREE_CODE (*type) != ENUMERAL_TYPE) { error ("bit-field %qs has invalid type", name); *type = unsigned_type_node; } type_mv = TYPE_MAIN_VARIANT (*type); if (!in_system_header_at (input_location) && type_mv != integer_type_node && type_mv != unsigned_type_node && type_mv != boolean_type_node) pedwarn_c90 (input_location, OPT_Wpedantic, "type of bit-field %qs is a GCC extension", name); max_width = TYPE_PRECISION (*type); if (0 < compare_tree_int (*width, max_width)) { error ("width of %qs exceeds its type", name); w = max_width; *width = build_int_cst (integer_type_node, w); } else w = tree_to_uhwi (*width); if (TREE_CODE (*type) == ENUMERAL_TYPE) { struct lang_type *lt = TYPE_LANG_SPECIFIC (*type); if (!lt || w < tree_int_cst_min_precision (lt->enum_min, TYPE_SIGN (*type)) || w < tree_int_cst_min_precision (lt->enum_max, TYPE_SIGN (*type))) warning (0, "%qs is narrower than values of its type", name); } } /* Print warning about variable length array if necessary. */ static void warn_variable_length_array (tree name, tree size) { if (TREE_CONSTANT (size)) { if (name) pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids array %qE whose size " "can%'t be evaluated", name); else pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids array " "whose size can%'t be evaluated"); } else { if (name) pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids variable length array %qE", name); else pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids variable " "length array"); } } /* Print warning about defaulting to int if necessary. */ static void warn_defaults_to (location_t location, int opt, const char *gmsgid, ...) { diagnostic_info diagnostic; va_list ap; va_start (ap, gmsgid); diagnostic_set_info (&diagnostic, gmsgid, &ap, location, flag_isoc99 ? DK_PEDWARN : DK_WARNING); diagnostic.option_index = opt; report_diagnostic (&diagnostic); va_end (ap); } /* Given declspecs and a declarator, determine the name and type of the object declared and construct a ..._DECL node for it. (In one case we can return a ..._TYPE node instead. For invalid input we sometimes return 0.) DECLSPECS is a c_declspecs structure for the declaration specifiers. DECL_CONTEXT says which syntactic context this declaration is in: NORMAL for most contexts. Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL. FUNCDEF for a function definition. Like NORMAL but a few different error messages in each case. Return value may be zero meaning this definition is too screwy to try to parse. 
PARM for a parameter declaration (either within a function prototype or before a function body). Make a PARM_DECL, or return void_type_node. TYPENAME if for a typename (in a cast or sizeof). Don't make a DECL node; just return the ..._TYPE node. FIELD for a struct or union field; make a FIELD_DECL. INITIALIZED is true if the decl has an initializer. WIDTH is non-NULL for bit-fields, and is a pointer to an INTEGER_CST node representing the width of the bit-field. DECL_ATTRS points to the list of attributes that should be added to this decl. Any nested attributes that belong on the decl itself will be added to this list. If EXPR is not NULL, any expressions that need to be evaluated as part of evaluating variably modified types will be stored in *EXPR. If EXPR_CONST_OPERANDS is not NULL, *EXPR_CONST_OPERANDS will be set to indicate whether operands in *EXPR can be used in constant expressions. DEPRECATED_STATE is a deprecated_states value indicating whether deprecation warnings should be suppressed. In the TYPENAME case, DECLARATOR is really an absolute declarator. It may also be so in the PARM case, for a prototype where the argument type is specified but not the name. This function is where the complicated C meanings of `static' and `extern' are interpreted. */ static tree grokdeclarator (const struct c_declarator *declarator, struct c_declspecs *declspecs, enum decl_context decl_context, bool initialized, tree *width, tree *decl_attrs, tree *expr, bool *expr_const_operands, enum deprecated_states deprecated_state) { tree type = declspecs->type; bool threadp = declspecs->thread_p; enum c_storage_class storage_class = declspecs->storage_class; int constp; int restrictp; int volatilep; int atomicp; int type_quals = TYPE_UNQUALIFIED; tree name = NULL_TREE; bool funcdef_flag = false; bool funcdef_syntax = false; bool size_varies = false; tree decl_attr = declspecs->decl_attr; int array_ptr_quals = TYPE_UNQUALIFIED; tree array_ptr_attrs = NULL_TREE; int array_parm_static = 0; bool array_parm_vla_unspec_p = false; tree returned_attrs = NULL_TREE; bool bitfield = width != NULL; tree element_type; tree orig_qual_type = NULL; size_t orig_qual_indirect = 0; struct c_arg_info *arg_info = 0; addr_space_t as1, as2, address_space; location_t loc = UNKNOWN_LOCATION; const char *errmsg; tree expr_dummy; bool expr_const_operands_dummy; enum c_declarator_kind first_non_attr_kind; unsigned int alignas_align = 0; if (TREE_CODE (type) == ERROR_MARK) return error_mark_node; if (expr == NULL) expr = &expr_dummy; if (expr_const_operands == NULL) expr_const_operands = &expr_const_operands_dummy; *expr = declspecs->expr; *expr_const_operands = declspecs->expr_const_operands; if (decl_context == FUNCDEF) funcdef_flag = true, decl_context = NORMAL; /* Look inside a declarator for the name being declared and get it as an IDENTIFIER_NODE, for an error message. */ { const struct c_declarator *decl = declarator; first_non_attr_kind = cdk_attrs; while (decl) switch (decl->kind) { case cdk_array: loc = decl->id_loc; /* FALL THRU. 
*/ case cdk_function: case cdk_pointer: funcdef_syntax = (decl->kind == cdk_function); if (first_non_attr_kind == cdk_attrs) first_non_attr_kind = decl->kind; decl = decl->declarator; break; case cdk_attrs: decl = decl->declarator; break; case cdk_id: loc = decl->id_loc; if (decl->u.id) name = decl->u.id; if (first_non_attr_kind == cdk_attrs) first_non_attr_kind = decl->kind; decl = 0; break; default: gcc_unreachable (); } if (name == 0) { gcc_assert (decl_context == PARM || decl_context == TYPENAME || (decl_context == FIELD && declarator->kind == cdk_id)); gcc_assert (!initialized); } } /* A function definition's declarator must have the form of a function declarator. */ if (funcdef_flag && !funcdef_syntax) return 0; /* If this looks like a function definition, make it one, even if it occurs where parms are expected. Then store_parm_decls will reject it and not use it as a parm. */ if (decl_context == NORMAL && !funcdef_flag && current_scope->parm_flag) decl_context = PARM; if (declspecs->deprecated_p && deprecated_state != DEPRECATED_SUPPRESS) warn_deprecated_use (declspecs->type, declspecs->decl_attr); if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope && variably_modified_type_p (type, NULL_TREE)) { if (name) error_at (loc, "variably modified %qE at file scope", name); else error_at (loc, "variably modified field at file scope"); type = integer_type_node; } size_varies = C_TYPE_VARIABLE_SIZE (type) != 0; /* Diagnose defaulting to "int". */ if (declspecs->default_int_p && !in_system_header_at (input_location)) { /* Issue a warning if this is an ISO C 99 program or if -Wreturn-type and this is a function, or if -Wimplicit; prefer the former warning since it is more explicit. */ if ((warn_implicit_int || warn_return_type || flag_isoc99) && funcdef_flag) warn_about_return_type = 1; else { if (name) warn_defaults_to (loc, OPT_Wimplicit_int, "type defaults to %<int%> in declaration " "of %qE", name); else warn_defaults_to (loc, OPT_Wimplicit_int, "type defaults to %<int%> in type name"); } } /* Adjust the type if a bit-field is being declared, -funsigned-bitfields applied and the type is not explicitly "signed". */ if (bitfield && !flag_signed_bitfields && !declspecs->explicit_signed_p && TREE_CODE (type) == INTEGER_TYPE) type = unsigned_type_for (type); /* Figure out the type qualifiers for the declaration. There are two ways a declaration can become qualified. One is something like `const int i' where the `const' is explicit. Another is something like `typedef const int CI; CI i' where the type of the declaration contains the `const'. A third possibility is that there is a type qualifier on the element type of a typedefed array type, in which case we should extract that qualifier so that c_apply_type_quals_to_decl receives the full list of qualifiers to work with (C90 is not entirely clear about whether duplicate qualifiers should be diagnosed in this case, but it seems most appropriate to do so). */ element_type = strip_array_types (type); constp = declspecs->const_p + TYPE_READONLY (element_type); restrictp = declspecs->restrict_p + TYPE_RESTRICT (element_type); volatilep = declspecs->volatile_p + TYPE_VOLATILE (element_type); atomicp = declspecs->atomic_p + TYPE_ATOMIC (element_type); as1 = declspecs->address_space; as2 = TYPE_ADDR_SPACE (element_type); address_space = ADDR_SPACE_GENERIC_P (as1)? 
as2 : as1; if (constp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<const%>"); if (restrictp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<restrict%>"); if (volatilep > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<volatile%>"); if (atomicp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<_Atomic%>"); if (!ADDR_SPACE_GENERIC_P (as1) && !ADDR_SPACE_GENERIC_P (as2) && as1 != as2) error_at (loc, "conflicting named address spaces (%s vs %s)", c_addr_space_name (as1), c_addr_space_name (as2)); if ((TREE_CODE (type) == ARRAY_TYPE || first_non_attr_kind == cdk_array) && TYPE_QUALS (element_type)) { orig_qual_type = type; type = TYPE_MAIN_VARIANT (type); } type_quals = ((constp ? TYPE_QUAL_CONST : 0) | (restrictp ? TYPE_QUAL_RESTRICT : 0) | (volatilep ? TYPE_QUAL_VOLATILE : 0) | (atomicp ? TYPE_QUAL_ATOMIC : 0) | ENCODE_QUAL_ADDR_SPACE (address_space)); if (type_quals != TYPE_QUALS (element_type)) orig_qual_type = NULL_TREE; /* Applying the _Atomic qualifier to an array type (through the use of typedefs or typeof) must be detected here. If the qualifier is introduced later, any appearance of applying it to an array is actually applying it to an element of that array. */ if (atomicp && TREE_CODE (type) == ARRAY_TYPE) error_at (loc, "%<_Atomic%>-qualified array type"); /* Warn about storage classes that are invalid for certain kinds of declarations (parameters, typenames, etc.). */ if (funcdef_flag && (threadp || storage_class == csc_auto || storage_class == csc_register || storage_class == csc_typedef)) { if (storage_class == csc_auto) pedwarn (loc, (current_scope == file_scope) ? 0 : OPT_Wpedantic, "function definition declared %<auto%>"); if (storage_class == csc_register) error_at (loc, "function definition declared %<register%>"); if (storage_class == csc_typedef) error_at (loc, "function definition declared %<typedef%>"); if (threadp) error_at (loc, "function definition declared %qs", declspecs->thread_gnu_p ? "__thread" : "_Thread_local"); threadp = false; if (storage_class == csc_auto || storage_class == csc_register || storage_class == csc_typedef) storage_class = csc_none; } else if (decl_context != NORMAL && (storage_class != csc_none || threadp)) { if (decl_context == PARM && storage_class == csc_register) ; else { switch (decl_context) { case FIELD: if (name) error_at (loc, "storage class specified for structure " "field %qE", name); else error_at (loc, "storage class specified for structure field"); break; case PARM: if (name) error_at (loc, "storage class specified for parameter %qE", name); else error_at (loc, "storage class specified for unnamed parameter"); break; default: error_at (loc, "storage class specified for typename"); break; } storage_class = csc_none; threadp = false; } } else if (storage_class == csc_extern && initialized && !funcdef_flag) { /* 'extern' with initialization is invalid if not at file scope. */ if (current_scope == file_scope) { /* It is fine to have 'extern const' when compiling at C and C++ intersection. 
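             For example:

               extern const int limit = 64;

             is idiomatic C++ (a const needing external linkage), so it
             is not warned about when -Wc++-compat is in effect.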
*/ if (!(warn_cxx_compat && constp)) warning_at (loc, 0, "%qE initialized and declared %<extern%>", name); } else error_at (loc, "%qE has both %<extern%> and initializer", name); } else if (current_scope == file_scope) { if (storage_class == csc_auto) error_at (loc, "file-scope declaration of %qE specifies %<auto%>", name); if (pedantic && storage_class == csc_register) pedwarn (input_location, OPT_Wpedantic, "file-scope declaration of %qE specifies %<register%>", name); } else { if (storage_class == csc_extern && funcdef_flag) error_at (loc, "nested function %qE declared %<extern%>", name); else if (threadp && storage_class == csc_none) { error_at (loc, "function-scope %qE implicitly auto and declared " "%qs", name, declspecs->thread_gnu_p ? "__thread" : "_Thread_local"); threadp = false; } } /* Now figure out the structure of the declarator proper. Descend through it, creating more complex types, until we reach the declared identifier (or NULL_TREE, in an absolute declarator). At each stage we maintain an unqualified version of the type together with any qualifiers that should be applied to it with c_build_qualified_type; this way, array types including multidimensional array types are first built up in unqualified form and then the qualified form is created with TYPE_MAIN_VARIANT pointing to the unqualified form. */ while (declarator && declarator->kind != cdk_id) { if (type == error_mark_node) { declarator = declarator->declarator; continue; } /* Each level of DECLARATOR is either a cdk_array (for ...[..]), a cdk_pointer (for *...), a cdk_function (for ...(...)), a cdk_attrs (for nested attributes), or a cdk_id (for the name being declared or the place in an absolute declarator where the name was omitted). For the last case, we have just exited the loop. At this point, TYPE is the type of elements of an array, or for a function to return, or for a pointer to point to. After this sequence of ifs, TYPE is the type of the array or function or pointer, and DECLARATOR has had its outermost layer removed. */ if (array_ptr_quals != TYPE_UNQUALIFIED || array_ptr_attrs != NULL_TREE || array_parm_static) { /* Only the innermost declarator (making a parameter be of array type which is converted to pointer type) may have static or type qualifiers. */ error_at (loc, "static or type qualifiers in non-parameter array declarator"); array_ptr_quals = TYPE_UNQUALIFIED; array_ptr_attrs = NULL_TREE; array_parm_static = 0; } switch (declarator->kind) { case cdk_attrs: { /* A declarator with embedded attributes. */ tree attrs = declarator->u.attrs; const struct c_declarator *inner_decl; int attr_flags = 0; declarator = declarator->declarator; inner_decl = declarator; while (inner_decl->kind == cdk_attrs) inner_decl = inner_decl->declarator; if (inner_decl->kind == cdk_id) attr_flags |= (int) ATTR_FLAG_DECL_NEXT; else if (inner_decl->kind == cdk_function) attr_flags |= (int) ATTR_FLAG_FUNCTION_NEXT; else if (inner_decl->kind == cdk_array) attr_flags |= (int) ATTR_FLAG_ARRAY_NEXT; returned_attrs = decl_attributes (&type, chainon (returned_attrs, attrs), attr_flags); break; } case cdk_array: { tree itype = NULL_TREE; tree size = declarator->u.array.dimen; /* The index is a signed object `sizetype' bits wide. 
*/ tree index_type = c_common_signed_type (sizetype); array_ptr_quals = declarator->u.array.quals; array_ptr_attrs = declarator->u.array.attrs; array_parm_static = declarator->u.array.static_p; array_parm_vla_unspec_p = declarator->u.array.vla_unspec_p; declarator = declarator->declarator; /* Check for some types that there cannot be arrays of. */ if (VOID_TYPE_P (type)) { if (name) error_at (loc, "declaration of %qE as array of voids", name); else error_at (loc, "declaration of type name as array of voids"); type = error_mark_node; } if (TREE_CODE (type) == FUNCTION_TYPE) { if (name) error_at (loc, "declaration of %qE as array of functions", name); else error_at (loc, "declaration of type name as array of " "functions"); type = error_mark_node; } if (pedantic && !in_system_header_at (input_location) && flexible_array_type_p (type)) pedwarn (loc, OPT_Wpedantic, "invalid use of structure with flexible array member"); if (size == error_mark_node) type = error_mark_node; if (type == error_mark_node) continue; /* If size was specified, set ITYPE to a range-type for that size. Otherwise, ITYPE remains null. finish_decl may figure it out from an initial value. */ if (size) { bool size_maybe_const = true; bool size_int_const = (TREE_CODE (size) == INTEGER_CST && !TREE_OVERFLOW (size)); bool this_size_varies = false; /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */ STRIP_TYPE_NOPS (size); if (!INTEGRAL_TYPE_P (TREE_TYPE (size))) { if (name) error_at (loc, "size of array %qE has non-integer type", name); else error_at (loc, "size of unnamed array has non-integer type"); size = integer_one_node; } size = c_fully_fold (size, false, &size_maybe_const); if (pedantic && size_maybe_const && integer_zerop (size)) { if (name) pedwarn (loc, OPT_Wpedantic, "ISO C forbids zero-size array %qE", name); else pedwarn (loc, OPT_Wpedantic, "ISO C forbids zero-size array"); } if (TREE_CODE (size) == INTEGER_CST && size_maybe_const) { constant_expression_warning (size); if (tree_int_cst_sgn (size) < 0) { if (name) error_at (loc, "size of array %qE is negative", name); else error_at (loc, "size of unnamed array is negative"); size = integer_one_node; } /* Handle a size folded to an integer constant but not an integer constant expression. */ if (!size_int_const) { /* If this is a file scope declaration of an ordinary identifier, this is invalid code; diagnosing it here and not subsequently treating the type as variable-length avoids more confusing diagnostics later. */ if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope) pedwarn (input_location, 0, "variably modified %qE at file scope", name); else this_size_varies = size_varies = true; warn_variable_length_array (name, size); } } else if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope) { error_at (loc, "variably modified %qE at file scope", name); size = integer_one_node; } else { /* Make sure the array size remains visibly nonconstant even if it is (eg) a const variable with known value. */ this_size_varies = size_varies = true; warn_variable_length_array (name, size); if (flag_sanitize & SANITIZE_VLA && decl_context == NORMAL && do_ubsan_in_current_function ()) { /* Evaluate the array size only once. 
*/ size = c_save_expr (size); size = c_fully_fold (size, false, NULL); size = fold_build2 (COMPOUND_EXPR, TREE_TYPE (size), ubsan_instrument_vla (loc, size), size); } } if (integer_zerop (size) && !this_size_varies) { /* A zero-length array cannot be represented with an unsigned index type, which is what we'll get with build_index_type. Create an open-ended range instead. */ itype = build_range_type (sizetype, size, NULL_TREE); } else { /* Arrange for the SAVE_EXPR on the inside of the MINUS_EXPR, which allows the -1 to get folded with the +1 that happens when building TYPE_SIZE. */ if (size_varies) size = save_expr (size); if (this_size_varies && TREE_CODE (size) == INTEGER_CST) size = build2 (COMPOUND_EXPR, TREE_TYPE (size), integer_zero_node, size); /* Compute the maximum valid index, that is, size - 1. Do the calculation in index_type, so that if it is a variable the computations will be done in the proper mode. */ itype = fold_build2_loc (loc, MINUS_EXPR, index_type, convert (index_type, size), convert (index_type, size_one_node)); /* The above overflows when size does not fit in index_type. ??? While a size of INT_MAX+1 technically shouldn't cause an overflow (because we subtract 1), handling this case seems like an unnecessary complication. */ if (TREE_CODE (size) == INTEGER_CST && !int_fits_type_p (size, index_type)) { if (name) error_at (loc, "size of array %qE is too large", name); else error_at (loc, "size of unnamed array is too large"); type = error_mark_node; continue; } itype = build_index_type (itype); } if (this_size_varies) { if (*expr) *expr = build2 (COMPOUND_EXPR, TREE_TYPE (size), *expr, size); else *expr = size; *expr_const_operands &= size_maybe_const; } } else if (decl_context == FIELD) { bool flexible_array_member = false; if (array_parm_vla_unspec_p) /* Field names can in fact have function prototype scope so [*] is disallowed here through making the field variably modified, not through being something other than a declaration with function prototype scope. */ size_varies = true; else { const struct c_declarator *t = declarator; while (t->kind == cdk_attrs) t = t->declarator; flexible_array_member = (t->kind == cdk_id); } if (flexible_array_member && !in_system_header_at (input_location)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not " "support flexible array members"); /* ISO C99 Flexible array members are effectively identical to GCC's zero-length array extension. */ if (flexible_array_member || array_parm_vla_unspec_p) itype = build_range_type (sizetype, size_zero_node, NULL_TREE); } else if (decl_context == PARM) { if (array_parm_vla_unspec_p) { itype = build_range_type (sizetype, size_zero_node, NULL_TREE); size_varies = true; } } else if (decl_context == TYPENAME) { if (array_parm_vla_unspec_p) { /* C99 6.7.5.2p4 */ warning (0, "%<[*]%> not in a declaration"); /* We use this to avoid messing up with incomplete array types of the same type, that would otherwise be modified below. */ itype = build_range_type (sizetype, size_zero_node, NULL_TREE); size_varies = true; } } /* Complain about arrays of incomplete types. */ if (!COMPLETE_TYPE_P (type)) { error_at (loc, "array type has incomplete element type %qT", type); type = error_mark_node; } else /* When itype is NULL, a shared incomplete array type is returned for all array of a given type. Elsewhere we make sure we don't complete that type before copying it, but here we want to make sure we don't ever modify the shared type, so we gcc_assert (itype) below. 
*/
            {
              addr_space_t as = DECODE_QUAL_ADDR_SPACE (type_quals);
              if (!ADDR_SPACE_GENERIC_P (as)
                  && as != TYPE_ADDR_SPACE (type))
                type = build_qualified_type (type,
                                             ENCODE_QUAL_ADDR_SPACE (as));
              type = build_array_type (type, itype);
            }

          if (type != error_mark_node)
            {
              if (size_varies)
                {
                  /* It is ok to modify type here even if itype is
                     NULL: if size_varies, we're in a
                     multi-dimensional array and the inner type has
                     variable size, so the enclosing shared array type
                     must too.  */
                  if (size && TREE_CODE (size) == INTEGER_CST)
                    type
                      = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
                  C_TYPE_VARIABLE_SIZE (type) = 1;
                }

              /* The GCC extension for zero-length arrays differs from
                 ISO flexible array members in that sizeof yields
                 zero.  */
              if (size && integer_zerop (size))
                {
                  gcc_assert (itype);
                  type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
                  TYPE_SIZE (type) = bitsize_zero_node;
                  TYPE_SIZE_UNIT (type) = size_zero_node;
                  SET_TYPE_STRUCTURAL_EQUALITY (type);
                }
              if (array_parm_vla_unspec_p)
                {
                  gcc_assert (itype);
                  /* The type is complete.  C99 6.7.5.2p4  */
                  type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
                  TYPE_SIZE (type) = bitsize_zero_node;
                  TYPE_SIZE_UNIT (type) = size_zero_node;
                  SET_TYPE_STRUCTURAL_EQUALITY (type);
                }
            }

          if (decl_context != PARM
              && (array_ptr_quals != TYPE_UNQUALIFIED
                  || array_ptr_attrs != NULL_TREE
                  || array_parm_static))
            {
              error_at (loc,
                        "static or type qualifiers in non-parameter array declarator");
              array_ptr_quals = TYPE_UNQUALIFIED;
              array_ptr_attrs = NULL_TREE;
              array_parm_static = 0;
            }
          orig_qual_indirect++;
          break;
        }
      case cdk_function:
        {
          /* Say it's a definition only for the declarator closest to
             the identifier, apart possibly from some attributes.  */
          bool really_funcdef = false;
          tree arg_types;
          orig_qual_type = NULL_TREE;
          if (funcdef_flag)
            {
              const struct c_declarator *t = declarator->declarator;
              while (t->kind == cdk_attrs)
                t = t->declarator;
              really_funcdef = (t->kind == cdk_id);
            }

          /* Declaring a function type.  Make sure we have a valid
             type for the function to return.  */
          if (type == error_mark_node)
            continue;

          size_varies = false;

          /* Warn about some types functions can't return.  */
          if (TREE_CODE (type) == FUNCTION_TYPE)
            {
              if (name)
                error_at (loc, "%qE declared as function returning a "
                          "function", name);
              else
                error_at (loc, "type name declared as function "
                          "returning a function");
              type = integer_type_node;
            }
          if (TREE_CODE (type) == ARRAY_TYPE)
            {
              if (name)
                error_at (loc, "%qE declared as function returning an array",
                          name);
              else
                error_at (loc, "type name declared as function returning "
                          "an array");
              type = integer_type_node;
            }
          errmsg = targetm.invalid_return_type (type);
          if (errmsg)
            {
              error (errmsg);
              type = integer_type_node;
            }

          /* Construct the function type and go to the next inner
             layer of declarator.  */
          arg_info = declarator->u.arg_info;
          arg_types = grokparms (arg_info, really_funcdef);

          /* Type qualifiers before the return type of the function
             qualify the return type, not the function type.  */
          if (type_quals)
            {
              /* Type qualifiers on a function return type are
                 normally permitted by the standard but have no
                 effect, so give a warning at -Wignored-qualifiers.
                 Qualifiers on a void return type are banned on
                 function definitions in ISO C; GCC used to use them
                 for noreturn functions.
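
                 For example, the old idiom

                   volatile void fatal_error (const char *);

                 declared a function that does not return.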
*/ if (VOID_TYPE_P (type) && really_funcdef) pedwarn (loc, 0, "function definition has qualified void return type"); else warning_at (loc, OPT_Wignored_qualifiers, "type qualifiers ignored on function return type"); type = c_build_qualified_type (type, type_quals); } type_quals = TYPE_UNQUALIFIED; type = build_function_type (type, arg_types); declarator = declarator->declarator; /* Set the TYPE_CONTEXTs for each tagged type which is local to the formal parameter list of this FUNCTION_TYPE to point to the FUNCTION_TYPE node itself. */ { c_arg_tag *tag; unsigned ix; FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag) TYPE_CONTEXT (tag->type) = type; } break; } case cdk_pointer: { /* Merge any constancy or volatility into the target type for the pointer. */ if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); orig_qual_type = NULL_TREE; size_varies = false; /* When the pointed-to type involves components of variable size, care must be taken to ensure that the size evaluation code is emitted early enough to dominate all the possible later uses and late enough for the variables on which it depends to have been assigned. This is expected to happen automatically when the pointed-to type has a name/declaration of it's own, but special attention is required if the type is anonymous. We handle the NORMAL and FIELD contexts here by attaching an artificial TYPE_DECL to such pointed-to type. This forces the sizes evaluation at a safe point and ensures it is not deferred until e.g. within a deeper conditional context. We expect nothing to be needed here for PARM or TYPENAME. Pushing a TYPE_DECL at this point for TYPENAME would actually be incorrect, as we might be in the middle of an expression with side effects on the pointed-to type size "arguments" prior to the pointer declaration point and the fake TYPE_DECL in the enclosing context would force the size evaluation prior to the side effects. */ if (!TYPE_NAME (type) && (decl_context == NORMAL || decl_context == FIELD) && variably_modified_type_p (type, NULL_TREE)) { tree decl = build_decl (loc, TYPE_DECL, NULL_TREE, type); DECL_ARTIFICIAL (decl) = 1; pushdecl (decl); finish_decl (decl, loc, NULL_TREE, NULL_TREE, NULL_TREE); TYPE_NAME (type) = decl; } type = c_build_pointer_type (type); /* Process type qualifiers (such as const or volatile) that were given inside the `*'. */ type_quals = declarator->u.pointer_quals; declarator = declarator->declarator; break; } default: gcc_unreachable (); } } *decl_attrs = chainon (returned_attrs, *decl_attrs); /* Now TYPE has the actual type, apart from any qualifiers in TYPE_QUALS. */ /* Warn about address space used for things other than static memory or pointers. 
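
   Illustrative sketch, not part of the original source: named address
   spaces are target-specific extensions.  Assuming the AVR target's
   __flash qualifier, the checks below reject forms such as
   (names are hypothetical):

       const __flash char banner[] = "ok";  // file-scope object: accepted
       void f (void) { __flash char c; }    // error: address space
                                            // specified for auto variable
       struct s { __flash char c; };        // error: specified for
                                            // structure field

   Only static-storage objects and pointer target types may live in a
   named address space.
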
*/ address_space = DECODE_QUAL_ADDR_SPACE (type_quals); if (!ADDR_SPACE_GENERIC_P (address_space)) { if (decl_context == NORMAL) { switch (storage_class) { case csc_auto: error ("%qs combined with %<auto%> qualifier for %qE", c_addr_space_name (address_space), name); break; case csc_register: error ("%qs combined with %<register%> qualifier for %qE", c_addr_space_name (address_space), name); break; case csc_none: if (current_function_scope) { error ("%qs specified for auto variable %qE", c_addr_space_name (address_space), name); break; } break; case csc_static: case csc_extern: case csc_typedef: break; default: gcc_unreachable (); } } else if (decl_context == PARM && TREE_CODE (type) != ARRAY_TYPE) { if (name) error ("%qs specified for parameter %qE", c_addr_space_name (address_space), name); else error ("%qs specified for unnamed parameter", c_addr_space_name (address_space)); } else if (decl_context == FIELD) { if (name) error ("%qs specified for structure field %qE", c_addr_space_name (address_space), name); else error ("%qs specified for structure field", c_addr_space_name (address_space)); } } /* Check the type and width of a bit-field. */ if (bitfield) { check_bitfield_type_and_width (&type, width, name); /* C11 makes it implementation-defined (6.7.2.1#5) whether atomic types are permitted for bit-fields; we have no code to make bit-field accesses atomic, so disallow them. */ if (type_quals & TYPE_QUAL_ATOMIC) { if (name) error ("bit-field %qE has atomic type", name); else error ("bit-field has atomic type"); type_quals &= ~TYPE_QUAL_ATOMIC; } } /* Reject invalid uses of _Alignas. */ if (declspecs->alignas_p) { if (storage_class == csc_typedef) error_at (loc, "alignment specified for typedef %qE", name); else if (storage_class == csc_register) error_at (loc, "alignment specified for %<register%> object %qE", name); else if (decl_context == PARM) { if (name) error_at (loc, "alignment specified for parameter %qE", name); else error_at (loc, "alignment specified for unnamed parameter"); } else if (bitfield) { if (name) error_at (loc, "alignment specified for bit-field %qE", name); else error_at (loc, "alignment specified for unnamed bit-field"); } else if (TREE_CODE (type) == FUNCTION_TYPE) error_at (loc, "alignment specified for function %qE", name); else if (declspecs->align_log != -1) { alignas_align = 1U << declspecs->align_log; if (alignas_align < min_align_of_type (type)) { if (name) error_at (loc, "%<_Alignas%> specifiers cannot reduce " "alignment of %qE", name); else error_at (loc, "%<_Alignas%> specifiers cannot reduce " "alignment of unnamed field"); alignas_align = 0; } } } /* Did array size calculations overflow or does the array cover more than half of the address-space? */ if (TREE_CODE (type) == ARRAY_TYPE && COMPLETE_TYPE_P (type) && TREE_CODE (TYPE_SIZE_UNIT (type)) == INTEGER_CST && ! valid_constant_size_p (TYPE_SIZE_UNIT (type))) { if (name) error_at (loc, "size of array %qE is too large", name); else error_at (loc, "size of unnamed array is too large"); /* If we proceed with the array type as it is, we'll eventually crash in tree_to_[su]hwi(). */ type = error_mark_node; } /* If this is declaring a typedef name, return a TYPE_DECL. 
*/ if (storage_class == csc_typedef) { tree decl; if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); decl = build_decl (declarator->id_loc, TYPE_DECL, declarator->u.id, type); if (declspecs->explicit_signed_p) C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1; if (declspecs->inline_p) pedwarn (loc, 0,"typedef %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0,"typedef %q+D declared %<_Noreturn%>", decl); if (warn_cxx_compat && declarator->u.id != NULL_TREE) { struct c_binding *b = I_TAG_BINDING (declarator->u.id); if (b != NULL && b->decl != NULL_TREE && (B_IN_CURRENT_SCOPE (b) || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b))) && TYPE_MAIN_VARIANT (b->decl) != TYPE_MAIN_VARIANT (type)) { warning_at (declarator->id_loc, OPT_Wc___compat, ("using %qD as both a typedef and a tag is " "invalid in C++"), decl); if (b->locus != UNKNOWN_LOCATION) inform (b->locus, "originally defined here"); } } return decl; } /* If this is a type name (such as, in a cast or sizeof), compute the type and return it now. */ if (decl_context == TYPENAME) { /* Note that the grammar rejects storage classes in typenames and fields. */ gcc_assert (storage_class == csc_none && !threadp && !declspecs->inline_p && !declspecs->noreturn_p); if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids const or volatile function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); return type; } if (pedantic && decl_context == FIELD && variably_modified_type_p (type, NULL_TREE)) { /* C99 6.7.2.1p8 */ pedwarn (loc, OPT_Wpedantic, "a member of a structure or union cannot " "have a variably modified type"); } /* Aside from typedefs and type names (handle above), `void' at top level (not within pointer) is allowed only in public variables. We don't complain about parms either, but that is because a better error message can be made later. */ if (VOID_TYPE_P (type) && decl_context != PARM && !((decl_context != FIELD && TREE_CODE (type) != FUNCTION_TYPE) && (storage_class == csc_extern || (current_scope == file_scope && !(storage_class == csc_static || storage_class == csc_register))))) { error_at (loc, "variable or field %qE declared void", name); type = integer_type_node; } /* Now create the decl, which may be a VAR_DECL, a PARM_DECL or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */ { tree decl; if (decl_context == PARM) { tree promoted_type; bool array_parameter_p = false; /* A parameter declared as an array of T is really a pointer to T. One declared as a function is really a pointer to a function. */ if (TREE_CODE (type) == ARRAY_TYPE) { /* Transfer const-ness of array into that of type pointed to. 
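
   Illustrative sketch, not part of the original source: the adjustment
   below implements C99 parameter decay (names are hypothetical):

       void f (int a[const 10]);   // a has type "int *const"
       void g (int a[static 10]);  // a has type "int *"; callers promise
                                   // at least 10 accessible elements
       void h (int a[10]);         // a has plain type "int *"

   Qualifiers written inside the brackets (array_ptr_quals) end up on
   the pointer itself, not on the element type.
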
*/ type = TREE_TYPE (type); if (orig_qual_type != NULL_TREE) { if (orig_qual_indirect == 0) orig_qual_type = TREE_TYPE (orig_qual_type); else orig_qual_indirect--; } if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); type = c_build_pointer_type (type); type_quals = array_ptr_quals; if (type_quals) type = c_build_qualified_type (type, type_quals); /* We don't yet implement attributes in this context. */ if (array_ptr_attrs != NULL_TREE) warning_at (loc, OPT_Wattributes, "attributes in parameter array declarator ignored"); size_varies = false; array_parameter_p = true; } else if (TREE_CODE (type) == FUNCTION_TYPE) { if (type_quals & TYPE_QUAL_ATOMIC) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals); type = c_build_pointer_type (type); type_quals = TYPE_UNQUALIFIED; } else if (type_quals) type = c_build_qualified_type (type, type_quals); decl = build_decl (declarator->id_loc, PARM_DECL, declarator->u.id, type); if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; C_ARRAY_PARAMETER (decl) = array_parameter_p; /* Compute the type actually passed in the parmlist, for the case where there is no prototype. (For example, shorts and chars are passed as ints.) When there is a prototype, this is overridden later. */ if (type == error_mark_node) promoted_type = type; else promoted_type = c_type_promotes_to (type); DECL_ARG_TYPE (decl) = promoted_type; if (declspecs->inline_p) pedwarn (loc, 0, "parameter %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0, "parameter %q+D declared %<_Noreturn%>", decl); } else if (decl_context == FIELD) { /* Note that the grammar rejects storage classes in typenames and fields. */ gcc_assert (storage_class == csc_none && !threadp && !declspecs->inline_p && !declspecs->noreturn_p); /* Structure field. It may not be a function. */ if (TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "field %qE declared as a function", name); type = build_pointer_type (type); } else if (TREE_CODE (type) != ERROR_MARK && !COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (type)) { if (name) error_at (loc, "field %qE has incomplete type", name); else error_at (loc, "unnamed field has incomplete type"); type = error_mark_node; } else if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == NULL_TREE) { /* We have a flexible array member through a typedef. Set suitable range. Whether this is a correct position for a flexible array member will be determined elsewhere. */ if (!in_system_header_at (input_location)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not " "support flexible array members"); type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_DOMAIN (type) = build_range_type (sizetype, size_zero_node, NULL_TREE); } type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); decl = build_decl (declarator->id_loc, FIELD_DECL, declarator->u.id, type); DECL_NONADDRESSABLE_P (decl) = bitfield; if (bitfield && !declarator->u.id) TREE_NO_WARNING (decl) = 1; if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; } else if (TREE_CODE (type) == FUNCTION_TYPE) { if (storage_class == csc_register || threadp) { error_at (loc, "invalid storage class for function %qE", name); } else if (current_scope != file_scope) { /* Function declaration not at file scope. 
Storage classes other than `extern' are not allowed, C99 6.7.1p5, and `extern' makes no difference. However, GCC allows 'auto', perhaps with 'inline', to support nested functions. */ if (storage_class == csc_auto) pedwarn (loc, OPT_Wpedantic, "invalid storage class for function %qE", name); else if (storage_class == csc_static) { error_at (loc, "invalid storage class for function %qE", name); if (funcdef_flag) storage_class = declspecs->storage_class = csc_none; else return 0; } } decl = build_decl (declarator->id_loc, FUNCTION_DECL, declarator->u.id, type); decl = build_decl_attribute_variant (decl, decl_attr); if (type_quals & TYPE_QUAL_ATOMIC) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && type_quals && !DECL_IN_SYSTEM_HEADER (decl)) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); /* Every function declaration is an external reference (DECL_EXTERNAL) except for those which are not at file scope and are explicitly declared "auto". This is forbidden by standard C (C99 6.7.1p5) and is interpreted by GCC to signify a forward declaration of a nested function. */ if (storage_class == csc_auto && current_scope != file_scope) DECL_EXTERNAL (decl) = 0; /* In C99, a function which is declared 'inline' with 'extern' is not an external reference (which is confusing). It means that the later definition of the function must be output in this file, C99 6.7.4p6. In GNU C89, a function declared 'extern inline' is an external reference. */ else if (declspecs->inline_p && storage_class != csc_static) DECL_EXTERNAL (decl) = ((storage_class == csc_extern) == flag_gnu89_inline); else DECL_EXTERNAL (decl) = !initialized; /* Record absence of global scope for `static' or `auto'. */ TREE_PUBLIC (decl) = !(storage_class == csc_static || storage_class == csc_auto); /* For a function definition, record the argument information block where store_parm_decls will look for it. */ if (funcdef_flag) current_function_arg_info = arg_info; if (declspecs->default_int_p) C_FUNCTION_IMPLICIT_INT (decl) = 1; /* Record presence of `inline' and `_Noreturn', if it is reasonable. */ if (flag_hosted && MAIN_NAME_P (declarator->u.id)) { if (declspecs->inline_p) pedwarn (loc, 0, "cannot inline function %<main%>"); if (declspecs->noreturn_p) pedwarn (loc, 0, "%<main%> declared %<_Noreturn%>"); } else { if (declspecs->inline_p) /* Record that the function is declared `inline'. */ DECL_DECLARED_INLINE_P (decl) = 1; if (declspecs->noreturn_p) { if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 does not support %<_Noreturn%>"); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 does not support %<_Noreturn%>"); TREE_THIS_VOLATILE (decl) = 1; } } } else { /* It's a variable. */ /* An uninitialized decl with `extern' is a reference. */ int extern_ref = !initialized && storage_class == csc_extern; type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); /* C99 6.2.2p7: It is invalid (compile-time undefined behavior) to create an 'extern' declaration for a variable if there is a global declaration that is 'static' and the global declaration is not visible. (If the static declaration _is_ currently visible, the 'extern' declaration is taken to refer to that decl.) 
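
   Illustrative sketch, not part of the original source: the check
   below diagnoses the C99 6.2.2p7 case along these lines
   (names are hypothetical):

       static int counter;
       void f (void)
       {
         int counter = 0;          // hides the file-scope static
         {
           extern int counter;     // error: variable previously declared
         }                         // "static" redeclared "extern"
       }

   If the static declaration were still visible, the extern declaration
   would simply refer to it and no error would be given.
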
*/ if (extern_ref && current_scope != file_scope) { tree global_decl = identifier_global_value (declarator->u.id); tree visible_decl = lookup_name (declarator->u.id); if (global_decl && global_decl != visible_decl && TREE_CODE (global_decl) == VAR_DECL && !TREE_PUBLIC (global_decl)) error_at (loc, "variable previously declared %<static%> " "redeclared %<extern%>"); } decl = build_decl (declarator->id_loc, VAR_DECL, declarator->u.id, type); if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; if (declspecs->inline_p) pedwarn (loc, 0, "variable %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0, "variable %q+D declared %<_Noreturn%>", decl); /* At file scope, an initialized extern declaration may follow a static declaration. In that case, DECL_EXTERNAL will be reset later in start_decl. */ DECL_EXTERNAL (decl) = (storage_class == csc_extern); /* At file scope, the presence of a `static' or `register' storage class specifier, or the absence of all storage class specifiers makes this declaration a definition (perhaps tentative). Also, the absence of `static' makes it public. */ if (current_scope == file_scope) { TREE_PUBLIC (decl) = storage_class != csc_static; TREE_STATIC (decl) = !extern_ref; } /* Not at file scope, only `static' makes a static definition. */ else { TREE_STATIC (decl) = (storage_class == csc_static); TREE_PUBLIC (decl) = extern_ref; } if (threadp) set_decl_tls_model (decl, decl_default_tls_model (decl)); } if ((storage_class == csc_extern || (storage_class == csc_none && TREE_CODE (type) == FUNCTION_TYPE && !funcdef_flag)) && variably_modified_type_p (type, NULL_TREE)) { /* C99 6.7.5.2p2 */ if (TREE_CODE (type) == FUNCTION_TYPE) error_at (loc, "non-nested function with variably modified type"); else error_at (loc, "object with variably modified type must have " "no linkage"); } /* Record `register' declaration for warnings on & and in case doing stupid register allocation. */ if (storage_class == csc_register) { C_DECL_REGISTER (decl) = 1; DECL_REGISTER (decl) = 1; } /* Record constancy and volatility. */ c_apply_type_quals_to_decl (type_quals, decl); /* Apply _Alignas specifiers. */ if (alignas_align) { DECL_ALIGN (decl) = alignas_align * BITS_PER_UNIT; DECL_USER_ALIGN (decl) = 1; } /* If a type has volatile components, it should be stored in memory. Otherwise, the fact that those components are volatile will be ignored, and would even crash the compiler. Of course, this only makes sense on VAR,PARM, and RESULT decl's. */ if (C_TYPE_FIELDS_VOLATILE (TREE_TYPE (decl)) && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == RESULT_DECL)) { /* It is not an error for a structure with volatile fields to be declared register, but reset DECL_REGISTER since it cannot actually go in a register. */ int was_reg = C_DECL_REGISTER (decl); C_DECL_REGISTER (decl) = 0; DECL_REGISTER (decl) = 0; c_mark_addressable (decl); C_DECL_REGISTER (decl) = was_reg; } /* This is the earliest point at which we might know the assembler name of a variable. Thus, if it's known before this, die horribly. 
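
   Illustrative sketch, not part of the original source: the variably
   modified type checks a few lines above reject declarations such as
   (names are hypothetical):

       void f (int n)
       {
         extern int a[n];           // error: object with variably modified
       }                            // type must have no linkage
       void g (int n, int a[n][n]); // fine: parameters are rewritten to
                                    // pointers, satisfying C99 6.7.5.2p2

   A VLA must have automatic storage and no linkage, since its size is
   only computed at run time.
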
*/ gcc_assert (!DECL_ASSEMBLER_NAME_SET_P (decl)); if (warn_cxx_compat && TREE_CODE (decl) == VAR_DECL && TREE_PUBLIC (decl) && TREE_STATIC (decl) && (TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (decl)) == UNION_TYPE || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE) && TYPE_NAME (TREE_TYPE (decl)) == NULL_TREE) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat, ("non-local variable %qD with anonymous type is " "questionable in C++"), decl); return decl; } } /* Decode the parameter-list info for a function type or function definition. The argument is the value returned by `get_parm_info' (or made in c-parse.c if there is an identifier list instead of a parameter decl list). These two functions are separate because when a function returns or receives functions then each is called multiple times but the order of calls is different. The last call to `grokparms' is always the one that contains the formal parameter names of a function definition. Return a list of arg types to use in the FUNCTION_TYPE for this function. FUNCDEF_FLAG is true for a function definition, false for a mere declaration. A nonempty identifier-list gets an error message when FUNCDEF_FLAG is false. */ static tree grokparms (struct c_arg_info *arg_info, bool funcdef_flag) { tree arg_types = arg_info->types; if (funcdef_flag && arg_info->had_vla_unspec) { /* A function definition isn't function prototype scope C99 6.2.1p4. */ /* C99 6.7.5.2p4 */ error ("%<[*]%> not allowed in other than function prototype scope"); } if (arg_types == 0 && !funcdef_flag && !in_system_header_at (input_location)) warning (OPT_Wstrict_prototypes, "function declaration isn%'t a prototype"); if (arg_types == error_mark_node) return 0; /* don't set TYPE_ARG_TYPES in this case */ else if (arg_types && TREE_CODE (TREE_VALUE (arg_types)) == IDENTIFIER_NODE) { if (!funcdef_flag) { pedwarn (input_location, 0, "parameter names (without types) in function declaration"); arg_info->parms = NULL_TREE; } else arg_info->parms = arg_info->types; arg_info->types = 0; return 0; } else { tree parm, type, typelt; unsigned int parmno; const char *errmsg; /* If there is a parameter of incomplete type in a definition, this is an error. In a declaration this is valid, and a struct or union type may be completed later, before any calls or definition of the function. In the case where the tag was first declared within the parameter list, a warning has already been given. If a parameter has void type, then however the function cannot be defined or called, so warn. 
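
   Illustrative sketch, not part of the original source: the
   declaration/definition distinction described above plays out as
   follows (names are hypothetical):

       struct widget;                   // incomplete type
       void draw (struct widget w);     // declaration: valid, the struct
                                        // may be completed before any call
       void draw (struct widget w) { }  // error: parameter 1 ("w") has
                                        // incomplete type

   A declaration-only prototype keeps working because no layout of the
   parameter is needed until a call or a definition is seen.
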
*/ for (parm = arg_info->parms, typelt = arg_types, parmno = 1; parm; parm = DECL_CHAIN (parm), typelt = TREE_CHAIN (typelt), parmno++) { type = TREE_VALUE (typelt); if (type == error_mark_node) continue; if (!COMPLETE_TYPE_P (type)) { if (funcdef_flag) { if (DECL_NAME (parm)) error_at (input_location, "parameter %u (%q+D) has incomplete type", parmno, parm); else error_at (DECL_SOURCE_LOCATION (parm), "parameter %u has incomplete type", parmno); TREE_VALUE (typelt) = error_mark_node; TREE_TYPE (parm) = error_mark_node; arg_types = NULL_TREE; } else if (VOID_TYPE_P (type)) { if (DECL_NAME (parm)) warning_at (input_location, 0, "parameter %u (%q+D) has void type", parmno, parm); else warning_at (DECL_SOURCE_LOCATION (parm), 0, "parameter %u has void type", parmno); } } errmsg = targetm.invalid_parameter_type (type); if (errmsg) { error (errmsg); TREE_VALUE (typelt) = error_mark_node; TREE_TYPE (parm) = error_mark_node; arg_types = NULL_TREE; } if (DECL_NAME (parm) && TREE_USED (parm)) warn_if_shadowing (parm); } return arg_types; } } /* Allocate and initialize a c_arg_info structure from the parser's obstack. */ struct c_arg_info * build_arg_info (void) { struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info); ret->parms = NULL_TREE; ret->tags = NULL; ret->types = NULL_TREE; ret->others = NULL_TREE; ret->pending_sizes = NULL; ret->had_vla_unspec = 0; return ret; } /* Take apart the current scope and return a c_arg_info structure with info on a parameter list just parsed. This structure is later fed to 'grokparms' and 'store_parm_decls'. ELLIPSIS being true means the argument list ended in '...' so don't append a sentinel (void_list_node) to the end of the type-list. EXPR is NULL or an expression that needs to be evaluated for the side effects of array size expressions in the parameters. */ struct c_arg_info * get_parm_info (bool ellipsis, tree expr) { struct c_binding *b = current_scope->bindings; struct c_arg_info *arg_info = build_arg_info (); tree parms = 0; vec<c_arg_tag, va_gc> *tags = NULL; tree types = 0; tree others = 0; static bool explained_incomplete_types = false; bool gave_void_only_once_err = false; arg_info->had_vla_unspec = current_scope->had_vla_unspec; /* The bindings in this scope must not get put into a block. We will take care of deleting the binding nodes. */ current_scope->bindings = 0; /* This function is only called if there was *something* on the parameter list. */ gcc_assert (b); /* A parameter list consisting solely of 'void' indicates that the function takes no arguments. But if the 'void' is qualified (by 'const' or 'volatile'), or has a storage class specifier ('register'), then the behavior is undefined; issue an error. Typedefs for 'void' are OK (see DR#157). */ if (b->prev == 0 /* one binding */ && TREE_CODE (b->decl) == PARM_DECL /* which is a parameter */ && !DECL_NAME (b->decl) /* anonymous */ && VOID_TYPE_P (TREE_TYPE (b->decl))) /* of void type */ { if (TYPE_QUALS (TREE_TYPE (b->decl)) != TYPE_UNQUALIFIED || C_DECL_REGISTER (b->decl)) error ("%<void%> as only parameter may not be qualified"); /* There cannot be an ellipsis. */ if (ellipsis) error ("%<void%> must be the only parameter"); arg_info->types = void_list_node; return arg_info; } if (!ellipsis) types = void_list_node; /* Break up the bindings list into parms, tags, types, and others; apply sanity checks; purge the name-to-decl bindings. 
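
   Illustrative sketch, not part of the original source: the sanity
   checks applied while splitting the bindings include
   (names are hypothetical):

       int f (void);                        // takes no arguments
       int g (const void);                  // error: "void" as only
                                            // parameter may not be qualified
       int h (void, int);                   // error: "void" must be the
                                            // only parameter
       void p (struct inpar { int i; } x);  // warning: "struct inpar"
                                            // declared inside parameter list

   A typedef name for void remains acceptable in the first form,
   per DR#157.
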
*/ while (b) { tree decl = b->decl; tree type = TREE_TYPE (decl); c_arg_tag tag; const char *keyword; switch (TREE_CODE (decl)) { case PARM_DECL: if (b->id) { gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; } /* Check for forward decls that never got their actual decl. */ if (TREE_ASM_WRITTEN (decl)) error ("parameter %q+D has just a forward declaration", decl); /* Check for (..., void, ...) and issue an error. */ else if (VOID_TYPE_P (type) && !DECL_NAME (decl)) { if (!gave_void_only_once_err) { error ("%<void%> must be the only parameter"); gave_void_only_once_err = true; } } else { /* Valid parameter, add it to the list. */ DECL_CHAIN (decl) = parms; parms = decl; /* Since there is a prototype, args are passed in their declared types. The back end may override this later. */ DECL_ARG_TYPE (decl) = type; types = tree_cons (0, type, types); } break; case ENUMERAL_TYPE: keyword = "enum"; goto tag; case UNION_TYPE: keyword = "union"; goto tag; case RECORD_TYPE: keyword = "struct"; goto tag; tag: /* Types may not have tag-names, in which case the type appears in the bindings list with b->id NULL. */ if (b->id) { gcc_assert (I_TAG_BINDING (b->id) == b); I_TAG_BINDING (b->id) = b->shadowed; } /* Warn about any struct, union or enum tags defined in a parameter list. The scope of such types is limited to the parameter list, which is rarely if ever desirable (it's impossible to call such a function with type- correct arguments). An anonymous union parm type is meaningful as a GNU extension, so don't warn for that. */ if (TREE_CODE (decl) != UNION_TYPE || b->id != 0) { if (b->id) /* The %s will be one of 'struct', 'union', or 'enum'. */ warning (0, "%<%s %E%> declared inside parameter list", keyword, b->id); else /* The %s will be one of 'struct', 'union', or 'enum'. */ warning (0, "anonymous %s declared inside parameter list", keyword); if (!explained_incomplete_types) { warning (0, "its scope is only this definition or declaration," " which is probably not what you want"); explained_incomplete_types = true; } } tag.id = b->id; tag.type = decl; vec_safe_push (tags, tag); break; case CONST_DECL: case TYPE_DECL: case FUNCTION_DECL: /* CONST_DECLs appear here when we have an embedded enum, and TYPE_DECLs appear here when we have an embedded struct or union. No warnings for this - we already warned about the type itself. FUNCTION_DECLs appear when there is an implicit function declaration in the parameter list. */ /* When we reinsert this decl in the function body, we need to reconstruct whether it was marked as nested. */ gcc_assert (TREE_CODE (decl) == FUNCTION_DECL ? b->nested : !b->nested); DECL_CHAIN (decl) = others; others = decl; /* fall through */ case ERROR_MARK: /* error_mark_node appears here when we have an undeclared variable. Just throw it away. */ if (b->id) { gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; } break; /* Other things that might be encountered. */ case LABEL_DECL: case VAR_DECL: default: gcc_unreachable (); } b = free_binding_and_advance (b); } arg_info->parms = parms; arg_info->tags = tags; arg_info->types = types; arg_info->others = others; arg_info->pending_sizes = expr; return arg_info; } /* Get the struct, enum or union (CODE says which) with tag NAME. Define the tag as a forward-reference with location LOC if it is not defined. Return a c_typespec structure for the type specifier. 
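
   Illustrative sketch, not part of the original source: the shadowing
   rule discussed below can be seen in (names are hypothetical):

       struct foo { int i; };
       void f (void)
       {
         union foo;        // must shadow the outer struct tag
         union foo *p;     // p points at the new union type
       }

   Returning the outer struct for the inner "union foo" reference
   would defeat that shadowing, which is why a tag of the wrong kind
   is never returned here.
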
*/ struct c_typespec parser_xref_tag (location_t loc, enum tree_code code, tree name) { struct c_typespec ret; tree ref; location_t refloc; ret.expr = NULL_TREE; ret.expr_const_operands = true; /* If a cross reference is requested, look up the type already defined for this tag and return it. */ ref = lookup_tag (code, name, 0, &refloc); /* If this is the right type of tag, return what we found. (This reference will be shadowed by shadow_tag later if appropriate.) If this is the wrong type of tag, do not return it. If it was the wrong type in the same scope, we will have had an error message already; if in a different scope and declaring a name, pending_xref_error will give an error message; but if in a different scope and not declaring a name, this tag should shadow the previous declaration of a different type of tag, and this would not work properly if we return the reference found. (For example, with "struct foo" in an outer scope, "union foo;" must shadow that tag with a new one of union type.) */ ret.kind = (ref ? ctsk_tagref : ctsk_tagfirstref); if (ref && TREE_CODE (ref) == code) { if (C_TYPE_DEFINED_IN_STRUCT (ref) && loc != UNKNOWN_LOCATION && warn_cxx_compat) { switch (code) { case ENUMERAL_TYPE: warning_at (loc, OPT_Wc___compat, ("enum type defined in struct or union " "is not visible in C++")); inform (refloc, "enum type defined here"); break; case RECORD_TYPE: warning_at (loc, OPT_Wc___compat, ("struct defined in struct or union " "is not visible in C++")); inform (refloc, "struct defined here"); break; case UNION_TYPE: warning_at (loc, OPT_Wc___compat, ("union defined in struct or union " "is not visible in C++")); inform (refloc, "union defined here"); break; default: gcc_unreachable(); } } ret.spec = ref; return ret; } /* If no such tag is yet defined, create a forward-reference node and record it as the "definition". When a real declaration of this type is found, the forward-reference will be altered into a real type. */ ref = make_node (code); if (code == ENUMERAL_TYPE) { /* Give the type a default layout like unsigned int to avoid crashing if it does not get defined. */ SET_TYPE_MODE (ref, TYPE_MODE (unsigned_type_node)); TYPE_ALIGN (ref) = TYPE_ALIGN (unsigned_type_node); TYPE_USER_ALIGN (ref) = 0; TYPE_UNSIGNED (ref) = 1; TYPE_PRECISION (ref) = TYPE_PRECISION (unsigned_type_node); TYPE_MIN_VALUE (ref) = TYPE_MIN_VALUE (unsigned_type_node); TYPE_MAX_VALUE (ref) = TYPE_MAX_VALUE (unsigned_type_node); } pushtag (loc, name, ref); ret.spec = ref; return ret; } /* Get the struct, enum or union (CODE says which) with tag NAME. Define the tag as a forward-reference if it is not defined. Return a tree for the type. */ tree xref_tag (enum tree_code code, tree name) { return parser_xref_tag (input_location, code, name).spec; } /* Make sure that the tag NAME is defined *in the current scope* at least as a forward reference. LOC is the location of the struct's definition. CODE says which kind of tag NAME ought to be. This stores the current value of the file static STRUCT_PARSE_INFO in *ENCLOSING_STRUCT_PARSE_INFO, and points STRUCT_PARSE_INFO at a new c_struct_parse_info structure. The old value of STRUCT_PARSE_INFO is restored in finish_struct. */ tree start_struct (location_t loc, enum tree_code code, tree name, struct c_struct_parse_info **enclosing_struct_parse_info) { /* If there is already a tag defined at this scope (as a forward reference), just return it. 
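
   Illustrative sketch, not part of the original source: the two error
   paths below correspond to (names are hypothetical):

       struct s { int i; };
       struct s { int i; };      // error: redefinition of "struct s"

       struct t {
         struct t { int i; } m;  // error: nested redefinition of
       };                        // "struct t"

   In both cases ref is reset to NULL_TREE so the bogus definition
   gets a fresh node instead of corrupting the original type.
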
*/ tree ref = NULL_TREE; location_t refloc = UNKNOWN_LOCATION; if (name != NULL_TREE) ref = lookup_tag (code, name, 1, &refloc); if (ref && TREE_CODE (ref) == code) { if (TYPE_SIZE (ref)) { if (code == UNION_TYPE) error_at (loc, "redefinition of %<union %E%>", name); else error_at (loc, "redefinition of %<struct %E%>", name); if (refloc != UNKNOWN_LOCATION) inform (refloc, "originally defined here"); /* Don't create structures using a name already in use. */ ref = NULL_TREE; } else if (C_TYPE_BEING_DEFINED (ref)) { if (code == UNION_TYPE) error_at (loc, "nested redefinition of %<union %E%>", name); else error_at (loc, "nested redefinition of %<struct %E%>", name); /* Don't bother to report "originally defined here" for a nested redefinition; the original definition should be obvious. */ /* Don't create structures that contain themselves. */ ref = NULL_TREE; } } /* Otherwise create a forward-reference just so the tag is in scope. */ if (ref == NULL_TREE || TREE_CODE (ref) != code) { ref = make_node (code); pushtag (loc, name, ref); } C_TYPE_BEING_DEFINED (ref) = 1; TYPE_PACKED (ref) = flag_pack_struct; *enclosing_struct_parse_info = struct_parse_info; struct_parse_info = XNEW (struct c_struct_parse_info); struct_parse_info->struct_types.create (0); struct_parse_info->fields.create (0); struct_parse_info->typedefs_seen.create (0); /* FIXME: This will issue a warning for a use of a type defined within a statement expr used within sizeof, et. al. This is not terribly serious as C++ doesn't permit statement exprs within sizeof anyhow. */ if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof)) warning_at (loc, OPT_Wc___compat, "defining type in %qs expression is invalid in C++", (in_sizeof ? "sizeof" : (in_typeof ? "typeof" : "alignof"))); return ref; } /* Process the specs, declarator and width (NULL if omitted) of a structure component, returning a FIELD_DECL node. WIDTH is non-NULL for bit-fields only, and is an INTEGER_CST node. DECL_ATTRS is as for grokdeclarator. LOC is the location of the structure component. This is done during the parsing of the struct declaration. The FIELD_DECL nodes are chained together and the lot of them are ultimately passed to `build_struct' to make the RECORD_TYPE node. */ tree grokfield (location_t loc, struct c_declarator *declarator, struct c_declspecs *declspecs, tree width, tree *decl_attrs) { tree value; if (declarator->kind == cdk_id && declarator->u.id == NULL_TREE && width == NULL_TREE) { /* This is an unnamed decl. If we have something of the form "union { list } ;" then this is the anonymous union extension. Similarly for struct. If this is something of the form "struct foo;", then If MS or Plan 9 extensions are enabled, this is handled as an anonymous struct. Otherwise this is a forward declaration of a structure tag. If this is something of the form "foo;" and foo is a TYPE_DECL, then If foo names a structure or union without a tag, then this is an anonymous struct (this is permitted by C11). If MS or Plan 9 extensions are enabled and foo names a structure, then again this is an anonymous struct. Otherwise this is an error. Oh what a horrid tangled web we weave. I wonder if MS consciously took this from Plan 9 or if it was an accident of implementation that took root before someone noticed the bug... 
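
   Illustrative sketch, not part of the original source: the
   unnamed-member cases enumerated above include
   (names are hypothetical):

       struct outer {
         union { int i; float f; };  // C11 anonymous union: accepted,
       };                            // pedwarned before C11

       struct outer2 {
         struct named { int i; };    // accepted only with -fms-extensions
       };                            // or -fplan9-extensions

       struct outer3 { int; };       // "declaration does not declare
                                     // anything"
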
*/ tree type = declspecs->type; bool type_ok = (TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE); bool ok = false; if (type_ok && (flag_ms_extensions || flag_plan9_extensions || !declspecs->typedef_p)) { if (flag_ms_extensions || flag_plan9_extensions) ok = true; else if (TYPE_NAME (type) == NULL) ok = true; else ok = false; } if (!ok) { pedwarn (loc, 0, "declaration does not declare anything"); return NULL_TREE; } if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 doesn%'t support unnamed structs/unions"); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 doesn%'t support unnamed structs/unions"); } value = grokdeclarator (declarator, declspecs, FIELD, false, width ? &width : NULL, decl_attrs, NULL, NULL, DEPRECATED_NORMAL); finish_decl (value, loc, NULL_TREE, NULL_TREE, NULL_TREE); DECL_INITIAL (value) = width; if (warn_cxx_compat && DECL_NAME (value) != NULL_TREE) { /* If we currently have a binding for this field, set the in_struct field in the binding, so that we warn about lookups which find it. */ struct c_binding *b = I_SYMBOL_BINDING (DECL_NAME (value)); if (b != NULL) { /* If the in_struct field is not yet set, push it on a list to be cleared when this struct is finished. */ if (!b->in_struct) { struct_parse_info->fields.safe_push (b); b->in_struct = 1; } } } return value; } /* Subroutine of detect_field_duplicates: return whether X and Y, which are both fields in the same struct, have duplicate field names. */ static bool is_duplicate_field (tree x, tree y) { if (DECL_NAME (x) != NULL_TREE && DECL_NAME (x) == DECL_NAME (y)) return true; /* When using -fplan9-extensions, an anonymous field whose name is a typedef can duplicate a field name. */ if (flag_plan9_extensions && (DECL_NAME (x) == NULL_TREE || DECL_NAME (y) == NULL_TREE)) { tree xt, xn, yt, yn; xt = TREE_TYPE (x); if (DECL_NAME (x) != NULL_TREE) xn = DECL_NAME (x); else if ((TREE_CODE (xt) == RECORD_TYPE || TREE_CODE (xt) == UNION_TYPE) && TYPE_NAME (xt) != NULL_TREE && TREE_CODE (TYPE_NAME (xt)) == TYPE_DECL) xn = DECL_NAME (TYPE_NAME (xt)); else xn = NULL_TREE; yt = TREE_TYPE (y); if (DECL_NAME (y) != NULL_TREE) yn = DECL_NAME (y); else if ((TREE_CODE (yt) == RECORD_TYPE || TREE_CODE (yt) == UNION_TYPE) && TYPE_NAME (yt) != NULL_TREE && TREE_CODE (TYPE_NAME (yt)) == TYPE_DECL) yn = DECL_NAME (TYPE_NAME (yt)); else yn = NULL_TREE; if (xn != NULL_TREE && xn == yn) return true; } return false; } /* Subroutine of detect_field_duplicates: add the fields of FIELDLIST to HTAB, giving errors for any duplicates. */ static void detect_field_duplicates_hash (tree fieldlist, hash_table<pointer_hash <tree_node> > *htab) { tree x, y; tree_node **slot; for (x = fieldlist; x ; x = DECL_CHAIN (x)) if ((y = DECL_NAME (x)) != 0) { slot = htab->find_slot (y, INSERT); if (*slot) { error ("duplicate member %q+D", x); DECL_NAME (x) = NULL_TREE; } *slot = y; } else if (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE) { detect_field_duplicates_hash (TYPE_FIELDS (TREE_TYPE (x)), htab); /* When using -fplan9-extensions, an anonymous field whose name is a typedef can duplicate a field name. */ if (flag_plan9_extensions && TYPE_NAME (TREE_TYPE (x)) != NULL_TREE && TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL) { tree xn = DECL_NAME (TYPE_NAME (TREE_TYPE (x))); slot = htab->find_slot (xn, INSERT); if (*slot) error ("duplicate member %q+D", TYPE_NAME (TREE_TYPE (x))); *slot = xn; } } } /* Generate an error for any duplicate field names in FIELDLIST. 
Munge the list such that this does not present a problem later. */ static void detect_field_duplicates (tree fieldlist) { tree x, y; int timeout = 10; /* If the struct is the list of instance variables of an Objective-C class, then we need to check all the instance variables of superclasses when checking for duplicates (since you can't have an instance variable in a subclass with the same name as an instance variable in a superclass). We pass on this job to the Objective-C compiler. objc_detect_field_duplicates() will return false if we are not checking the list of instance variables and the C frontend should proceed with the standard field duplicate checks. If we are checking the list of instance variables, the ObjC frontend will do the check, emit the errors if needed, and then return true. */ if (c_dialect_objc ()) if (objc_detect_field_duplicates (false)) return; /* First, see if there are more than "a few" fields. This is trivially true if there are zero or one fields. */ if (!fieldlist || !DECL_CHAIN (fieldlist)) return; x = fieldlist; do { timeout--; if (DECL_NAME (x) == NULL_TREE && (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE)) timeout = 0; x = DECL_CHAIN (x); } while (timeout > 0 && x); /* If there were "few" fields and no anonymous structures or unions, avoid the overhead of allocating a hash table. Instead just do the nested traversal thing. */ if (timeout > 0) { for (x = DECL_CHAIN (fieldlist); x; x = DECL_CHAIN (x)) /* When using -fplan9-extensions, we can have duplicates between typedef names and fields. */ if (DECL_NAME (x) || (flag_plan9_extensions && DECL_NAME (x) == NULL_TREE && (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE) && TYPE_NAME (TREE_TYPE (x)) != NULL_TREE && TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL)) { for (y = fieldlist; y != x; y = TREE_CHAIN (y)) if (is_duplicate_field (y, x)) { error ("duplicate member %q+D", x); DECL_NAME (x) = NULL_TREE; } } } else { hash_table<pointer_hash <tree_node> > htab (37); detect_field_duplicates_hash (fieldlist, &htab); } } /* Finish up struct info used by -Wc++-compat. */ static void warn_cxx_compat_finish_struct (tree fieldlist) { unsigned int ix; tree x; struct c_binding *b; /* Set the C_TYPE_DEFINED_IN_STRUCT flag for each type defined in the current struct. We do this now at the end of the struct because the flag is used to issue visibility warnings, and we only want to issue those warnings if the type is referenced outside of the struct declaration. */ FOR_EACH_VEC_ELT (struct_parse_info->struct_types, ix, x) C_TYPE_DEFINED_IN_STRUCT (x) = 1; /* The TYPEDEFS_SEEN field of STRUCT_PARSE_INFO is a list of typedefs used when declaring fields in this struct. If the name of any of the fields is also a typedef name then the struct would not parse in C++, because the C++ lookup rules say that the typedef name would be looked up in the context of the struct, and would thus be the field rather than the typedef. */ if (!struct_parse_info->typedefs_seen.is_empty () && fieldlist != NULL_TREE) { /* Use a hash_set<tree> using the name of the typedef. We can use a hash_set<tree> because identifiers are interned. 
*/ hash_set<tree> tset; FOR_EACH_VEC_ELT (struct_parse_info->typedefs_seen, ix, x) tset.add (DECL_NAME (x)); for (x = fieldlist; x != NULL_TREE; x = DECL_CHAIN (x)) { if (DECL_NAME (x) != NULL_TREE && tset.contains (DECL_NAME (x))) { warning_at (DECL_SOURCE_LOCATION (x), OPT_Wc___compat, ("using %qD as both field and typedef name is " "invalid in C++"), x); /* FIXME: It would be nice to report the location where the typedef name is used. */ } } } /* For each field which has a binding and which was not defined in an enclosing struct, clear the in_struct field. */ FOR_EACH_VEC_ELT (struct_parse_info->fields, ix, b) b->in_struct = 0; } /* Fill in the fields of a RECORD_TYPE or UNION_TYPE node, T. LOC is the location of the RECORD_TYPE or UNION_TYPE's definition. FIELDLIST is a chain of FIELD_DECL nodes for the fields. ATTRIBUTES are attributes to be applied to the structure. ENCLOSING_STRUCT_PARSE_INFO is the value of STRUCT_PARSE_INFO when the struct was started. */ tree finish_struct (location_t loc, tree t, tree fieldlist, tree attributes, struct c_struct_parse_info *enclosing_struct_parse_info) { tree x; bool toplevel = file_scope == current_scope; int saw_named_field; /* If this type was previously laid out as a forward reference, make sure we lay it out again. */ TYPE_SIZE (t) = 0; decl_attributes (&t, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE); if (pedantic) { for (x = fieldlist; x; x = DECL_CHAIN (x)) { if (DECL_NAME (x) != 0) break; if (flag_isoc11 && (TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE)) break; } if (x == 0) { if (TREE_CODE (t) == UNION_TYPE) { if (fieldlist) pedwarn (loc, OPT_Wpedantic, "union has no named members"); else pedwarn (loc, OPT_Wpedantic, "union has no members"); } else { if (fieldlist) pedwarn (loc, OPT_Wpedantic, "struct has no named members"); else pedwarn (loc, OPT_Wpedantic, "struct has no members"); } } } /* Install struct as DECL_CONTEXT of each field decl. Also process specified field sizes, found in the DECL_INITIAL, storing 0 there after the type has been changed to precision equal to its width, rather than the precision of the specified standard type. (Correct layout requires the original type to have been preserved until now.) */ saw_named_field = 0; for (x = fieldlist; x; x = DECL_CHAIN (x)) { if (TREE_TYPE (x) == error_mark_node) continue; DECL_CONTEXT (x) = t; /* If any field is const, the structure type is pseudo-const. */ if (TREE_READONLY (x)) C_TYPE_FIELDS_READONLY (t) = 1; else { /* A field that is pseudo-const makes the structure likewise. */ tree t1 = strip_array_types (TREE_TYPE (x)); if ((TREE_CODE (t1) == RECORD_TYPE || TREE_CODE (t1) == UNION_TYPE) && C_TYPE_FIELDS_READONLY (t1)) C_TYPE_FIELDS_READONLY (t) = 1; } /* Any field that is volatile means variables of this type must be treated in some ways as volatile. */ if (TREE_THIS_VOLATILE (x)) C_TYPE_FIELDS_VOLATILE (t) = 1; /* Any field of nominal variable size implies structure is too. */ if (C_DECL_VARIABLE_SIZE (x)) C_TYPE_VARIABLE_SIZE (t) = 1; if (DECL_INITIAL (x)) { unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (x)); DECL_SIZE (x) = bitsize_int (width); DECL_BIT_FIELD (x) = 1; SET_DECL_C_BIT_FIELD (x); } if (TYPE_PACKED (t) && (DECL_BIT_FIELD (x) || TYPE_ALIGN (TREE_TYPE (x)) > BITS_PER_UNIT)) DECL_PACKED (x) = 1; /* Detect flexible array member in an invalid context. 
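
   Illustrative sketch, not part of the original source: the placement
   rules enforced below are (names are hypothetical):

       struct ok   { int n; char tail[]; };  // valid: last member of a
                                             // struct with other members
       union  bad1 { int n; char tail[]; };  // error: flexible array
                                             // member in union
       struct bad2 { char tail[]; int n; };  // error: not at end of struct
       struct bad3 { char tail[]; };         // error: in otherwise empty
                                             // struct

   The last case is rejected because the structure would otherwise
   have no members that give it a size.
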
*/ if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE && TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE && TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE) { if (TREE_CODE (t) == UNION_TYPE) { error_at (DECL_SOURCE_LOCATION (x), "flexible array member in union"); TREE_TYPE (x) = error_mark_node; } else if (DECL_CHAIN (x) != NULL_TREE) { error_at (DECL_SOURCE_LOCATION (x), "flexible array member not at end of struct"); TREE_TYPE (x) = error_mark_node; } else if (!saw_named_field) { error_at (DECL_SOURCE_LOCATION (x), "flexible array member in otherwise empty struct"); TREE_TYPE (x) = error_mark_node; } } if (pedantic && TREE_CODE (t) == RECORD_TYPE && flexible_array_type_p (TREE_TYPE (x))) pedwarn (DECL_SOURCE_LOCATION (x), OPT_Wpedantic, "invalid use of structure with flexible array member"); if (DECL_NAME (x) || TREE_CODE (TREE_TYPE (x)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (x)) == UNION_TYPE) saw_named_field = 1; } detect_field_duplicates (fieldlist); /* Now we have the nearly final fieldlist. Record it, then lay out the structure or union (including the fields). */ TYPE_FIELDS (t) = fieldlist; layout_type (t); if (TYPE_SIZE_UNIT (t) && TREE_CODE (TYPE_SIZE_UNIT (t)) == INTEGER_CST && !TREE_OVERFLOW (TYPE_SIZE_UNIT (t)) && !valid_constant_size_p (TYPE_SIZE_UNIT (t))) error ("type %qT is too large", t); /* Give bit-fields their proper types. */ { tree *fieldlistp = &fieldlist; while (*fieldlistp) if (TREE_CODE (*fieldlistp) == FIELD_DECL && DECL_INITIAL (*fieldlistp) && TREE_TYPE (*fieldlistp) != error_mark_node) { unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (*fieldlistp)); tree type = TREE_TYPE (*fieldlistp); if (width != TYPE_PRECISION (type)) { TREE_TYPE (*fieldlistp) = c_build_bitfield_integer_type (width, TYPE_UNSIGNED (type)); DECL_MODE (*fieldlistp) = TYPE_MODE (TREE_TYPE (*fieldlistp)); } DECL_INITIAL (*fieldlistp) = 0; } else fieldlistp = &DECL_CHAIN (*fieldlistp); } /* Now we have the truly final field list. Store it in this type and in the variants. */ TYPE_FIELDS (t) = fieldlist; /* If there are lots of fields, sort so we can look through them fast. We arbitrarily consider 16 or more elts to be "a lot". */ { int len = 0; for (x = fieldlist; x; x = DECL_CHAIN (x)) { if (len > 15 || DECL_NAME (x) == NULL) break; len += 1; } if (len > 15) { tree *field_array; struct lang_type *space; struct sorted_fields_type *space2; len += list_length (x); /* Use the same allocation policy here that make_node uses, to ensure that this lives as long as the rest of the struct decl. All decls in an inline function need to be saved. */ space = ggc_cleared_alloc<struct lang_type> (); space2 = (sorted_fields_type *) ggc_internal_alloc (sizeof (struct sorted_fields_type) + len * sizeof (tree)); len = 0; space->s = space2; field_array = &space2->elts[0]; for (x = fieldlist; x; x = DECL_CHAIN (x)) { field_array[len++] = x; /* If there is anonymous struct or union, break out of the loop. */ if (DECL_NAME (x) == NULL) break; } /* Found no anonymous struct/union. Add the TYPE_LANG_SPECIFIC. 
*/ if (x == NULL) { TYPE_LANG_SPECIFIC (t) = space; TYPE_LANG_SPECIFIC (t)->s->len = len; field_array = TYPE_LANG_SPECIFIC (t)->s->elts; qsort (field_array, len, sizeof (tree), field_decl_cmp); } } } for (x = TYPE_MAIN_VARIANT (t); x; x = TYPE_NEXT_VARIANT (x)) { TYPE_FIELDS (x) = TYPE_FIELDS (t); TYPE_LANG_SPECIFIC (x) = TYPE_LANG_SPECIFIC (t); C_TYPE_FIELDS_READONLY (x) = C_TYPE_FIELDS_READONLY (t); C_TYPE_FIELDS_VOLATILE (x) = C_TYPE_FIELDS_VOLATILE (t); C_TYPE_VARIABLE_SIZE (x) = C_TYPE_VARIABLE_SIZE (t); } /* If this was supposed to be a transparent union, but we can't make it one, warn and turn off the flag. */ if (TREE_CODE (t) == UNION_TYPE && TYPE_TRANSPARENT_AGGR (t) && (!TYPE_FIELDS (t) || TYPE_MODE (t) != DECL_MODE (TYPE_FIELDS (t)))) { TYPE_TRANSPARENT_AGGR (t) = 0; warning_at (loc, 0, "union cannot be made transparent"); } /* If this structure or union completes the type of any previous variable declaration, lay it out and output its rtl. */ for (x = C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t)); x; x = TREE_CHAIN (x)) { tree decl = TREE_VALUE (x); if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) layout_array_type (TREE_TYPE (decl)); if (TREE_CODE (decl) != TYPE_DECL) { layout_decl (decl, 0); if (c_dialect_objc ()) objc_check_decl (decl); rest_of_decl_compilation (decl, toplevel, 0); } } C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t)) = 0; /* Update type location to the one of the definition, instead of e.g. a forward declaration. */ if (TYPE_STUB_DECL (t)) DECL_SOURCE_LOCATION (TYPE_STUB_DECL (t)) = loc; /* Finish debugging output for this type. */ rest_of_type_compilation (t, toplevel); /* If we're inside a function proper, i.e. not file-scope and not still parsing parameters, then arrange for the size of a variable sized type to be bound now. */ if (building_stmt_list_p () && variably_modified_type_p (t, NULL_TREE)) add_stmt (build_stmt (loc, DECL_EXPR, build_decl (loc, TYPE_DECL, NULL, t))); if (warn_cxx_compat) warn_cxx_compat_finish_struct (fieldlist); struct_parse_info->struct_types.release (); struct_parse_info->fields.release (); struct_parse_info->typedefs_seen.release (); XDELETE (struct_parse_info); struct_parse_info = enclosing_struct_parse_info; /* If this struct is defined inside a struct, add it to struct_types. */ if (warn_cxx_compat && struct_parse_info != NULL && !in_sizeof && !in_typeof && !in_alignof) struct_parse_info->struct_types.safe_push (t); return t; } /* Lay out the type T, and its element type, and so on. */ static void layout_array_type (tree t) { if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) layout_array_type (TREE_TYPE (t)); layout_type (t); } /* Begin compiling the definition of an enumeration type. NAME is its name (or null if anonymous). LOC is the enum's location. Returns the type object, as yet incomplete. Also records info about it so that build_enumerator may be used to declare the individual values as they are read. */ tree start_enum (location_t loc, struct c_enum_contents *the_enum, tree name) { tree enumtype = NULL_TREE; location_t enumloc = UNKNOWN_LOCATION; /* If this is the real definition for a previous forward reference, fill in the contents in the same object that used to be the forward reference. 
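
   Illustrative sketch, not part of the original source: the
   forward-reference and redefinition handling below covers
   (names are hypothetical):

       enum color;                 // forward reference: node filled in
                                   // when the definition arrives
       enum color { RED, GREEN };  // real definition reuses that node
       enum color { BLUE };        // error: redeclaration of "enum color"

   A nested redefinition, where the enum is defined again inside its
   own enumerator list, is diagnosed separately just below.
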
*/ if (name != NULL_TREE) enumtype = lookup_tag (ENUMERAL_TYPE, name, 1, &enumloc); if (enumtype == 0 || TREE_CODE (enumtype) != ENUMERAL_TYPE) { enumtype = make_node (ENUMERAL_TYPE); pushtag (loc, name, enumtype); } if (C_TYPE_BEING_DEFINED (enumtype)) error_at (loc, "nested redefinition of %<enum %E%>", name); C_TYPE_BEING_DEFINED (enumtype) = 1; if (TYPE_VALUES (enumtype) != 0) { /* This enum is a named one that has been declared already. */ error_at (loc, "redeclaration of %<enum %E%>", name); if (enumloc != UNKNOWN_LOCATION) inform (enumloc, "originally defined here"); /* Completely replace its old definition. The old enumerators remain defined, however. */ TYPE_VALUES (enumtype) = 0; } the_enum->enum_next_value = integer_zero_node; the_enum->enum_overflow = 0; if (flag_short_enums) TYPE_PACKED (enumtype) = 1; /* FIXME: This will issue a warning for a use of a type defined within sizeof in a statement expr. This is not terribly serious as C++ doesn't permit statement exprs within sizeof anyhow. */ if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof)) warning_at (loc, OPT_Wc___compat, "defining type in %qs expression is invalid in C++", (in_sizeof ? "sizeof" : (in_typeof ? "typeof" : "alignof"))); return enumtype; } /* After processing and defining all the values of an enumeration type, install their decls in the enumeration type and finish it off. ENUMTYPE is the type object, VALUES a list of decl-value pairs, and ATTRIBUTES are the specified attributes. Returns ENUMTYPE. */ tree finish_enum (tree enumtype, tree values, tree attributes) { tree pair, tem; tree minnode = 0, maxnode = 0; int precision; signop sign; bool toplevel = (file_scope == current_scope); struct lang_type *lt; decl_attributes (&enumtype, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE); /* Calculate the maximum value of any enumerator in this type. */ if (values == error_mark_node) minnode = maxnode = integer_zero_node; else { minnode = maxnode = TREE_VALUE (values); for (pair = TREE_CHAIN (values); pair; pair = TREE_CHAIN (pair)) { tree value = TREE_VALUE (pair); if (tree_int_cst_lt (maxnode, value)) maxnode = value; if (tree_int_cst_lt (value, minnode)) minnode = value; } } /* Construct the final type of this enumeration. It is the same as one of the integral types - the narrowest one that fits, except that normally we only go as narrow as int - and signed iff any of the values are negative. */ sign = (tree_int_cst_sgn (minnode) >= 0) ? UNSIGNED : SIGNED; precision = MAX (tree_int_cst_min_precision (minnode, sign), tree_int_cst_min_precision (maxnode, sign)); /* If the precision of the type was specified with an attribute and it was too small, give an error. Otherwise, use it. */ if (TYPE_PRECISION (enumtype)) { if (precision > TYPE_PRECISION (enumtype)) { TYPE_PRECISION (enumtype) = 0; error ("specified mode too small for enumeral values"); } else precision = TYPE_PRECISION (enumtype); } if (TYPE_PACKED (enumtype) || precision > TYPE_PRECISION (integer_type_node) || TYPE_PRECISION (enumtype)) { tem = c_common_type_for_size (precision, sign == UNSIGNED ? 1 : 0); if (tem == NULL) { warning (0, "enumeration values exceed range of largest integer"); tem = long_long_integer_type_node; } } else tem = sign == UNSIGNED ? 
unsigned_type_node : integer_type_node; TYPE_MIN_VALUE (enumtype) = TYPE_MIN_VALUE (tem); TYPE_MAX_VALUE (enumtype) = TYPE_MAX_VALUE (tem); TYPE_UNSIGNED (enumtype) = TYPE_UNSIGNED (tem); TYPE_SIZE (enumtype) = 0; TYPE_PRECISION (enumtype) = TYPE_PRECISION (tem); layout_type (enumtype); if (values != error_mark_node) { /* Change the type of the enumerators to be the enum type. We need to do this irrespective of the size of the enum, for proper type checking. Replace the DECL_INITIALs of the enumerators, and the value slots of the list, with copies that have the enum type; they cannot be modified in place because they may be shared (e.g. integer_zero_node) Finally, change the purpose slots to point to the names of the decls. */ for (pair = values; pair; pair = TREE_CHAIN (pair)) { tree enu = TREE_PURPOSE (pair); tree ini = DECL_INITIAL (enu); TREE_TYPE (enu) = enumtype; /* The ISO C Standard mandates enumerators to have type int, even though the underlying type of an enum type is unspecified. However, GCC allows enumerators of any integer type as an extensions. build_enumerator() converts any enumerators that fit in an int to type int, to avoid promotions to unsigned types when comparing integers with enumerators that fit in the int range. When -pedantic is given, build_enumerator() would have already warned about those that don't fit. Here we convert the rest to the enumerator type. */ if (TREE_TYPE (ini) != integer_type_node) ini = convert (enumtype, ini); DECL_INITIAL (enu) = ini; TREE_PURPOSE (pair) = DECL_NAME (enu); TREE_VALUE (pair) = ini; } TYPE_VALUES (enumtype) = values; } /* Record the min/max values so that we can warn about bit-field enumerations that are too small for the values. */ lt = ggc_cleared_alloc<struct lang_type> (); lt->enum_min = minnode; lt->enum_max = maxnode; TYPE_LANG_SPECIFIC (enumtype) = lt; /* Fix up all variant types of this enum type. */ for (tem = TYPE_MAIN_VARIANT (enumtype); tem; tem = TYPE_NEXT_VARIANT (tem)) { if (tem == enumtype) continue; TYPE_VALUES (tem) = TYPE_VALUES (enumtype); TYPE_MIN_VALUE (tem) = TYPE_MIN_VALUE (enumtype); TYPE_MAX_VALUE (tem) = TYPE_MAX_VALUE (enumtype); TYPE_SIZE (tem) = TYPE_SIZE (enumtype); TYPE_SIZE_UNIT (tem) = TYPE_SIZE_UNIT (enumtype); SET_TYPE_MODE (tem, TYPE_MODE (enumtype)); TYPE_PRECISION (tem) = TYPE_PRECISION (enumtype); TYPE_ALIGN (tem) = TYPE_ALIGN (enumtype); TYPE_USER_ALIGN (tem) = TYPE_USER_ALIGN (enumtype); TYPE_UNSIGNED (tem) = TYPE_UNSIGNED (enumtype); TYPE_LANG_SPECIFIC (tem) = TYPE_LANG_SPECIFIC (enumtype); } /* Finish debugging output for this type. */ rest_of_type_compilation (enumtype, toplevel); /* If this enum is defined inside a struct, add it to struct_types. */ if (warn_cxx_compat && struct_parse_info != NULL && !in_sizeof && !in_typeof && !in_alignof) struct_parse_info->struct_types.safe_push (enumtype); return enumtype; } /* Build and install a CONST_DECL for one value of the current enumeration type (one that was begun with start_enum). DECL_LOC is the location of the enumerator. LOC is the location of the '=' operator if any, DECL_LOC otherwise. Return a tree-list containing the CONST_DECL and its value. Assignment of sequential values by default is handled here. */ tree build_enumerator (location_t decl_loc, location_t loc, struct c_enum_contents *the_enum, tree name, tree value) { tree decl, type; /* Validate and default VALUE. */ if (value != 0) { /* Don't issue more errors for error_mark_node (i.e. an undeclared identifier) - just ignore the value expression. 
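
   Illustrative sketch, not part of the original source: the validation
   below classifies enumerator values along these lines (names are
   hypothetical, and a 32-bit int is assumed):

       enum a { A = 1.5 };            // error: enumerator value for "A"
                                      // is not an integer constant
       enum b { B = 5000000000 };     // pedwarn: ISO C restricts
                                      // enumerator values to range
                                      // of "int"
       enum c { C = 2147483647, D };  // error: overflow in enumeration
                                      // values (D would need INT_MAX + 1)

   The middle case is accepted as a GCC extension and only warned
   about with -pedantic.
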
      */
      if (value == error_mark_node)
        value = 0;
      else if (!INTEGRAL_TYPE_P (TREE_TYPE (value)))
        {
          error_at (loc, "enumerator value for %qE is not an integer constant",
                    name);
          value = 0;
        }
      else
        {
          if (TREE_CODE (value) != INTEGER_CST)
            {
              value = c_fully_fold (value, false, NULL);
              if (TREE_CODE (value) == INTEGER_CST)
                pedwarn (loc, OPT_Wpedantic,
                         "enumerator value for %qE is not an integer "
                         "constant expression", name);
            }
          if (TREE_CODE (value) != INTEGER_CST)
            {
              error ("enumerator value for %qE is not an integer constant",
                     name);
              value = 0;
            }
          else
            {
              value = default_conversion (value);
              constant_expression_warning (value);
            }
        }
    }

  /* Default based on previous value. */
  /* It should no longer be possible to have NON_LVALUE_EXPR
     in the default. */
  if (value == 0)
    {
      value = the_enum->enum_next_value;
      if (the_enum->enum_overflow)
        error_at (loc, "overflow in enumeration values");
    }
  /* Even though the underlying type of an enum is unspecified, the
     type of enumeration constants is explicitly defined as int
     (6.4.4.3/2 in the C99 Standard).  GCC allows any integer type as
     an extension. */
  else if (!int_fits_type_p (value, integer_type_node))
    pedwarn (loc, OPT_Wpedantic,
             "ISO C restricts enumerator values to range of %<int%>");

  /* The ISO C Standard mandates enumerators to have type int, even
     though the underlying type of an enum type is unspecified.
     However, GCC allows enumerators of any integer type as an
     extension.  Here we convert any enumerators that fit in an int
     to type int, to avoid promotions to unsigned types when
     comparing integers with enumerators that fit in the int range.
     When -pedantic is given, we would have already warned about
     those that don't fit.  We have to do this here rather than in
     finish_enum because this value may be used to define more
     enumerators. */
  if (int_fits_type_p (value, integer_type_node))
    value = convert (integer_type_node, value);

  /* Set basis for default for next value. */
  the_enum->enum_next_value
    = build_binary_op (EXPR_LOC_OR_LOC (value, input_location),
                       PLUS_EXPR, value, integer_one_node, 0);
  the_enum->enum_overflow = tree_int_cst_lt (the_enum->enum_next_value, value);

  /* Now create a declaration for the enum value name. */

  type = TREE_TYPE (value);
  type = c_common_type_for_size (MAX (TYPE_PRECISION (type),
                                      TYPE_PRECISION (integer_type_node)),
                                 (TYPE_PRECISION (type)
                                  >= TYPE_PRECISION (integer_type_node)
                                  && TYPE_UNSIGNED (type)));

  decl = build_decl (decl_loc, CONST_DECL, name, type);
  DECL_INITIAL (decl) = convert (type, value);
  pushdecl (decl);

  return tree_cons (decl, value, NULL_TREE);
}

/* Create the FUNCTION_DECL for a function definition.
   DECLSPECS, DECLARATOR and ATTRIBUTES are the parts of
   the declaration; they describe the function's name and the type it
   returns, but twisted together in a fashion that parallels the
   syntax of C.

   This function creates a binding context for the function body
   as well as setting up the FUNCTION_DECL in current_function_decl.

   Returns 1 on success.  If the DECLARATOR is not suitable for a
   function (it defines a datum instead), we return 0, which tells
   yyparse to report a parse error. */

int
start_function (struct c_declspecs *declspecs, struct c_declarator *declarator,
                tree attributes)
{
  tree decl1, old_decl;
  tree restype, resdecl;
  location_t loc;

  current_function_returns_value = 0;  /* Assume, until we see it does.
*/ current_function_returns_null = 0; current_function_returns_abnormally = 0; warn_about_return_type = 0; c_switch_stack = NULL; /* Indicate no valid break/continue context by setting these variables to some non-null, non-label value. We'll notice and emit the proper error message in c_finish_bc_stmt. */ c_break_label = c_cont_label = size_zero_node; decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, true, NULL, &attributes, NULL, NULL, DEPRECATED_NORMAL); /* If the declarator is not suitable for a function definition, cause a syntax error. */ if (decl1 == 0 || TREE_CODE (decl1) != FUNCTION_DECL) return 0; loc = DECL_SOURCE_LOCATION (decl1); c_decl_attributes (&decl1, attributes, 0); if (DECL_DECLARED_INLINE_P (decl1) && DECL_UNINLINABLE (decl1) && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl1))) warning_at (loc, OPT_Wattributes, "inline function %qD given attribute noinline", decl1); /* Handle gnu_inline attribute. */ if (declspecs->inline_p && !flag_gnu89_inline && TREE_CODE (decl1) == FUNCTION_DECL && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl1)) || current_function_decl)) { if (declspecs->storage_class != csc_static) DECL_EXTERNAL (decl1) = !DECL_EXTERNAL (decl1); } announce_function (decl1); if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (TREE_TYPE (decl1)))) { error_at (loc, "return type is an incomplete type"); /* Make it return void instead. */ TREE_TYPE (decl1) = build_function_type (void_type_node, TYPE_ARG_TYPES (TREE_TYPE (decl1))); } if (warn_about_return_type) warn_defaults_to (loc, flag_isoc99 ? OPT_Wimplicit_int : (warn_return_type ? OPT_Wreturn_type : OPT_Wimplicit_int), "return type defaults to %<int%>"); /* Make the init_value nonzero so pushdecl knows this is not tentative. error_mark_node is replaced below (in pop_scope) with the BLOCK. */ DECL_INITIAL (decl1) = error_mark_node; /* A nested function is not global. */ if (current_function_decl != 0) TREE_PUBLIC (decl1) = 0; /* If this definition isn't a prototype and we had a prototype declaration before, copy the arg type info from that prototype. */ old_decl = lookup_name_in_scope (DECL_NAME (decl1), current_scope); if (old_decl && TREE_CODE (old_decl) != FUNCTION_DECL) old_decl = 0; current_function_prototype_locus = UNKNOWN_LOCATION; current_function_prototype_built_in = false; current_function_prototype_arg_types = NULL_TREE; if (!prototype_p (TREE_TYPE (decl1))) { if (old_decl != 0 && TREE_CODE (TREE_TYPE (old_decl)) == FUNCTION_TYPE && comptypes (TREE_TYPE (TREE_TYPE (decl1)), TREE_TYPE (TREE_TYPE (old_decl)))) { TREE_TYPE (decl1) = composite_type (TREE_TYPE (old_decl), TREE_TYPE (decl1)); current_function_prototype_locus = DECL_SOURCE_LOCATION (old_decl); current_function_prototype_built_in = C_DECL_BUILTIN_PROTOTYPE (old_decl); current_function_prototype_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl1)); } if (TREE_PUBLIC (decl1)) { /* If there is an external prototype declaration of this function, record its location but do not copy information to this decl. This may be an invisible declaration (built-in or in a scope which has finished) or simply have more refined argument types than any declaration found above. */ struct c_binding *b; for (b = I_SYMBOL_BINDING (DECL_NAME (decl1)); b; b = b->shadowed) if (B_IN_SCOPE (b, external_scope)) break; if (b) { tree ext_decl, ext_type; ext_decl = b->decl; ext_type = b->u.type ? 
b->u.type : TREE_TYPE (ext_decl); if (TREE_CODE (ext_type) == FUNCTION_TYPE && comptypes (TREE_TYPE (TREE_TYPE (decl1)), TREE_TYPE (ext_type))) { current_function_prototype_locus = DECL_SOURCE_LOCATION (ext_decl); current_function_prototype_built_in = C_DECL_BUILTIN_PROTOTYPE (ext_decl); current_function_prototype_arg_types = TYPE_ARG_TYPES (ext_type); } } } } /* Optionally warn of old-fashioned def with no previous prototype. */ if (warn_strict_prototypes && old_decl != error_mark_node && !prototype_p (TREE_TYPE (decl1)) && C_DECL_ISNT_PROTOTYPE (old_decl)) warning_at (loc, OPT_Wstrict_prototypes, "function declaration isn%'t a prototype"); /* Optionally warn of any global def with no previous prototype. */ else if (warn_missing_prototypes && old_decl != error_mark_node && TREE_PUBLIC (decl1) && !MAIN_NAME_P (DECL_NAME (decl1)) && C_DECL_ISNT_PROTOTYPE (old_decl) && !DECL_DECLARED_INLINE_P (decl1)) warning_at (loc, OPT_Wmissing_prototypes, "no previous prototype for %qD", decl1); /* Optionally warn of any def with no previous prototype if the function has already been used. */ else if (warn_missing_prototypes && old_decl != 0 && old_decl != error_mark_node && TREE_USED (old_decl) && !prototype_p (TREE_TYPE (old_decl))) warning_at (loc, OPT_Wmissing_prototypes, "%qD was used with no prototype before its definition", decl1); /* Optionally warn of any global def with no previous declaration. */ else if (warn_missing_declarations && TREE_PUBLIC (decl1) && old_decl == 0 && !MAIN_NAME_P (DECL_NAME (decl1)) && !DECL_DECLARED_INLINE_P (decl1)) warning_at (loc, OPT_Wmissing_declarations, "no previous declaration for %qD", decl1); /* Optionally warn of any def with no previous declaration if the function has already been used. */ else if (warn_missing_declarations && old_decl != 0 && old_decl != error_mark_node && TREE_USED (old_decl) && C_DECL_IMPLICIT (old_decl)) warning_at (loc, OPT_Wmissing_declarations, "%qD was used with no declaration before its definition", decl1); /* This function exists in static storage. (This does not mean `static' in the C sense!) */ TREE_STATIC (decl1) = 1; /* This is the earliest point at which we might know the assembler name of the function. Thus, if it's set before this, die horribly. */ gcc_assert (!DECL_ASSEMBLER_NAME_SET_P (decl1)); /* If #pragma weak was used, mark the decl weak now. */ if (current_scope == file_scope) maybe_apply_pragma_weak (decl1); /* Warn for unlikely, improbable, or stupid declarations of `main'. */ if (warn_main && MAIN_NAME_P (DECL_NAME (decl1))) { if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl1))) != integer_type_node) pedwarn (loc, OPT_Wmain, "return type of %qD is not %<int%>", decl1); else if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (decl1)))) pedwarn (loc, OPT_Wmain, "%<_Atomic%>-qualified return type of %qD", decl1); check_main_parameter_types (decl1); if (!TREE_PUBLIC (decl1)) pedwarn (loc, OPT_Wmain, "%qD is normally a non-static function", decl1); } /* Record the decl so that the function name is defined. If we already have a decl for this name, and it is a FUNCTION_DECL, use the old decl. */ current_function_decl = pushdecl (decl1); push_scope (); declare_parm_level (); restype = TREE_TYPE (TREE_TYPE (current_function_decl)); resdecl = build_decl (loc, RESULT_DECL, NULL_TREE, restype); DECL_ARTIFICIAL (resdecl) = 1; DECL_IGNORED_P (resdecl) = 1; DECL_RESULT (current_function_decl) = resdecl; start_fname_decls (); return 1; } /* Subroutine of store_parm_decls which handles new-style function definitions (prototype format). 
The parms already have decls, so we need only record them as in effect and complain if any redundant old-style parm decls were written. */ static void store_parm_decls_newstyle (tree fndecl, const struct c_arg_info *arg_info) { tree decl; c_arg_tag *tag; unsigned ix; if (current_scope->bindings) { error_at (DECL_SOURCE_LOCATION (fndecl), "old-style parameter declarations in prototyped " "function definition"); /* Get rid of the old-style declarations. */ pop_scope (); push_scope (); } /* Don't issue this warning for nested functions, and don't issue this warning if we got here because ARG_INFO_TYPES was error_mark_node (this happens when a function definition has just an ellipsis in its parameter list). */ else if (!in_system_header_at (input_location) && !current_function_scope && arg_info->types != error_mark_node) warning_at (DECL_SOURCE_LOCATION (fndecl), OPT_Wtraditional, "traditional C rejects ISO C style function definitions"); /* Now make all the parameter declarations visible in the function body. We can bypass most of the grunt work of pushdecl. */ for (decl = arg_info->parms; decl; decl = DECL_CHAIN (decl)) { DECL_CONTEXT (decl) = current_function_decl; if (DECL_NAME (decl)) { bind (DECL_NAME (decl), decl, current_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); if (!TREE_USED (decl)) warn_if_shadowing (decl); } else error_at (DECL_SOURCE_LOCATION (decl), "parameter name omitted"); } /* Record the parameter list in the function declaration. */ DECL_ARGUMENTS (fndecl) = arg_info->parms; /* Now make all the ancillary declarations visible, likewise. */ for (decl = arg_info->others; decl; decl = DECL_CHAIN (decl)) { DECL_CONTEXT (decl) = current_function_decl; if (DECL_NAME (decl)) bind (DECL_NAME (decl), decl, current_scope, /*invisible=*/false, /*nested=*/(TREE_CODE (decl) == FUNCTION_DECL), UNKNOWN_LOCATION); } /* And all the tag declarations. */ FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag) if (tag->id) bind (tag->id, tag->type, current_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); } /* Subroutine of store_parm_decls which handles old-style function definitions (separate parameter list and declarations). */ static void store_parm_decls_oldstyle (tree fndecl, const struct c_arg_info *arg_info) { struct c_binding *b; tree parm, decl, last; tree parmids = arg_info->parms; hash_set<tree> seen_args; if (!in_system_header_at (input_location)) warning_at (DECL_SOURCE_LOCATION (fndecl), OPT_Wold_style_definition, "old-style function definition"); /* Match each formal parameter name with its declaration. Save each decl in the appropriate TREE_PURPOSE slot of the parmids chain. */ for (parm = parmids; parm; parm = TREE_CHAIN (parm)) { if (TREE_VALUE (parm) == 0) { error_at (DECL_SOURCE_LOCATION (fndecl), "parameter name missing from parameter list"); TREE_PURPOSE (parm) = 0; continue; } b = I_SYMBOL_BINDING (TREE_VALUE (parm)); if (b && B_IN_CURRENT_SCOPE (b)) { decl = b->decl; /* Skip erroneous parameters. */ if (decl == error_mark_node) continue; /* If we got something other than a PARM_DECL it is an error. */ if (TREE_CODE (decl) != PARM_DECL) error_at (DECL_SOURCE_LOCATION (decl), "%qD declared as a non-parameter", decl); /* If the declaration is already marked, we have a duplicate name. Complain and ignore the duplicate. 
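     For instance (an illustrative sketch, not from the sources), an
     old-style definition such as

       int f (x, x)
       int x;
       { return x; }

     takes this path, and the second 'x' draws the "multiple
     parameters named" error below.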
*/ else if (seen_args.contains (decl)) { error_at (DECL_SOURCE_LOCATION (decl), "multiple parameters named %qD", decl); TREE_PURPOSE (parm) = 0; continue; } /* If the declaration says "void", complain and turn it into an int. */ else if (VOID_TYPE_P (TREE_TYPE (decl))) { error_at (DECL_SOURCE_LOCATION (decl), "parameter %qD declared with void type", decl); TREE_TYPE (decl) = integer_type_node; DECL_ARG_TYPE (decl) = integer_type_node; layout_decl (decl, 0); } warn_if_shadowing (decl); } /* If no declaration found, default to int. */ else { /* FIXME diagnostics: This should be the location of the argument, not the FNDECL. E.g., for an old-style declaration int f10(v) { blah; } We should use the location of the V, not the F10. Unfortunately, the V is an IDENTIFIER_NODE which has no location. In the future we need locations for c_arg_info entries. See gcc.dg/Wshadow-3.c for an example of this problem. */ decl = build_decl (DECL_SOURCE_LOCATION (fndecl), PARM_DECL, TREE_VALUE (parm), integer_type_node); DECL_ARG_TYPE (decl) = TREE_TYPE (decl); pushdecl (decl); warn_if_shadowing (decl); if (flag_isoc99) pedwarn (DECL_SOURCE_LOCATION (decl), OPT_Wimplicit_int, "type of %qD defaults to %<int%>", decl); else warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wmissing_parameter_type, "type of %qD defaults to %<int%>", decl); } TREE_PURPOSE (parm) = decl; seen_args.add (decl); } /* Now examine the parms chain for incomplete declarations and declarations with no corresponding names. */ for (b = current_scope->bindings; b; b = b->prev) { parm = b->decl; if (TREE_CODE (parm) != PARM_DECL) continue; if (TREE_TYPE (parm) != error_mark_node && !COMPLETE_TYPE_P (TREE_TYPE (parm))) { error_at (DECL_SOURCE_LOCATION (parm), "parameter %qD has incomplete type", parm); TREE_TYPE (parm) = error_mark_node; } if (!seen_args.contains (parm)) { error_at (DECL_SOURCE_LOCATION (parm), "declaration for parameter %qD but no such parameter", parm); /* Pretend the parameter was not missing. This gets us to a standard state and minimizes further error messages. */ parmids = chainon (parmids, tree_cons (parm, 0, 0)); } } /* Chain the declarations together in the order of the list of names. Store that chain in the function decl, replacing the list of names. Update the current scope to match. */ DECL_ARGUMENTS (fndecl) = 0; for (parm = parmids; parm; parm = TREE_CHAIN (parm)) if (TREE_PURPOSE (parm)) break; if (parm && TREE_PURPOSE (parm)) { last = TREE_PURPOSE (parm); DECL_ARGUMENTS (fndecl) = last; for (parm = TREE_CHAIN (parm); parm; parm = TREE_CHAIN (parm)) if (TREE_PURPOSE (parm)) { DECL_CHAIN (last) = TREE_PURPOSE (parm); last = TREE_PURPOSE (parm); } DECL_CHAIN (last) = 0; } /* If there was a previous prototype, set the DECL_ARG_TYPE of each argument according to the type previously specified, and report any mismatches. 
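     A hedged sketch of the promoted-argument case handled below
     (hypothetical function name 'h'):

       int h (char);        prototype with type char
       int h (c) char c;    old-style definition; default argument
       { return c; }        promotion gives 'c' the passing type int

     The unpromoted types match, so this draws only the pedantic
     "promoted argument ... doesn't match prototype" warning rather
     than a hard error.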
   */
  if (current_function_prototype_arg_types)
    {
      tree type;
      for (parm = DECL_ARGUMENTS (fndecl),
           type = current_function_prototype_arg_types;
           parm || (type && TREE_VALUE (type) != error_mark_node
                    && (TYPE_MAIN_VARIANT (TREE_VALUE (type))
                        != void_type_node));
           parm = DECL_CHAIN (parm), type = TREE_CHAIN (type))
        {
          if (parm == 0 || type == 0
              || TYPE_MAIN_VARIANT (TREE_VALUE (type)) == void_type_node)
            {
              if (current_function_prototype_built_in)
                warning_at (DECL_SOURCE_LOCATION (fndecl), 0,
                            "number of arguments doesn%'t match "
                            "built-in prototype");
              else
                {
                  /* FIXME diagnostics: This should be the location of
                     FNDECL, but there is a bug when a prototype is
                     declared inside function context, but defined
                     outside of it (e.g., gcc.dg/pr15698-2.c).  In
                     which case FNDECL gets the location of the
                     prototype, not the definition. */
                  error_at (input_location,
                            "number of arguments doesn%'t match prototype");
                  error_at (current_function_prototype_locus,
                            "prototype declaration");
                }
              break;
            }
          /* Type for passing arg must be consistent with that
             declared for the arg.  ISO C says we take the unqualified
             type for parameters declared with qualified type. */
          if (TREE_TYPE (parm) != error_mark_node
              && TREE_TYPE (type) != error_mark_node
              && ((TYPE_ATOMIC (DECL_ARG_TYPE (parm))
                   != TYPE_ATOMIC (TREE_VALUE (type)))
                  || !comptypes (TYPE_MAIN_VARIANT (DECL_ARG_TYPE (parm)),
                                 TYPE_MAIN_VARIANT (TREE_VALUE (type)))))
            {
              if ((TYPE_ATOMIC (DECL_ARG_TYPE (parm))
                   == TYPE_ATOMIC (TREE_VALUE (type)))
                  && (TYPE_MAIN_VARIANT (TREE_TYPE (parm))
                      == TYPE_MAIN_VARIANT (TREE_VALUE (type))))
                {
                  /* Adjust argument to match prototype.  E.g. a previous
                     `int foo(float);' prototype causes
                     `int foo(x) float x; {...}' to be treated like
                     `int foo(float x) {...}'.  This is particularly
                     useful for argument types like uid_t. */
                  DECL_ARG_TYPE (parm) = TREE_TYPE (parm);

                  if (targetm.calls.promote_prototypes
                        (TREE_TYPE (current_function_decl))
                      && INTEGRAL_TYPE_P (TREE_TYPE (parm))
                      && TYPE_PRECISION (TREE_TYPE (parm))
                         < TYPE_PRECISION (integer_type_node))
                    DECL_ARG_TYPE (parm)
                      = c_type_promotes_to (TREE_TYPE (parm));

                  /* ??? Is it possible to get here with a
                     built-in prototype or will it always have
                     been diagnosed as conflicting with an
                     old-style definition and discarded? */
                  if (current_function_prototype_built_in)
                    warning_at (DECL_SOURCE_LOCATION (parm),
                                OPT_Wpedantic, "promoted argument %qD "
                                "doesn%'t match built-in prototype", parm);
                  else
                    {
                      pedwarn (DECL_SOURCE_LOCATION (parm),
                               OPT_Wpedantic, "promoted argument %qD "
                               "doesn%'t match prototype", parm);
                      pedwarn (current_function_prototype_locus,
                               OPT_Wpedantic, "prototype declaration");
                    }
                }
              else
                {
                  if (current_function_prototype_built_in)
                    warning_at (DECL_SOURCE_LOCATION (parm),
                                0, "argument %qD doesn%'t match "
                                "built-in prototype", parm);
                  else
                    {
                      error_at (DECL_SOURCE_LOCATION (parm),
                                "argument %qD doesn%'t match prototype",
                                parm);
                      error_at (current_function_prototype_locus,
                                "prototype declaration");
                    }
                }
            }
        }

      TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = 0;
    }

  /* Otherwise, create a prototype that would match.
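     For example (illustrative only), given

       int add (a, b)
       int a, b;
       { return a + b; }

     with no prior prototype, the loop below records the promoted
     argument types (int, int), terminated by void_type_node, as the
     TYPE_ACTUAL_ARG_TYPES of a fresh variant of the function type.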
*/ else { tree actual = 0, last = 0, type; for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm)) { type = tree_cons (NULL_TREE, DECL_ARG_TYPE (parm), NULL_TREE); if (last) TREE_CHAIN (last) = type; else actual = type; last = type; } type = tree_cons (NULL_TREE, void_type_node, NULL_TREE); if (last) TREE_CHAIN (last) = type; else actual = type; /* We are going to assign a new value for the TYPE_ACTUAL_ARG_TYPES of the type of this function, but we need to avoid having this affect the types of other similarly-typed functions, so we must first force the generation of an identical (but separate) type node for the relevant function type. The new node we create will be a variant of the main variant of the original function type. */ TREE_TYPE (fndecl) = build_variant_type_copy (TREE_TYPE (fndecl)); TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = actual; } } /* Store parameter declarations passed in ARG_INFO into the current function declaration. */ void store_parm_decls_from (struct c_arg_info *arg_info) { current_function_arg_info = arg_info; store_parm_decls (); } /* Store the parameter declarations into the current function declaration. This is called after parsing the parameter declarations, before digesting the body of the function. For an old-style definition, construct a prototype out of the old-style parameter declarations and inject it into the function's type. */ void store_parm_decls (void) { tree fndecl = current_function_decl; bool proto; /* The argument information block for FNDECL. */ struct c_arg_info *arg_info = current_function_arg_info; current_function_arg_info = 0; /* True if this definition is written with a prototype. Note: despite C99 6.7.5.3p14, we can *not* treat an empty argument list in a function definition as equivalent to (void) -- an empty argument list specifies the function has no parameters, but only (void) sets up a prototype for future calls. */ proto = arg_info->types != 0; if (proto) store_parm_decls_newstyle (fndecl, arg_info); else store_parm_decls_oldstyle (fndecl, arg_info); /* The next call to push_scope will be a function body. */ next_is_function_body = true; /* Write a record describing this function definition to the prototypes file (if requested). */ gen_aux_info_record (fndecl, 1, 0, proto); /* Initialize the RTL code for the function. */ allocate_struct_function (fndecl, false); if (warn_unused_local_typedefs) cfun->language = ggc_cleared_alloc<language_function> (); /* Begin the statement tree for this function. */ DECL_SAVED_TREE (fndecl) = push_stmt_list (); /* ??? Insert the contents of the pending sizes list into the function to be evaluated. The only reason left to have this is void foo(int n, int array[n++]) because we throw away the array type in favor of a pointer type, and thus won't naturally see the SAVE_EXPR containing the increment. All other pending sizes would be handled by gimplify_parameters. */ if (arg_info->pending_sizes) add_stmt (arg_info->pending_sizes); } /* Store PARM_DECLs in PARMS into scope temporarily. Used for c_finish_omp_declare_simd for function prototypes. No diagnostics should be done. */ void temp_store_parm_decls (tree fndecl, tree parms) { push_scope (); for (tree p = parms; p; p = DECL_CHAIN (p)) { DECL_CONTEXT (p) = fndecl; if (DECL_NAME (p)) bind (DECL_NAME (p), p, current_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); } } /* Undo what temp_store_parm_decls did. 
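     (An aside on the prototype test in store_parm_decls above,
     illustrative and not from the sources:

       int f () { return 0; }        empty list: no parameters, but
                                     no prototype either, so a later
                                     call f (1, 2.0) is not
                                     type-checked;
       int g (void) { return 0; }    sets up a prototype, so g (1)
                                     is diagnosed.)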
*/ void temp_pop_parm_decls (void) { /* Clear all bindings in this temporary scope, so that pop_scope doesn't create a BLOCK. */ struct c_binding *b = current_scope->bindings; current_scope->bindings = NULL; for (; b; b = free_binding_and_advance (b)) { gcc_assert (TREE_CODE (b->decl) == PARM_DECL || b->decl == error_mark_node); gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; if (b->shadowed && b->shadowed->u.type) TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type; } pop_scope (); } /* Finish up a function declaration and compile that function all the way to assembler language output. Then free the storage for the function definition. This is called after parsing the body of the function definition. */ void finish_function (void) { tree fndecl = current_function_decl; if (c_dialect_objc ()) objc_finish_function (); if (TREE_CODE (fndecl) == FUNCTION_DECL && targetm.calls.promote_prototypes (TREE_TYPE (fndecl))) { tree args = DECL_ARGUMENTS (fndecl); for (; args; args = DECL_CHAIN (args)) { tree type = TREE_TYPE (args); if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)) DECL_ARG_TYPE (args) = c_type_promotes_to (type); } } if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node) BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl; /* Must mark the RESULT_DECL as being in this function. */ if (DECL_RESULT (fndecl) && DECL_RESULT (fndecl) != error_mark_node) DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl; if (MAIN_NAME_P (DECL_NAME (fndecl)) && flag_hosted && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (fndecl))) == integer_type_node && flag_isoc99) { /* Hack. We don't want the middle-end to warn that this return is unreachable, so we mark its location as special. Using UNKNOWN_LOCATION has the problem that it gets clobbered in annotate_one_with_locus. A cleaner solution might be to ensure ! should_carry_locus_p (stmt), but that needs a flag. */ c_finish_return (BUILTINS_LOCATION, integer_zero_node, NULL_TREE); } /* Tie off the statement tree for this function. */ DECL_SAVED_TREE (fndecl) = pop_stmt_list (DECL_SAVED_TREE (fndecl)); /* If the function has _Cilk_spawn in front of a function call inside it i.e. it is a spawning function, then add the appropriate Cilk plus functions inside. */ if (fn_contains_cilk_spawn_p (cfun)) cfun->cilk_frame_decl = insert_cilk_frame (fndecl); finish_fname_decls (); /* Complain if there's just no return statement. */ if (warn_return_type && TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE && !current_function_returns_value && !current_function_returns_null /* Don't complain if we are no-return. */ && !current_function_returns_abnormally /* Don't complain if we are declared noreturn. */ && !TREE_THIS_VOLATILE (fndecl) /* Don't warn for main(). */ && !MAIN_NAME_P (DECL_NAME (fndecl)) /* Or if they didn't actually specify a return type. */ && !C_FUNCTION_IMPLICIT_INT (fndecl) /* Normally, with -Wreturn-type, flow will complain, but we might optimize out static functions. */ && !TREE_PUBLIC (fndecl)) { warning (OPT_Wreturn_type, "no return statement in function returning non-void"); TREE_NO_WARNING (fndecl) = 1; } /* Complain about parameters that are only set, but never otherwise used. 
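     E.g. (an illustrative sketch):

       void f (int p) { p = 1; }

     draws "parameter 'p' set but not used" under
     -Wunused-but-set-parameter: 'p' is assigned, so TREE_USED is set,
     but it is never read, so DECL_READ_P stays false.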
*/ if (warn_unused_but_set_parameter) { tree decl; for (decl = DECL_ARGUMENTS (fndecl); decl; decl = DECL_CHAIN (decl)) if (TREE_USED (decl) && TREE_CODE (decl) == PARM_DECL && !DECL_READ_P (decl) && DECL_NAME (decl) && !DECL_ARTIFICIAL (decl) && !TREE_NO_WARNING (decl)) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wunused_but_set_parameter, "parameter %qD set but not used", decl); } /* Complain about locally defined typedefs that are not used in this function. */ maybe_warn_unused_local_typedefs (); /* Store the end of the function, so that we get good line number info for the epilogue. */ cfun->function_end_locus = input_location; /* Finalize the ELF visibility for the function. */ c_determine_visibility (fndecl); /* For GNU C extern inline functions disregard inline limits. */ if (DECL_EXTERNAL (fndecl) && DECL_DECLARED_INLINE_P (fndecl)) DECL_DISREGARD_INLINE_LIMITS (fndecl) = 1; /* Genericize before inlining. Delay genericizing nested functions until their parent function is genericized. Since finalizing requires GENERIC, delay that as well. */ if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node && !undef_nested_function) { if (!decl_function_context (fndecl)) { invoke_plugin_callbacks (PLUGIN_PRE_GENERICIZE, fndecl); c_genericize (fndecl); /* ??? Objc emits functions after finalizing the compilation unit. This should be cleaned up later and this conditional removed. */ if (symtab->global_info_ready) { cgraph_node::add_new_function (fndecl, false); return; } cgraph_node::finalize_function (fndecl, false); } else { /* Register this function with cgraph just far enough to get it added to our parent's nested function list. Handy, since the C front end doesn't have such a list. */ (void) cgraph_node::get_create (fndecl); } } if (!decl_function_context (fndecl)) undef_nested_function = false; if (cfun->language != NULL) { ggc_free (cfun->language); cfun->language = NULL; } /* We're leaving the context of this function, so zap cfun. It's still in DECL_STRUCT_FUNCTION, and we'll restore it in tree_rest_of_compilation. */ set_cfun (NULL); current_function_decl = NULL; } /* Check the declarations given in a for-loop for satisfying the C99 constraints. If exactly one such decl is found, return it. LOC is the location of the opening parenthesis of the for loop. The last parameter allows you to control the "for loop initial declarations are only allowed in C99 mode". Normally, you should pass flag_isoc99 as that parameter. But in some cases (Objective-C foreach loop, for example) we want to run the checks in this function even if not in C99 mode, so we allow the caller to turn off the error about not being in C99 mode. */ tree check_for_loop_decls (location_t loc, bool turn_off_iso_c99_error) { struct c_binding *b; tree one_decl = NULL_TREE; int n_decls = 0; if (!turn_off_iso_c99_error) { static bool hint = true; /* If we get here, declarations have been used in a for loop without the C99 for loop scope. This doesn't make much sense, so don't allow it. */ error_at (loc, "%<for%> loop initial declarations " "are only allowed in C99 or C11 mode"); if (hint) { inform (loc, "use option -std=c99, -std=gnu99, -std=c11 or -std=gnu11 " "to compile your code"); hint = false; } return NULL_TREE; } /* C99 subclause 6.8.5 paragraph 3: [#3] The declaration part of a for statement shall only declare identifiers for objects having storage class auto or register. 
     It isn't clear whether, in this sentence, "identifiers" binds to
     "shall only declare" or to "objects" - that is, whether all
     identifiers declared must be identifiers for objects, or whether
     the restriction only applies to those that are.  (A question on
     this in comp.std.c in November 2000 received no answer.)  We
     implement the strictest interpretation, to avoid creating an
     extension which later causes problems. */

  for (b = current_scope->bindings; b; b = b->prev)
    {
      tree id = b->id;
      tree decl = b->decl;

      if (!id)
        continue;

      switch (TREE_CODE (decl))
        {
        case VAR_DECL:
          {
            location_t decl_loc = DECL_SOURCE_LOCATION (decl);
            if (TREE_STATIC (decl))
              error_at (decl_loc,
                        "declaration of static variable %qD in %<for%> loop "
                        "initial declaration", decl);
            else if (DECL_EXTERNAL (decl))
              error_at (decl_loc,
                        "declaration of %<extern%> variable %qD in %<for%> "
                        "loop initial declaration", decl);
          }
          break;

        case RECORD_TYPE:
          error_at (loc,
                    "%<struct %E%> declared in %<for%> loop initial "
                    "declaration", id);
          break;
        case UNION_TYPE:
          error_at (loc,
                    "%<union %E%> declared in %<for%> loop initial "
                    "declaration", id);
          break;
        case ENUMERAL_TYPE:
          error_at (loc, "%<enum %E%> declared in %<for%> loop "
                    "initial declaration", id);
          break;
        default:
          error_at (loc, "declaration of non-variable "
                    "%qD in %<for%> loop initial declaration", decl);
        }

      n_decls++;
      one_decl = decl;
    }

  return n_decls == 1 ? one_decl : NULL_TREE;
}

/* Save and reinitialize the variables used during compilation of a
   C function. */

void
c_push_function_context (void)
{
  struct language_function *p = cfun->language;
  /* cfun->language might have been already allocated by the use of
     -Wunused-local-typedefs.  In that case, just re-use it. */
  if (p == NULL)
    cfun->language = p = ggc_cleared_alloc<language_function> ();

  p->base.x_stmt_tree = c_stmt_tree;
  c_stmt_tree.x_cur_stmt_list = vec_safe_copy (c_stmt_tree.x_cur_stmt_list);
  p->x_break_label = c_break_label;
  p->x_cont_label = c_cont_label;
  p->x_switch_stack = c_switch_stack;
  p->arg_info = current_function_arg_info;
  p->returns_value = current_function_returns_value;
  p->returns_null = current_function_returns_null;
  p->returns_abnormally = current_function_returns_abnormally;
  p->warn_about_return_type = warn_about_return_type;

  push_function_context ();
}

/* Restore the variables used during compilation of a C function. */

void
c_pop_function_context (void)
{
  struct language_function *p;

  pop_function_context ();
  p = cfun->language;

  /* When -Wunused-local-typedefs is in effect, cfun->language is
     used to store data throughout the lifetime of the current cfun,
     so don't deallocate it. */
  if (!warn_unused_local_typedefs)
    cfun->language = NULL;

  if (DECL_STRUCT_FUNCTION (current_function_decl) == 0
      && DECL_SAVED_TREE (current_function_decl) == NULL_TREE)
    {
      /* Stop pointing to the local nodes about to be freed. */
      /* But DECL_INITIAL must remain nonzero so we know this
         was an actual function definition.
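         (Context, added as an illustrative aside: this push/pop pair
         matters for GNU C nested functions, where compilation of the
         outer function is suspended while the inner one is processed:

           int outer (void)
           {
             int inner (int x) { return x + 1; }
             return inner (41);
           }

         Parsing 'inner' saves the statement tree, break/continue
         labels and switch stack of 'outer' and restores them
         afterwards.)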
*/ DECL_INITIAL (current_function_decl) = error_mark_node; DECL_ARGUMENTS (current_function_decl) = 0; } c_stmt_tree = p->base.x_stmt_tree; p->base.x_stmt_tree.x_cur_stmt_list = NULL; c_break_label = p->x_break_label; c_cont_label = p->x_cont_label; c_switch_stack = p->x_switch_stack; current_function_arg_info = p->arg_info; current_function_returns_value = p->returns_value; current_function_returns_null = p->returns_null; current_function_returns_abnormally = p->returns_abnormally; warn_about_return_type = p->warn_about_return_type; } /* The functions below are required for functionality of doing function at once processing in the C front end. Currently these functions are not called from anywhere in the C front end, but as these changes continue, that will change. */ /* Returns the stmt_tree (if any) to which statements are currently being added. If there is no active statement-tree, NULL is returned. */ stmt_tree current_stmt_tree (void) { return &c_stmt_tree; } /* Return the global value of T as a symbol. */ tree identifier_global_value (tree t) { struct c_binding *b; for (b = I_SYMBOL_BINDING (t); b; b = b->shadowed) if (B_IN_FILE_SCOPE (b) || B_IN_EXTERNAL_SCOPE (b)) return b->decl; return 0; } /* In C, the only C-linkage public declaration is at file scope. */ tree c_linkage_bindings (tree name) { return identifier_global_value (name); } /* Record a builtin type for C. If NAME is non-NULL, it is the name used; otherwise the name is found in ridpointers from RID_INDEX. */ void record_builtin_type (enum rid rid_index, const char *name, tree type) { tree id, decl; if (name == 0) id = ridpointers[(int) rid_index]; else id = get_identifier (name); decl = build_decl (UNKNOWN_LOCATION, TYPE_DECL, id, type); pushdecl (decl); if (debug_hooks->type_decl) debug_hooks->type_decl (decl, false); } /* Build the void_list_node (void_type_node having been created). */ tree build_void_list_node (void) { tree t = build_tree_list (NULL_TREE, void_type_node); return t; } /* Return a c_parm structure with the given SPECS, ATTRS and DECLARATOR. */ struct c_parm * build_c_parm (struct c_declspecs *specs, tree attrs, struct c_declarator *declarator) { struct c_parm *ret = XOBNEW (&parser_obstack, struct c_parm); ret->specs = specs; ret->attrs = attrs; ret->declarator = declarator; return ret; } /* Return a declarator with nested attributes. TARGET is the inner declarator to which these attributes apply. ATTRS are the attributes. */ struct c_declarator * build_attrs_declarator (tree attrs, struct c_declarator *target) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_attrs; ret->declarator = target; ret->u.attrs = attrs; return ret; } /* Return a declarator for a function with arguments specified by ARGS and return type specified by TARGET. */ struct c_declarator * build_function_declarator (struct c_arg_info *args, struct c_declarator *target) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_function; ret->declarator = target; ret->u.arg_info = args; return ret; } /* Return a declarator for the identifier IDENT (which may be NULL_TREE for an abstract declarator). */ struct c_declarator * build_id_declarator (tree ident) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_id; ret->declarator = 0; ret->u.id = ident; /* Default value - may get reset to a more precise location. 
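     (As an illustrative aside, not from the sources: these builder
     routines compose inside-out.  The declarator in

       char *(*fp) (void);

     is represented as a cdk_id for 'fp', wrapped by a cdk_pointer,
     wrapped by a cdk_function, wrapped by the outer cdk_pointer for
     the returned pointer to char.)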
*/ ret->id_loc = input_location; return ret; } /* Return something to represent absolute declarators containing a *. TARGET is the absolute declarator that the * contains. TYPE_QUALS_ATTRS is a structure for type qualifiers and attributes to apply to the pointer type. */ struct c_declarator * make_pointer_declarator (struct c_declspecs *type_quals_attrs, struct c_declarator *target) { tree attrs; int quals = 0; struct c_declarator *itarget = target; struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); if (type_quals_attrs) { attrs = type_quals_attrs->attrs; quals = quals_from_declspecs (type_quals_attrs); if (attrs != NULL_TREE) itarget = build_attrs_declarator (attrs, target); } ret->kind = cdk_pointer; ret->declarator = itarget; ret->u.pointer_quals = quals; return ret; } /* Return a pointer to a structure for an empty list of declaration specifiers. */ struct c_declspecs * build_null_declspecs (void) { struct c_declspecs *ret = XOBNEW (&parser_obstack, struct c_declspecs); memset (&ret->locations, 0, cdw_number_of_elements); ret->type = 0; ret->expr = 0; ret->decl_attr = 0; ret->attrs = 0; ret->align_log = -1; ret->typespec_word = cts_none; ret->storage_class = csc_none; ret->expr_const_operands = true; ret->declspecs_seen_p = false; ret->typespec_kind = ctsk_none; ret->non_sc_seen_p = false; ret->typedef_p = false; ret->explicit_signed_p = false; ret->deprecated_p = false; ret->default_int_p = false; ret->long_p = false; ret->long_long_p = false; ret->short_p = false; ret->signed_p = false; ret->unsigned_p = false; ret->complex_p = false; ret->inline_p = false; ret->noreturn_p = false; ret->thread_p = false; ret->thread_gnu_p = false; ret->const_p = false; ret->volatile_p = false; ret->atomic_p = false; ret->restrict_p = false; ret->saturating_p = false; ret->alignas_p = false; ret->address_space = ADDR_SPACE_GENERIC; return ret; } /* Add the address space ADDRSPACE to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_addrspace (source_location location, struct c_declspecs *specs, addr_space_t as) { specs->non_sc_seen_p = true; specs->declspecs_seen_p = true; if (!ADDR_SPACE_GENERIC_P (specs->address_space) && specs->address_space != as) error ("incompatible address space qualifiers %qs and %qs", c_addr_space_name (as), c_addr_space_name (specs->address_space)); else { specs->address_space = as; specs->locations[cdw_address_space] = location; } return specs; } /* Add the type qualifier QUAL to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_qual (source_location loc, struct c_declspecs *specs, tree qual) { enum rid i; bool dupe = false; specs->non_sc_seen_p = true; specs->declspecs_seen_p = true; gcc_assert (TREE_CODE (qual) == IDENTIFIER_NODE && C_IS_RESERVED_WORD (qual)); i = C_RID_CODE (qual); switch (i) { case RID_CONST: dupe = specs->const_p; specs->const_p = true; specs->locations[cdw_const] = loc; break; case RID_VOLATILE: dupe = specs->volatile_p; specs->volatile_p = true; specs->locations[cdw_volatile] = loc; break; case RID_RESTRICT: dupe = specs->restrict_p; specs->restrict_p = true; specs->locations[cdw_restrict] = loc; break; case RID_ATOMIC: dupe = specs->atomic_p; specs->atomic_p = true; break; default: gcc_unreachable (); } if (dupe) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %qE", qual); return specs; } /* Add the type specifier TYPE to the declaration specifiers SPECS, returning SPECS. 
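     Illustrative inputs and the diagnostics they reach below (a
     sketch only, hypothetical names):

       short long x;        both 'long' and 'short' in declaration
                            specifiers
       long long long y;    'long long long' is too long for GCC
       int double z;        two or more data types in declaration
                            specifiers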
*/ struct c_declspecs * declspecs_add_type (location_t loc, struct c_declspecs *specs, struct c_typespec spec) { tree type = spec.spec; specs->non_sc_seen_p = true; specs->declspecs_seen_p = true; specs->typespec_kind = spec.kind; if (TREE_DEPRECATED (type)) specs->deprecated_p = true; /* Handle type specifier keywords. */ if (TREE_CODE (type) == IDENTIFIER_NODE && C_IS_RESERVED_WORD (type) && C_RID_CODE (type) != RID_CXX_COMPAT_WARN) { enum rid i = C_RID_CODE (type); if (specs->type) { error_at (loc, "two or more data types in declaration specifiers"); return specs; } if ((int) i <= (int) RID_LAST_MODIFIER) { /* "long", "short", "signed", "unsigned", "_Complex" or "_Sat". */ bool dupe = false; switch (i) { case RID_LONG: if (specs->long_long_p) { error_at (loc, "%<long long long%> is too long for GCC"); break; } if (specs->long_p) { if (specs->typespec_word == cts_double) { error_at (loc, ("both %<long long%> and %<double%> in " "declaration specifiers")); break; } pedwarn_c90 (loc, OPT_Wlong_long, "ISO C90 does not support %<long long%>"); specs->long_long_p = 1; specs->locations[cdw_long_long] = loc; break; } if (specs->short_p) error_at (loc, ("both %<long%> and %<short%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<long%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<long%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_int_n) error_at (loc, ("both %<long%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<long%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_char) error_at (loc, ("both %<long%> and %<char%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<long%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<long%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<long%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<long%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->long_p = true; specs->locations[cdw_long] = loc; } break; case RID_SHORT: dupe = specs->short_p; if (specs->long_p) error_at (loc, ("both %<long%> and %<short%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<short%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<short%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_int_n) error_at (loc, ("both %<short%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<short%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_char) error_at (loc, ("both %<short%> and %<char%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<short%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<short%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == 
cts_dfloat32) error_at (loc, ("both %<short%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<short%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<short%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->short_p = true; specs->locations[cdw_short] = loc; } break; case RID_SIGNED: dupe = specs->signed_p; if (specs->unsigned_p) error_at (loc, ("both %<signed%> and %<unsigned%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<signed%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<signed%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<signed%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<signed%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<signed%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<signed%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<signed%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<signed%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->signed_p = true; specs->locations[cdw_signed] = loc; } break; case RID_UNSIGNED: dupe = specs->unsigned_p; if (specs->signed_p) error_at (loc, ("both %<signed%> and %<unsigned%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<unsigned%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<unsigned%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<unsigned%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<unsigned%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<unsigned%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<unsigned%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<unsigned%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<unsigned%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->unsigned_p = true; specs->locations[cdw_unsigned] = loc; } break; case RID_COMPLEX: dupe = specs->complex_p; if (!in_system_header_at (loc)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support complex types"); if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<complex%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<complex%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<complex%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both 
%<complex%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<complex%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<complex%> and %<_Decimal128%> in " "declaration specifiers")); else if (specs->typespec_word == cts_fract) error_at (loc, ("both %<complex%> and %<_Fract%> in " "declaration specifiers")); else if (specs->typespec_word == cts_accum) error_at (loc, ("both %<complex%> and %<_Accum%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<complex%> and %<_Sat%> in " "declaration specifiers")); else { specs->complex_p = true; specs->locations[cdw_complex] = loc; } break; case RID_SAT: dupe = specs->saturating_p; pedwarn (loc, OPT_Wpedantic, "ISO C does not support saturating types"); if (specs->typespec_word == cts_int_n) { error_at (loc, ("both %<_Sat%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); } else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<_Sat%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<_Sat%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<_Sat%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_char) error_at (loc, ("both %<_Sat%> and %<char%> in " "declaration specifiers")); else if (specs->typespec_word == cts_int) error_at (loc, ("both %<_Sat%> and %<int%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<_Sat%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<_Sat%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<_Sat%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<_Sat%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<_Sat%> and %<_Decimal128%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<_Sat%> and %<complex%> in " "declaration specifiers")); else { specs->saturating_p = true; specs->locations[cdw_saturating] = loc; } break; default: gcc_unreachable (); } if (dupe) error_at (loc, "duplicate %qE", type); return specs; } else { /* "void", "_Bool", "char", "int", "float", "double", "_Decimal32", "__intN", "_Decimal64", "_Decimal128", "_Fract", "_Accum" or "__auto_type". 
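     Illustrative combinations rejected in the switch below (sketch
     only):

       unsigned _Bool b;    both 'unsigned' and '_Bool'
       long char c;         both 'long' and 'char'
       short double d;      both 'short' and 'double'

     while, e.g., 'long double' and 'unsigned char' are accepted and
     simply set the corresponding typespec word and modifier.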
*/ if (specs->typespec_word != cts_none) { error_at (loc, "two or more data types in declaration specifiers"); return specs; } switch (i) { case RID_AUTO_TYPE: if (specs->long_p) error_at (loc, ("both %<long%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<__auto_type%> in " "declaration specifiers")); else { specs->typespec_word = cts_auto_type; specs->locations[cdw_typespec] = loc; } return specs; case RID_INT_N_0: case RID_INT_N_1: case RID_INT_N_2: case RID_INT_N_3: specs->int_n_idx = i - RID_INT_N_0; if (!in_system_header_at (input_location)) pedwarn (loc, OPT_Wpedantic, "ISO C does not support %<__int%d%> types", int_n_data[specs->int_n_idx].bitsize); if (specs->long_p) error_at (loc, ("both %<__int%d%> and %<long%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->short_p) error_at (loc, ("both %<__int%d%> and %<short%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (! int_n_enabled_p [specs->int_n_idx]) error_at (loc, "%<__int%d%> is not supported on this target", int_n_data[specs->int_n_idx].bitsize); else { specs->typespec_word = cts_int_n; specs->locations[cdw_typespec] = loc; } return specs; case RID_VOID: if (specs->long_p) error_at (loc, ("both %<long%> and %<void%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<void%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<void%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<void%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<void%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<void%> in " "declaration specifiers")); else { specs->typespec_word = cts_void; specs->locations[cdw_typespec] = loc; } return specs; case RID_BOOL: if (!in_system_header_at (loc)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support boolean types"); if (specs->long_p) error_at (loc, ("both %<long%> and %<_Bool%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<_Bool%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<_Bool%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<_Bool%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<_Bool%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<_Bool%> in " "declaration specifiers")); else { specs->typespec_word = cts_bool; specs->locations[cdw_typespec] = loc; } return specs; case RID_CHAR: if (specs->long_p) error_at (loc, ("both %<long%> and %<char%> in " "declaration 
specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<char%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<char%> in " "declaration specifiers")); else { specs->typespec_word = cts_char; specs->locations[cdw_typespec] = loc; } return specs; case RID_INT: if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<int%> in " "declaration specifiers")); else { specs->typespec_word = cts_int; specs->locations[cdw_typespec] = loc; } return specs; case RID_FLOAT: if (specs->long_p) error_at (loc, ("both %<long%> and %<float%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<float%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<float%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<float%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<float%> in " "declaration specifiers")); else { specs->typespec_word = cts_float; specs->locations[cdw_typespec] = loc; } return specs; case RID_DOUBLE: if (specs->long_long_p) error_at (loc, ("both %<long long%> and %<double%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<double%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<double%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<double%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<double%> in " "declaration specifiers")); else { specs->typespec_word = cts_double; specs->locations[cdw_typespec] = loc; } return specs; case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: { const char *str; if (i == RID_DFLOAT32) str = "_Decimal32"; else if (i == RID_DFLOAT64) str = "_Decimal64"; else str = "_Decimal128"; if (specs->long_long_p) error_at (loc, ("both %<long long%> and %<%s%> in " "declaration specifiers"), str); if (specs->long_p) error_at (loc, ("both %<long%> and %<%s%> in " "declaration specifiers"), str); else if (specs->short_p) error_at (loc, ("both %<short%> and %<%s%> in " "declaration specifiers"), str); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<%s%> in " "declaration specifiers"), str); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<%s%> in " "declaration specifiers"), str); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<%s%> in " "declaration specifiers"), str); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<%s%> in " "declaration specifiers"), str); else if (i == RID_DFLOAT32) specs->typespec_word = cts_dfloat32; else if (i == RID_DFLOAT64) specs->typespec_word = cts_dfloat64; else specs->typespec_word = cts_dfloat128; specs->locations[cdw_typespec] = loc; } if (!targetm.decimal_float_supported_p ()) error_at (loc, ("decimal floating point not supported " "for this target")); pedwarn (loc, OPT_Wpedantic, "ISO C does not support decimal floating point"); return specs; case RID_FRACT: case RID_ACCUM: { const char *str; if (i == RID_FRACT) str = "_Fract"; else str = "_Accum"; if (specs->complex_p) error_at (loc, ("both %<complex%> and %<%s%> in " "declaration specifiers"), str); else if (i == RID_FRACT) specs->typespec_word = cts_fract; else specs->typespec_word = cts_accum; specs->locations[cdw_typespec] = loc; } if 
(!targetm.fixed_point_supported_p ()) error_at (loc, "fixed-point types not supported for this target"); pedwarn (loc, OPT_Wpedantic, "ISO C does not support fixed-point types"); return specs; default: /* ObjC reserved word "id", handled below. */ break; } } } /* Now we have a typedef (a TYPE_DECL node), an identifier (some form of ObjC type, cases such as "int" and "long" being handled above), a TYPE (struct, union, enum and typeof specifiers) or an ERROR_MARK. In none of these cases may there have previously been any type specifiers. */ if (specs->type || specs->typespec_word != cts_none || specs->long_p || specs->short_p || specs->signed_p || specs->unsigned_p || specs->complex_p) error_at (loc, "two or more data types in declaration specifiers"); else if (TREE_CODE (type) == TYPE_DECL) { if (TREE_TYPE (type) == error_mark_node) ; /* Allow the type to default to int to avoid cascading errors. */ else { specs->type = TREE_TYPE (type); specs->decl_attr = DECL_ATTRIBUTES (type); specs->typedef_p = true; specs->explicit_signed_p = C_TYPEDEF_EXPLICITLY_SIGNED (type); specs->locations[cdw_typedef] = loc; /* If this typedef name is defined in a struct, then a C++ lookup would return a different value. */ if (warn_cxx_compat && I_SYMBOL_BINDING (DECL_NAME (type))->in_struct) warning_at (loc, OPT_Wc___compat, "C++ lookup of %qD would return a field, not a type", type); /* If we are parsing a struct, record that a struct field used a typedef. */ if (warn_cxx_compat && struct_parse_info != NULL) struct_parse_info->typedefs_seen.safe_push (type); } } else if (TREE_CODE (type) == IDENTIFIER_NODE) { tree t = lookup_name (type); if (!t || TREE_CODE (t) != TYPE_DECL) error_at (loc, "%qE fails to be a typedef or built in type", type); else if (TREE_TYPE (t) == error_mark_node) ; else { specs->type = TREE_TYPE (t); specs->locations[cdw_typespec] = loc; } } else { if (TREE_CODE (type) != ERROR_MARK && spec.kind == ctsk_typeof) { specs->typedef_p = true; specs->locations[cdw_typedef] = loc; if (spec.expr) { if (specs->expr) specs->expr = build2 (COMPOUND_EXPR, TREE_TYPE (spec.expr), specs->expr, spec.expr); else specs->expr = spec.expr; specs->expr_const_operands &= spec.expr_const_operands; } } specs->type = type; } return specs; } /* Add the storage class specifier or function specifier SCSPEC to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_scspec (source_location loc, struct c_declspecs *specs, tree scspec) { enum rid i; enum c_storage_class n = csc_none; bool dupe = false; specs->declspecs_seen_p = true; gcc_assert (TREE_CODE (scspec) == IDENTIFIER_NODE && C_IS_RESERVED_WORD (scspec)); i = C_RID_CODE (scspec); if (specs->non_sc_seen_p) warning (OPT_Wold_style_declaration, "%qE is not at beginning of declaration", scspec); switch (i) { case RID_INLINE: /* C99 permits duplicate inline. Although of doubtful utility, it seems simplest to permit it in gnu89 mode as well, as there is also little utility in maintaining this as a difference between gnu89 and C99 inline. */ dupe = false; specs->inline_p = true; specs->locations[cdw_inline] = loc; break; case RID_NORETURN: /* Duplicate _Noreturn is permitted. 
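     E.g. (an illustrative sketch):

       _Noreturn _Noreturn void die (void);   accepted, as noted here
       inline inline int f (void);            accepted (C99 permits it)
       static static int x;                   duplicate 'static'
       static extern int y;                   multiple storage classes
                                              in declaration specifiers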
*/ dupe = false; specs->noreturn_p = true; specs->locations[cdw_noreturn] = loc; break; case RID_THREAD: dupe = specs->thread_p; if (specs->storage_class == csc_auto) error ("%qE used with %<auto%>", scspec); else if (specs->storage_class == csc_register) error ("%qE used with %<register%>", scspec); else if (specs->storage_class == csc_typedef) error ("%qE used with %<typedef%>", scspec); else { specs->thread_p = true; specs->thread_gnu_p = (strcmp (IDENTIFIER_POINTER (scspec), "__thread") == 0); /* A diagnostic is not required for the use of this identifier in the implementation namespace; only diagnose it for the C11 spelling because of existing code using the other spelling. */ if (!specs->thread_gnu_p) { if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 does not support %qE", scspec); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 does not support %qE", scspec); } specs->locations[cdw_thread] = loc; } break; case RID_AUTO: n = csc_auto; break; case RID_EXTERN: n = csc_extern; /* Diagnose "__thread extern". */ if (specs->thread_p && specs->thread_gnu_p) error ("%<__thread%> before %<extern%>"); break; case RID_REGISTER: n = csc_register; break; case RID_STATIC: n = csc_static; /* Diagnose "__thread static". */ if (specs->thread_p && specs->thread_gnu_p) error ("%<__thread%> before %<static%>"); break; case RID_TYPEDEF: n = csc_typedef; break; default: gcc_unreachable (); } if (n != csc_none && n == specs->storage_class) dupe = true; if (dupe) { if (i == RID_THREAD) error ("duplicate %<_Thread_local%> or %<__thread%>"); else error ("duplicate %qE", scspec); } if (n != csc_none) { if (specs->storage_class != csc_none && n != specs->storage_class) { error ("multiple storage classes in declaration specifiers"); } else { specs->storage_class = n; specs->locations[cdw_storage_class] = loc; if (n != csc_extern && n != csc_static && specs->thread_p) { error ("%qs used with %qE", specs->thread_gnu_p ? "__thread" : "_Thread_local", scspec); specs->thread_p = false; } } } return specs; } /* Add the attributes ATTRS to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_attrs (source_location loc, struct c_declspecs *specs, tree attrs) { specs->attrs = chainon (attrs, specs->attrs); specs->locations[cdw_attributes] = loc; specs->declspecs_seen_p = true; return specs; } /* Add an _Alignas specifier (expression ALIGN, or type whose alignment is ALIGN) to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_alignas (source_location loc, struct c_declspecs *specs, tree align) { int align_log; specs->alignas_p = true; specs->locations[cdw_alignas] = loc; if (align == error_mark_node) return specs; align_log = check_user_alignment (align, true); if (align_log > specs->align_log) specs->align_log = align_log; return specs; } /* Combine "long", "short", "signed", "unsigned" and "_Complex" type specifiers with any other type specifier to determine the resulting type. This is where ISO C checks on complex types are made, since "_Complex long" is a prefix of the valid ISO C type "_Complex long double". */ struct c_declspecs * finish_declspecs (struct c_declspecs *specs) { /* If a type was specified as a whole, we have no modifiers and are done. */ if (specs->type != NULL_TREE) { gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); /* Set a dummy type. 
*/ if (TREE_CODE (specs->type) == ERROR_MARK) specs->type = integer_type_node; return specs; } /* If none of "void", "_Bool", "char", "int", "float" or "double" has been specified, treat it as "int" unless "_Complex" is present and there are no other specifiers. If we just have "_Complex", it is equivalent to "_Complex double", but e.g. "_Complex short" is equivalent to "_Complex short int". */ if (specs->typespec_word == cts_none) { if (specs->saturating_p) { error_at (specs->locations[cdw_saturating], "%<_Sat%> is used without %<_Fract%> or %<_Accum%>"); if (!targetm.fixed_point_supported_p ()) error_at (specs->locations[cdw_saturating], "fixed-point types not supported for this target"); specs->typespec_word = cts_fract; } else if (specs->long_p || specs->short_p || specs->signed_p || specs->unsigned_p) { specs->typespec_word = cts_int; } else if (specs->complex_p) { specs->typespec_word = cts_double; pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support plain %<complex%> meaning " "%<double complex%>"); } else { specs->typespec_word = cts_int; specs->default_int_p = true; /* We don't diagnose this here because grokdeclarator will give more specific diagnostics according to whether it is a function definition. */ } } /* If "signed" was specified, record this to distinguish "int" and "signed int" in the case of a bit-field with -funsigned-bitfields. */ specs->explicit_signed_p = specs->signed_p; /* Now compute the actual type. */ switch (specs->typespec_word) { case cts_auto_type: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); /* Type to be filled in later. */ break; case cts_void: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); specs->type = void_type_node; break; case cts_bool: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); specs->type = boolean_type_node; break; case cts_char: gcc_assert (!specs->long_p && !specs->short_p); gcc_assert (!(specs->signed_p && specs->unsigned_p)); if (specs->signed_p) specs->type = signed_char_type_node; else if (specs->unsigned_p) specs->type = unsigned_char_type_node; else specs->type = char_type_node; if (specs->complex_p) { pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support complex integer types"); specs->type = build_complex_type (specs->type); } break; case cts_int_n: gcc_assert (!specs->long_p && !specs->short_p && !specs->long_long_p); gcc_assert (!(specs->signed_p && specs->unsigned_p)); specs->type = (specs->unsigned_p ? int_n_trees[specs->int_n_idx].unsigned_type : int_n_trees[specs->int_n_idx].signed_type); if (specs->complex_p) { pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support complex integer types"); specs->type = build_complex_type (specs->type); } break; case cts_int: gcc_assert (!(specs->long_p && specs->short_p)); gcc_assert (!(specs->signed_p && specs->unsigned_p)); if (specs->long_long_p) specs->type = (specs->unsigned_p ? long_long_unsigned_type_node : long_long_integer_type_node); else if (specs->long_p) specs->type = (specs->unsigned_p ? long_unsigned_type_node : long_integer_type_node); else if (specs->short_p) specs->type = (specs->unsigned_p ? short_unsigned_type_node : short_integer_type_node); else specs->type = (specs->unsigned_p ? 
unsigned_type_node : integer_type_node); if (specs->complex_p) { pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support complex integer types"); specs->type = build_complex_type (specs->type); } break; case cts_float: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p); specs->type = (specs->complex_p ? complex_float_type_node : float_type_node); break; case cts_double: gcc_assert (!specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p); if (specs->long_p) { specs->type = (specs->complex_p ? complex_long_double_type_node : long_double_type_node); } else { specs->type = (specs->complex_p ? complex_double_type_node : double_type_node); } break; case cts_dfloat32: case cts_dfloat64: case cts_dfloat128: gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); if (specs->typespec_word == cts_dfloat32) specs->type = dfloat32_type_node; else if (specs->typespec_word == cts_dfloat64) specs->type = dfloat64_type_node; else specs->type = dfloat128_type_node; break; case cts_fract: gcc_assert (!specs->complex_p); if (!targetm.fixed_point_supported_p ()) specs->type = integer_type_node; else if (specs->saturating_p) { if (specs->long_long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_long_fract_type_node : sat_long_long_fract_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_fract_type_node : sat_long_fract_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? sat_unsigned_short_fract_type_node : sat_short_fract_type_node; else specs->type = specs->unsigned_p ? sat_unsigned_fract_type_node : sat_fract_type_node; } else { if (specs->long_long_p) specs->type = specs->unsigned_p ? unsigned_long_long_fract_type_node : long_long_fract_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? unsigned_long_fract_type_node : long_fract_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? unsigned_short_fract_type_node : short_fract_type_node; else specs->type = specs->unsigned_p ? unsigned_fract_type_node : fract_type_node; } break; case cts_accum: gcc_assert (!specs->complex_p); if (!targetm.fixed_point_supported_p ()) specs->type = integer_type_node; else if (specs->saturating_p) { if (specs->long_long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_long_accum_type_node : sat_long_long_accum_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_accum_type_node : sat_long_accum_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? sat_unsigned_short_accum_type_node : sat_short_accum_type_node; else specs->type = specs->unsigned_p ? sat_unsigned_accum_type_node : sat_accum_type_node; } else { if (specs->long_long_p) specs->type = specs->unsigned_p ? unsigned_long_long_accum_type_node : long_long_accum_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? unsigned_long_accum_type_node : long_accum_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? unsigned_short_accum_type_node : short_accum_type_node; else specs->type = specs->unsigned_p ? unsigned_accum_type_node : accum_type_node; } break; default: gcc_unreachable (); } return specs; } /* A subroutine of c_write_global_declarations. Perform final processing on one file scope's declarations (or the external scope's declarations), GLOBALS. 
*/ static void c_write_global_declarations_1 (tree globals) { tree decl; bool reconsider; /* Process the decls in the order they were written. */ for (decl = globals; decl; decl = DECL_CHAIN (decl)) { /* Check for used but undefined static functions using the C standard's definition of "used", and set TREE_NO_WARNING so that check_global_declarations doesn't repeat the check. */ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl) == 0 && DECL_EXTERNAL (decl) && !TREE_PUBLIC (decl) && C_DECL_USED (decl)) { pedwarn (input_location, 0, "%q+F used but never defined", decl); TREE_NO_WARNING (decl) = 1; } wrapup_global_declaration_1 (decl); } do { reconsider = false; for (decl = globals; decl; decl = DECL_CHAIN (decl)) reconsider |= wrapup_global_declaration_2 (decl); } while (reconsider); for (decl = globals; decl; decl = DECL_CHAIN (decl)) check_global_declaration_1 (decl); } /* A subroutine of c_write_global_declarations Emit debug information for each of the declarations in GLOBALS. */ static void c_write_global_declarations_2 (tree globals) { tree decl; for (decl = globals; decl ; decl = DECL_CHAIN (decl)) debug_hooks->global_decl (decl); } /* Callback to collect a source_ref from a DECL. */ static void collect_source_ref_cb (tree decl) { if (!DECL_IS_BUILTIN (decl)) collect_source_ref (LOCATION_FILE (decl_sloc (decl, false))); } /* Preserve the external declarations scope across a garbage collect. */ static GTY(()) tree ext_block; /* Collect all references relevant to SOURCE_FILE. */ static void collect_all_refs (const char *source_file) { tree t; unsigned i; FOR_EACH_VEC_ELT (*all_translation_units, i, t) collect_ada_nodes (BLOCK_VARS (DECL_INITIAL (t)), source_file); collect_ada_nodes (BLOCK_VARS (ext_block), source_file); } /* Iterate over all global declarations and call CALLBACK. */ static void for_each_global_decl (void (*callback) (tree decl)) { tree t; tree decls; tree decl; unsigned i; FOR_EACH_VEC_ELT (*all_translation_units, i, t) { decls = DECL_INITIAL (t); for (decl = BLOCK_VARS (decls); decl; decl = TREE_CHAIN (decl)) callback (decl); } for (decl = BLOCK_VARS (ext_block); decl; decl = TREE_CHAIN (decl)) callback (decl); } void c_write_global_declarations (void) { tree t; unsigned i; /* We don't want to do this if generating a PCH. */ if (pch_file) return; timevar_start (TV_PHASE_DEFERRED); /* Do the Objective-C stuff. This is where all the Objective-C module stuff gets generated (symtab, class/protocol/selector lists etc). */ if (c_dialect_objc ()) objc_write_global_declarations (); /* Close the external scope. */ ext_block = pop_scope (); external_scope = 0; gcc_assert (!current_scope); /* Handle -fdump-ada-spec[-slim]. */ if (flag_dump_ada_spec || flag_dump_ada_spec_slim) { /* Build a table of files to generate specs for */ if (flag_dump_ada_spec_slim) collect_source_ref (main_input_filename); else for_each_global_decl (collect_source_ref_cb); dump_ada_specs (collect_all_refs, NULL); } if (ext_block) { tree tmp = BLOCK_VARS (ext_block); int flags; FILE * stream = dump_begin (TDI_tu, &flags); if (stream && tmp) { dump_node (tmp, flags & ~TDF_SLIM, stream); dump_end (TDI_tu, stream); } } /* Process all file scopes in this compilation, and the external_scope, through wrapup_global_declarations and check_global_declarations. 
*/ FOR_EACH_VEC_ELT (*all_translation_units, i, t) c_write_global_declarations_1 (BLOCK_VARS (DECL_INITIAL (t))); c_write_global_declarations_1 (BLOCK_VARS (ext_block)); timevar_stop (TV_PHASE_DEFERRED); timevar_start (TV_PHASE_OPT_GEN); /* We're done parsing; proceed to optimize and emit assembly. FIXME: shouldn't be the front end's responsibility to call this. */ symtab->finalize_compilation_unit (); timevar_stop (TV_PHASE_OPT_GEN); timevar_start (TV_PHASE_DBGINFO); /* After cgraph has had a chance to emit everything that's going to be emitted, output debug information for globals. */ if (!seen_error ()) { timevar_push (TV_SYMOUT); FOR_EACH_VEC_ELT (*all_translation_units, i, t) c_write_global_declarations_2 (BLOCK_VARS (DECL_INITIAL (t))); c_write_global_declarations_2 (BLOCK_VARS (ext_block)); timevar_pop (TV_SYMOUT); } ext_block = NULL; timevar_stop (TV_PHASE_DBGINFO); } /* Register reserved keyword WORD as qualifier for address space AS. */ void c_register_addr_space (const char *word, addr_space_t as) { int rid = RID_FIRST_ADDR_SPACE + as; tree id; /* Address space qualifiers are only supported in C with GNU extensions enabled. */ if (c_dialect_objc () || flag_no_asm) return; id = get_identifier (word); C_SET_RID_CODE (id, rid); C_IS_RESERVED_WORD (id) = 1; ridpointers [rid] = id; } /* Return identifier to look up for omp declare reduction. */ tree c_omp_reduction_id (enum tree_code reduction_code, tree reduction_id) { const char *p = NULL; switch (reduction_code) { case PLUS_EXPR: p = "+"; break; case MULT_EXPR: p = "*"; break; case MINUS_EXPR: p = "-"; break; case BIT_AND_EXPR: p = "&"; break; case BIT_XOR_EXPR: p = "^"; break; case BIT_IOR_EXPR: p = "|"; break; case TRUTH_ANDIF_EXPR: p = "&&"; break; case TRUTH_ORIF_EXPR: p = "||"; break; case MIN_EXPR: p = "min"; break; case MAX_EXPR: p = "max"; break; default: break; } if (p == NULL) { if (TREE_CODE (reduction_id) != IDENTIFIER_NODE) return error_mark_node; p = IDENTIFIER_POINTER (reduction_id); } const char prefix[] = "omp declare reduction "; size_t lenp = sizeof (prefix); size_t len = strlen (p); char *name = XALLOCAVEC (char, lenp + len); memcpy (name, prefix, lenp - 1); memcpy (name + lenp - 1, p, len + 1); return get_identifier (name); } /* Lookup REDUCTION_ID in the current scope, or create an artificial VAR_DECL, bind it into the current scope and return it. */ tree c_omp_reduction_decl (tree reduction_id) { struct c_binding *b = I_SYMBOL_BINDING (reduction_id); if (b != NULL && B_IN_CURRENT_SCOPE (b)) return b->decl; tree decl = build_decl (BUILTINS_LOCATION, VAR_DECL, reduction_id, integer_type_node); DECL_ARTIFICIAL (decl) = 1; DECL_EXTERNAL (decl) = 1; TREE_STATIC (decl) = 1; TREE_PUBLIC (decl) = 0; bind (reduction_id, decl, current_scope, true, false, BUILTINS_LOCATION); return decl; } /* Lookup REDUCTION_ID in the first scope where it has entry for TYPE. */ tree c_omp_reduction_lookup (tree reduction_id, tree type) { struct c_binding *b = I_SYMBOL_BINDING (reduction_id); while (b) { tree t; for (t = DECL_INITIAL (b->decl); t; t = TREE_CHAIN (t)) if (comptypes (TREE_PURPOSE (t), type)) return TREE_VALUE (t); b = b->shadowed; } return error_mark_node; } /* Helper function called via walk_tree, to diagnose invalid #pragma omp declare reduction combiners or initializers. 
*/ tree c_check_omp_declare_reduction_r (tree *tp, int *, void *data) { tree *vars = (tree *) data; if (SSA_VAR_P (*tp) && !DECL_ARTIFICIAL (*tp) && *tp != vars[0] && *tp != vars[1]) { location_t loc = DECL_SOURCE_LOCATION (vars[0]); if (strcmp (IDENTIFIER_POINTER (DECL_NAME (vars[0])), "omp_out") == 0) error_at (loc, "%<#pragma omp declare reduction%> combiner refers to " "variable %qD which is not %<omp_out%> nor %<omp_in%>", *tp); else error_at (loc, "%<#pragma omp declare reduction%> initializer refers " "to variable %qD which is not %<omp_priv%> nor " "%<omp_orig%>", *tp); return *tp; } return NULL_TREE; } #include "gt-c-c-decl.h"
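The name mangling in c_omp_reduction_id above is what keeps user-defined reductions out of the ordinary identifier namespace: the prefix "omp declare reduction " contains spaces, so the mangled name can never collide with anything a C program can spell. A minimal standalone sketch of the same scheme (illustrative only, not GCC code; the function name is made up):

#include <cstdio>
#include <string>

// Mimic c_omp_reduction_id's mangling: a fixed, space-containing prefix is
// prepended to the operator spelling (or to the user-supplied identifier).
static std::string omp_reduction_name(const std::string &op)
{
  return "omp declare reduction " + op;
}

int main()
{
  // "#pragma omp declare reduction(+ : ...)" is registered and looked up as:
  std::printf("%s\n", omp_reduction_name("+").c_str());   // omp declare reduction +
  std::printf("%s\n", omp_reduction_name("min").c_str()); // omp declare reduction min
  return 0;
}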
TranslationTable.h
// Modified from the original file of Chris Dyer // Copyright 2017 by Hao Wang, modified from the original code of Chris Dyer // provided in https://github.com/clab/fast_align/blob/master/src/ttables.h // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef TRANSLATION_TABLE_H #define TRANSLATION_TABLE_H typedef vector<W2Double>::const_iterator const_iterator; static double digamma(double x) { double result = 0, xx, xx2, xx4; for (; x < 7; ++x) result -= 1 / x; x -= 1.0 / 2.0; xx = 1.0 / x; xx2 = xx * xx; xx4 = xx2 * xx2; result += log(x) + (1. / 24.) * xx2 - (7.0 / 960.0) * xx4 + (31.0 / 8064.0) * xx4 * xx2 - (127.0 / 30720.0) * xx4 * xx4; return result; } class TranslationTable { public: TranslationTable() : initialized_(false), frozen_(false){}; TranslationTable(const TranslationTable &other) : initialized_(other.initialized_), frozen_(other.frozen_) { tt = other.tt; counts = other.counts; } ~TranslationTable(){}; inline double Prob(const unsigned &f, const unsigned &e) const { return initialized_ ? tt[f].find(e)->second : 1e-9; } inline double safe_Prob(const unsigned &f, const unsigned &e) const { if (f < static_cast<unsigned>(tt.size())) { const W2Double &cpd = tt[f]; const W2Double::const_iterator it = cpd.find(e); if (it == cpd.end()) return 1e-7; return it->second; } else return 1e-7; } inline void Insert(const unsigned f, const unsigned e) { // NOT thread safe if (f >= counts.size()) counts.resize(f + 1); counts[f][e] = 0; } inline void SetMaxF(const unsigned f) { // NOT thread safe if (f >= counts.size()) { counts.resize(f + 1); } } inline void safe_SetValue(const unsigned f, const unsigned e, const double x) { if (f >= tt.size()) tt.resize(f + 1); tt[f][e] = x; } inline void Increment(const unsigned f, const unsigned e, const double x) { // NOT thread safe counts[f].find(e)->second += x; } void Normalize() { CHECK(!frozen_, "#ERROR! tt has been initialized."); tt.swap(counts); #pragma omp parallel for schedule(dynamic) for (unsigned f = 0; f < tt.size(); f++) { double total = 0; W2Double &cpd = tt[f]; for (W2Double::iterator it = cpd.begin(); it != cpd.end(); ++it) total += it->second; if (!total) total = 1; for (W2Double::iterator it = cpd.begin(); it != cpd.end(); ++it) it->second /= total; } ClearCounts(); } void NormalizeVB(const double alpha) { CHECK(!frozen_, "#ERROR! tt has been initialized."); tt.swap(counts); #pragma omp parallel for schedule(dynamic) for (unsigned f = 0; f < tt.size(); f++) { double total = 0; W2Double &cpd = tt[f]; for (W2Double::iterator it = cpd.begin(); it != cpd.end(); ++it) total += it->second + alpha; if (!total) total = 1; const double digamma_total = digamma(total); for (W2Double::iterator it = cpd.begin(); it != cpd.end(); ++it) it->second = exp(digamma(it->second + alpha) - digamma_total); } ClearCounts(); } void SetInitialized() { CHECK(!initialized_, "#ERROR! 
tt has been initialized."); if (!initialized_) { tt.resize(counts.size()); for (unsigned i = 0; i < counts.size(); i++) { tt[i] = counts[i]; } } initialized_ = true; } void Freeze() { CHECK(!frozen_, "#ERROR! tt has frozen."); counts.clear(); frozen_ = true; } const_iterator begin() const { return tt.begin(); } const_iterator end() const { return tt.end(); } W2Double operator[](int i) const { return tt[i]; } size_t size() const { return tt.size(); } void clear() { counts.clear(); tt.clear(); frozen_ = false; initialized_ = false; } void Merge(TranslationTable &other, const bool &reverse = true) { CHECK(!frozen_, "#ERROR! tt has frozen."); counts.clear(); //require this and other have same entries. #pragma omp parallel for schedule(dynamic) for (unsigned f = 0; f < tt.size(); f++) { // remove NULL from F list W2Double &cpd = tt[f]; for (W2Double::iterator it = cpd.begin(); it != cpd.end(); it++) { const unsigned e = it->first; const double score = (reverse) ? other.safe_Prob(e, f) : other.safe_Prob(f, e); it->second = sqrt(it->second * score); } } } void WriteTranslationTable(ofstream *file, const SimpleWordWrapper &sw2id, const SimpleWordWrapper &tw2id, const double &threshold, const bool &keepNull) { CHECK(frozen_, "#ERROR! tt has not frozen."); if ((*file).is_open()) { for (unsigned f = (keepNull) ? 0 : 1; f < tt.size(); f++) { for (auto const &e : tt[f]) { const double score = e.second; if (score >= threshold) (*file) << sw2id.decode(f) << " " << tw2id.decode(e.first) << " " << score << "\n"; } } (*file).close(); } } void LoadTranslationTable(ifstream *file, SimpleWordWrapper &sw2id, SimpleWordWrapper &tw2id) { CHECK(!frozen_, "#ERROR! tt has frozen."); int i; int effective = 0; string line; for (i = 0; !getline(*file, line).eof(); i++) { const vector<string> tokens = Split(line, " "); CHECK(tokens.size() == 3, "#ERROR! model format wrong: " + line); const string &f_str = tokens[0]; const string &e_str = tokens[1]; unsigned f = sw2id.encode(f_str); unsigned e = tw2id.encode(e_str); if (f && e) { safe_SetValue(f, e, stod(tokens[2])); effective++; } } initialized_ = true; (*file).close(); cerr << "# of entries: \t[" << i << "]" << endl; cerr << "# of effective entries:\t[" << effective << "]" << endl; } void ClearCounts() { #pragma omp parallel for schedule(dynamic) for (size_t i = 0; i < counts.size(); i++) for (auto &cnt : counts[i]) cnt.second = 0.0; } private: W2WDouble tt; W2WDouble counts; double threshold_ = 1e-6; bool initialized_; // Disallow new e,f pairs to be added to counts bool frozen_; // Disallow new e,f pairs to be added to counts }; #endif // TRANSLATION_TABLE_H
// Modified from the original file of Chris Dyer
// Copyright 2017 by Hao Wang, modified from the original code of Chris Dyer
// provided in https://github.com/clab/fast_align/blob/master/src/ttables.h
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef TRANSLATION_TABLE_H
#define TRANSLATION_TABLE_H

typedef vector<W2Double>::const_iterator const_iterator;

static double digamma(double x) {
  double result = 0, xx, xx2, xx4;
  for (; x < 7; ++x)
    result -= 1 / x;
  x -= 1.0 / 2.0;
  xx = 1.0 / x;
  xx2 = xx * xx;
  xx4 = xx2 * xx2;
  result += log(x) + (1. / 24.) * xx2 - (7.0 / 960.0) * xx4 +
            (31.0 / 8064.0) * xx4 * xx2 - (127.0 / 30720.0) * xx4 * xx4;
  return result;
}

class TranslationTable {
 public:
  TranslationTable() : initialized_(false), frozen_(false) {}
  TranslationTable(const TranslationTable &other)
      : initialized_(other.initialized_), frozen_(other.frozen_) {
    tt = other.tt;
    counts = other.counts;
  }
  ~TranslationTable() {}

  inline double Prob(const unsigned &f, const unsigned &e) const {
    return initialized_ ? tt[f].find(e)->second : 1e-9;
  }

  inline double safe_Prob(const unsigned &f, const unsigned &e) const {
    if (f < static_cast<unsigned>(tt.size())) {
      const W2Double &cpd = tt[f];
      const W2Double::const_iterator it = cpd.find(e);
      if (it == cpd.end()) return 1e-7;
      return it->second;
    } else
      return 1e-7;
  }

  inline void Insert(const unsigned f, const unsigned e) {
    // NOT thread safe
    if (f >= counts.size()) counts.resize(f + 1);
    counts[f][e] = 0;
  }

  inline void SetMaxF(const unsigned f) {
    // NOT thread safe
    if (f >= counts.size()) { counts.resize(f + 1); }
  }

  inline void safe_SetValue(const unsigned f, const unsigned e, const double x) {
    if (f >= tt.size()) tt.resize(f + 1);
    tt[f][e] = x;
  }

  inline void Increment(const unsigned f, const unsigned e, const double x) {
    // NOT thread safe
    counts[f].find(e)->second += x;
  }

  void Normalize() {
    CHECK(!frozen_, "#ERROR! tt has been initialized.");
    tt.swap(counts);
    for (unsigned f = 0; f < tt.size(); f++) {
      double total = 0;
      W2Double &cpd = tt[f];
      for (W2Double::iterator it = cpd.begin(); it != cpd.end(); ++it)
        total += it->second;
      if (!total) total = 1;
      for (W2Double::iterator it = cpd.begin(); it != cpd.end(); ++it)
        it->second /= total;
    }
    ClearCounts();
  }

  void NormalizeVB(const double alpha) {
    CHECK(!frozen_, "#ERROR! tt has been initialized.");
    tt.swap(counts);
    for (unsigned f = 0; f < tt.size(); f++) {
      double total = 0;
      W2Double &cpd = tt[f];
      for (W2Double::iterator it = cpd.begin(); it != cpd.end(); ++it)
        total += it->second + alpha;
      if (!total) total = 1;
      const double digamma_total = digamma(total);
      for (W2Double::iterator it = cpd.begin(); it != cpd.end(); ++it)
        it->second = exp(digamma(it->second + alpha) - digamma_total);
    }
    ClearCounts();
  }

  void SetInitialized() {
    CHECK(!initialized_, "#ERROR! tt has been initialized.");
    if (!initialized_) {
      tt.resize(counts.size());
      for (unsigned i = 0; i < counts.size(); i++) {
        tt[i] = counts[i];
      }
    }
    initialized_ = true;
  }

  void Freeze() {
    CHECK(!frozen_, "#ERROR! tt has frozen.");
    counts.clear();
    frozen_ = true;
  }

  const_iterator begin() const { return tt.begin(); }
  const_iterator end() const { return tt.end(); }
  W2Double operator[](int i) const { return tt[i]; }
  size_t size() const { return tt.size(); }

  void clear() {
    counts.clear();
    tt.clear();
    frozen_ = false;
    initialized_ = false;
  }

  void Merge(TranslationTable &other, const bool &reverse = true) {
    CHECK(!frozen_, "#ERROR! tt has frozen.");
    counts.clear();
    // require this and other have same entries.
    for (unsigned f = 0; f < tt.size(); f++) {
      // remove NULL from F list
      W2Double &cpd = tt[f];
      for (W2Double::iterator it = cpd.begin(); it != cpd.end(); it++) {
        const unsigned e = it->first;
        const double score = (reverse) ? other.safe_Prob(e, f) : other.safe_Prob(f, e);
        it->second = sqrt(it->second * score);
      }
    }
  }

  void WriteTranslationTable(ofstream *file, const SimpleWordWrapper &sw2id,
                             const SimpleWordWrapper &tw2id,
                             const double &threshold, const bool &keepNull) {
    CHECK(frozen_, "#ERROR! tt has not frozen.");
    if ((*file).is_open()) {
      for (unsigned f = (keepNull) ? 0 : 1; f < tt.size(); f++) {
        for (auto const &e : tt[f]) {
          const double score = e.second;
          if (score >= threshold)
            (*file) << sw2id.decode(f) << " " << tw2id.decode(e.first) << " "
                    << score << "\n";
        }
      }
      (*file).close();
    }
  }

  void LoadTranslationTable(ifstream *file, SimpleWordWrapper &sw2id,
                            SimpleWordWrapper &tw2id) {
    CHECK(!frozen_, "#ERROR! tt has frozen.");
    int i;
    int effective = 0;
    string line;
    for (i = 0; !getline(*file, line).eof(); i++) {
      const vector<string> tokens = Split(line, " ");
      CHECK(tokens.size() == 3, "#ERROR! model format wrong: " + line);
      const string &f_str = tokens[0];
      const string &e_str = tokens[1];
      unsigned f = sw2id.encode(f_str);
      unsigned e = tw2id.encode(e_str);
      if (f && e) {
        safe_SetValue(f, e, stod(tokens[2]));
        effective++;
      }
    }
    initialized_ = true;
    (*file).close();
    cerr << "# of entries: \t[" << i << "]" << endl;
    cerr << "# of effective entries:\t[" << effective << "]" << endl;
  }

  void ClearCounts() {
    for (size_t i = 0; i < counts.size(); i++)
      for (auto &cnt : counts[i]) cnt.second = 0.0;
  }

 private:
  W2WDouble tt;
  W2WDouble counts;
  double threshold_ = 1e-6;
  bool initialized_;  // Disallow new e,f pairs to be added to counts
  bool frozen_;       // Disallow new e,f pairs to be added to counts
};

#endif  // TRANSLATION_TABLE_H
// Modified from the original file of Chris Dyer
// Copyright 2017 by Hao Wang, modified from the original code of Chris Dyer
// provided in https://github.com/clab/fast_align/blob/master/src/ttables.h
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef TRANSLATION_TABLE_H
#define TRANSLATION_TABLE_H

typedef vector<W2Double>::const_iterator const_iterator;

static double digamma(double x) {
  double result = 0, xx, xx2, xx4;
  for (; x < 7; ++x)
    result -= 1 / x;
  x -= 1.0 / 2.0;
  xx = 1.0 / x;
  xx2 = xx * xx;
  xx4 = xx2 * xx2;
  result += log(x) + (1. / 24.) * xx2 - (7.0 / 960.0) * xx4 +
            (31.0 / 8064.0) * xx4 * xx2 - (127.0 / 30720.0) * xx4 * xx4;
  return result;
}

class TranslationTable {
 public:
  TranslationTable() : initialized_(false), frozen_(false) {}
  TranslationTable(const TranslationTable &other)
      : initialized_(other.initialized_), frozen_(other.frozen_) {
    tt = other.tt;
    counts = other.counts;
  }
  ~TranslationTable() {}

  inline double Prob(const unsigned &f, const unsigned &e) const {
    return initialized_ ? tt[f].find(e)->second : 1e-9;
  }

  inline double safe_Prob(const unsigned &f, const unsigned &e) const {
    if (f < static_cast<unsigned>(tt.size())) {
      const W2Double &cpd = tt[f];
      const W2Double::const_iterator it = cpd.find(e);
      if (it == cpd.end()) return 1e-7;
      return it->second;
    } else
      return 1e-7;
  }

  inline void Insert(const unsigned f, const unsigned e) {
    // NOT thread safe
    if (f >= counts.size()) counts.resize(f + 1);
    counts[f][e] = 0;
  }

  inline void SetMaxF(const unsigned f) {
    // NOT thread safe
    if (f >= counts.size()) { counts.resize(f + 1); }
  }

  inline void safe_SetValue(const unsigned f, const unsigned e, const double x) {
    if (f >= tt.size()) tt.resize(f + 1);
    tt[f][e] = x;
  }

  inline void Increment(const unsigned f, const unsigned e, const double x) {
    // NOT thread safe
    counts[f].find(e)->second += x;
  }

  void Normalize() {
    CHECK(!frozen_, "#ERROR! tt has been initialized.");
    tt.swap(counts);
#pragma omp parallel for schedule(dynamic)
    for (unsigned f = 0; f < tt.size(); f++) {
      double total = 0;
      W2Double &cpd = tt[f];
      for (W2Double::iterator it = cpd.begin(); it != cpd.end(); ++it)
        total += it->second;
      if (!total) total = 1;
      for (W2Double::iterator it = cpd.begin(); it != cpd.end(); ++it)
        it->second /= total;
    }
    ClearCounts();
  }

  void NormalizeVB(const double alpha) {
    CHECK(!frozen_, "#ERROR! tt has been initialized.");
    tt.swap(counts);
#pragma omp parallel for schedule(dynamic)
    for (unsigned f = 0; f < tt.size(); f++) {
      double total = 0;
      W2Double &cpd = tt[f];
      for (W2Double::iterator it = cpd.begin(); it != cpd.end(); ++it)
        total += it->second + alpha;
      if (!total) total = 1;
      const double digamma_total = digamma(total);
      for (W2Double::iterator it = cpd.begin(); it != cpd.end(); ++it)
        it->second = exp(digamma(it->second + alpha) - digamma_total);
    }
    ClearCounts();
  }

  void SetInitialized() {
    CHECK(!initialized_, "#ERROR! tt has been initialized.");
    if (!initialized_) {
      tt.resize(counts.size());
      for (unsigned i = 0; i < counts.size(); i++) {
        tt[i] = counts[i];
      }
    }
    initialized_ = true;
  }

  void Freeze() {
    CHECK(!frozen_, "#ERROR! tt has frozen.");
    counts.clear();
    frozen_ = true;
  }

  const_iterator begin() const { return tt.begin(); }
  const_iterator end() const { return tt.end(); }
  W2Double operator[](int i) const { return tt[i]; }
  size_t size() const { return tt.size(); }

  void clear() {
    counts.clear();
    tt.clear();
    frozen_ = false;
    initialized_ = false;
  }

  void Merge(TranslationTable &other, const bool &reverse = true) {
    CHECK(!frozen_, "#ERROR! tt has frozen.");
    counts.clear();
    // require this and other have same entries.
#pragma omp parallel for schedule(dynamic)
    for (unsigned f = 0; f < tt.size(); f++) {
      // remove NULL from F list
      W2Double &cpd = tt[f];
      for (W2Double::iterator it = cpd.begin(); it != cpd.end(); it++) {
        const unsigned e = it->first;
        const double score = (reverse) ? other.safe_Prob(e, f) : other.safe_Prob(f, e);
        it->second = sqrt(it->second * score);
      }
    }
  }

  void WriteTranslationTable(ofstream *file, const SimpleWordWrapper &sw2id,
                             const SimpleWordWrapper &tw2id,
                             const double &threshold, const bool &keepNull) {
    CHECK(frozen_, "#ERROR! tt has not frozen.");
    if ((*file).is_open()) {
      for (unsigned f = (keepNull) ? 0 : 1; f < tt.size(); f++) {
        for (auto const &e : tt[f]) {
          const double score = e.second;
          if (score >= threshold)
            (*file) << sw2id.decode(f) << " " << tw2id.decode(e.first) << " "
                    << score << "\n";
        }
      }
      (*file).close();
    }
  }

  void LoadTranslationTable(ifstream *file, SimpleWordWrapper &sw2id,
                            SimpleWordWrapper &tw2id) {
    CHECK(!frozen_, "#ERROR! tt has frozen.");
    int i;
    int effective = 0;
    string line;
    for (i = 0; !getline(*file, line).eof(); i++) {
      const vector<string> tokens = Split(line, " ");
      CHECK(tokens.size() == 3, "#ERROR! model format wrong: " + line);
      const string &f_str = tokens[0];
      const string &e_str = tokens[1];
      unsigned f = sw2id.encode(f_str);
      unsigned e = tw2id.encode(e_str);
      if (f && e) {
        safe_SetValue(f, e, stod(tokens[2]));
        effective++;
      }
    }
    initialized_ = true;
    (*file).close();
    cerr << "# of entries: \t[" << i << "]" << endl;
    cerr << "# of effective entries:\t[" << effective << "]" << endl;
  }

  void ClearCounts() {
#pragma omp parallel for schedule(dynamic)
    for (size_t i = 0; i < counts.size(); i++)
      for (auto &cnt : counts[i]) cnt.second = 0.0;
  }

 private:
  W2WDouble tt;
  W2WDouble counts;
  double threshold_ = 1e-6;
  bool initialized_;  // Disallow new e,f pairs to be added to counts
  bool frozen_;       // Disallow new e,f pairs to be added to counts
};

#endif  // TRANSLATION_TABLE_H
schedule-test.c
/** * Generate predictable page access patterns in order to sanity check the page * analyis trace & thread placement framework. There are several access * patterms implemented, but each pattern has 4 threads sharing an individual * page and hence those 4 threads should be placed together by the partitioning * algorithm. * * Author: Rob Lyerly <rlyerly@vt.edu> * Date: 1/24/2018 */ #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <time.h> #include <omp.h> #include <sys/sysinfo.h> #define XSTR( x ) STR(x) #define STR( x ) #x /* * Knobs controlling length of OpenMP sections & memory used. The number of * iterations can be set at command line. */ #define ITERS 2048 #define PAGES 1024 /* Size definitions */ #define PAGESZ 4096 #define INTS_PER_PAGE (PAGESZ / sizeof(int)) #define ARRSIZE (PAGES * INTS_PER_PAGE) #define CHUNKSZ (INTS_PER_PAGE / 4) #define NS( ts ) ((ts.tv_sec * 1000000000LU) + ts.tv_nsec) typedef int page_array_t [ARRSIZE] __attribute__((aligned(4096))); static page_array_t thearray; #define helptext \ "Generate a predictable page access pattern to sanity check the thread\ placement framework.\n\n\ Usage: thread-schedule [ OPTIONS ]\n\ Options:\n\ -h : print help & exit\n\ -i num : number of iterations to run each access pattern (default: " XSTR(ITERS) ")\n\ -t num : number of threads to use\n" void parse_args(int argc, char** argv, size_t *threads, size_t *iters) { int c; while((c = getopt(argc, argv, "hi:t:")) != -1) { switch(c) { default: printf("WARNING: Ignoring unknown argument '%c'\n", c); break; case 'h': printf(helptext); exit(0); break; case 'i': *iters = atoi(optarg); break; case 't': *threads = atoi(optarg); break; } } } void randomize(page_array_t array) { size_t i; #pragma omp parallel for for(i = 0; i < ARRSIZE; i++) array[i] = rand() % 1024; } /* * Pattern 1: groups of 4 consecutive threads should be mapped to the same * node. */ void add1(page_array_t array, const size_t iters) { size_t iter, i; struct timespec start, end; printf("Region 1: consecutive threads access the same page..."); fflush(stdout); clock_gettime(CLOCK_MONOTONIC, &start); #pragma omp parallel private(iter) { // TODO make this region 1 for(iter = 0; iter < iters; iter++) { #pragma omp for schedule(static, CHUNKSZ) for(i = 0; i < ARRSIZE; i++) array[i] += 1; } } /* end parallel */ clock_gettime(CLOCK_MONOTONIC, &end); printf("%lu ms\n", (NS(end) - NS(start)) / 1000000); } /* * Pattern 2: threads with the same parity should be mapped to the same node * (e.g., evens with evens, odds with odds). 
*/ void add2(page_array_t array, const size_t iters) { size_t iter, i; struct timespec start, end; printf("Region 2: threads with the same parity access the same page..."); fflush(stdout); clock_gettime(CLOCK_MONOTONIC, &start); #pragma omp parallel private(iter) { long offset; int thread = omp_get_thread_num() % 8; if(thread % 2) offset = (4 + (thread / 2) - thread) * CHUNKSZ; else offset = -((thread / 2) * CHUNKSZ); // TODO make this region 2 for(iter = 0; iter < iters; iter++) { #pragma omp for schedule(static, CHUNKSZ) for(i = 0; i < ARRSIZE; i++) array[i + offset] += 2; } } /* end parallel */ clock_gettime(CLOCK_MONOTONIC, &end); printf("%lu ms\n", (NS(end) - NS(start)) / 1000000); } int main(int argc, char** argv) { size_t threads = get_nprocs_conf(), iters = ITERS; struct timespec start, end; parse_args(argc, argv, &threads, &iters); omp_set_num_threads(threads); randomize(thearray); printf("--------------------\nTHREAD SCHEDULE TEST\n--------------------\n"); printf("Running %lu iterations with %lu threads...\n", iters, threads); clock_gettime(CLOCK_MONOTONIC, &start); add1(thearray, iters); add2(thearray, iters); clock_gettime(CLOCK_MONOTONIC, &end); printf("Total execution time: %lu ms\n", (NS(end) - NS(start)) / 1000000); }
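Worth spelling out why each page ends up shared by exactly four threads in add1: schedule(static, CHUNKSZ) deals out chunks round-robin, chunk k to thread k % nthreads, and CHUNKSZ is a quarter of a page. A small sketch of that mapping (illustrative, fixed at 8 threads):

/* Sketch (not part of the benchmark): which threads touch which page under
 * schedule(static, CHUNKSZ)? With 4 chunks per page and chunk k going to
 * thread k % nthreads, threads 4j..4j+3 (mod nthreads) share page j. */
#include <stdio.h>

int main(void)
{
  const int nthreads = 8, chunks_per_page = 4;
  for (int page = 0; page < 4; page++) {
    printf("page %d: threads", page);
    for (int c = 0; c < chunks_per_page; c++)
      printf(" %d", (page * chunks_per_page + c) % nthreads);
    printf("\n");
  }
  return 0;
}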
/** * Generate predictable page access patterns in order to sanity check the page * analyis trace & thread placement framework. There are several access * patterms implemented, but each pattern has 4 threads sharing an individual * page and hence those 4 threads should be placed together by the partitioning * algorithm. * * Author: Rob Lyerly <rlyerly@vt.edu> * Date: 1/24/2018 */ #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <time.h> #include <omp.h> #include <sys/sysinfo.h> #define XSTR( x ) STR(x) #define STR( x ) #x /* * Knobs controlling length of OpenMP sections & memory used. The number of * iterations can be set at command line. */ #define ITERS 2048 #define PAGES 1024 /* Size definitions */ #define PAGESZ 4096 #define INTS_PER_PAGE (PAGESZ / sizeof(int)) #define ARRSIZE (PAGES * INTS_PER_PAGE) #define CHUNKSZ (INTS_PER_PAGE / 4) #define NS( ts ) ((ts.tv_sec * 1000000000LU) + ts.tv_nsec) typedef int page_array_t[ARRSIZE] __attribute__((aligned(4096))); static page_array_t thearray; #define helptext \ "Generate a predictable page access pattern to sanity check the thread\ placement framework.\n\n\ Usage: thread-schedule [ OPTIONS ]\n\ Options:\n\ -h : print help & exit\n\ -i num : number of iterations to run each access pattern (default: " XSTR(ITERS) ")\n\ -t num : number of threads to use\n" void parse_args(int argc, char **argv, size_t * threads, size_t * iters) { int c; while ((c = getopt(argc, argv, "hi:t:")) != -1) { switch (c) { default: printf("WARNING: Ignoring unknown argument '%c'\n", c); break; case 'h': printf(helptext); exit(0); break; case 'i': *iters = atoi(optarg); break; case 't': *threads = atoi(optarg); break; } } } void randomize(page_array_t array) { size_t i; for (i = 0; i < ARRSIZE; i++) array[i] = rand() % 1024; } /* * Pattern 1: groups of 4 consecutive threads should be mapped to the same * node. */ void add1(page_array_t array, const size_t iters) { size_t iter, i; struct timespec start, end; printf("Region 1: consecutive threads access the same page..."); fflush(stdout); clock_gettime(CLOCK_MONOTONIC, &start); //TODO make this region 1 for (iter = 0; iter < iters; iter++) { for (i = 0; i < ARRSIZE; i++) array[i] += 1; } /* end parallel */ clock_gettime(CLOCK_MONOTONIC, &end); printf("%lu ms\n", (NS(end) - NS(start)) / 1000000); } /* * Pattern 2: threads with the same parity should be mapped to the same node * (e.g., evens with evens, odds with odds). 
*/ void add2(page_array_t array, const size_t iters) { size_t iter, i; struct timespec start, end; printf("Region 2: threads with the same parity access the same page..."); fflush(stdout); clock_gettime(CLOCK_MONOTONIC, &start); long offset; int thread = omp_get_thread_num() % 8; if (thread % 2) offset = (4 + (thread / 2) - thread) * CHUNKSZ; else offset = -((thread / 2) * CHUNKSZ); //TODO make this region 2 for (iter = 0; iter < iters; iter++) { for (i = 0; i < ARRSIZE; i++) array[i + offset] += 2; } /* end parallel */ clock_gettime(CLOCK_MONOTONIC, &end); printf("%lu ms\n", (NS(end) - NS(start)) / 1000000); } int main(int argc, char **argv) { size_t threads = get_nprocs_conf(), iters = ITERS; struct timespec start, end; parse_args(argc, argv, &threads, &iters); omp_set_num_threads(threads); randomize(thearray); printf("--------------------\nTHREAD SCHEDULE TEST\n--------------------\n"); printf("Running %lu iterations with %lu threads...\n", iters, threads); clock_gettime(CLOCK_MONOTONIC, &start); add1(thearray, iters); add2(thearray, iters); clock_gettime(CLOCK_MONOTONIC, &end); printf("Total execution time: %lu ms\n", (NS(end) - NS(start)) / 1000000); }
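One behavioral note on this de-parallelized variant: add2 now calls omp_get_thread_num() outside any parallel region, where it returns 0 (the initial thread), so the parity-offset computation always takes the even branch and yields offset == 0. A tiny demonstration of that OpenMP rule:

/* Demo: omp_get_thread_num() reports 0 outside a parallel region, and the
 * per-thread id only inside one. */
#include <omp.h>
#include <stdio.h>

int main(void)
{
  printf("serial thread id: %d\n", omp_get_thread_num());    /* prints 0 */
#pragma omp parallel num_threads(2)
  printf("parallel thread id: %d\n", omp_get_thread_num());  /* prints 0 and 1 */
  return 0;
}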
/** * Generate predictable page access patterns in order to sanity check the page * analyis trace & thread placement framework. There are several access * patterms implemented, but each pattern has 4 threads sharing an individual * page and hence those 4 threads should be placed together by the partitioning * algorithm. * * Author: Rob Lyerly <rlyerly@vt.edu> * Date: 1/24/2018 */ #include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <time.h> #include <omp.h> #include <sys/sysinfo.h> #define XSTR( x ) STR(x) #define STR( x ) #x /* * Knobs controlling length of OpenMP sections & memory used. The number of * iterations can be set at command line. */ #define ITERS 2048 #define PAGES 1024 /* Size definitions */ #define PAGESZ 4096 #define INTS_PER_PAGE (PAGESZ / sizeof(int)) #define ARRSIZE (PAGES * INTS_PER_PAGE) #define CHUNKSZ (INTS_PER_PAGE / 4) #define NS( ts ) ((ts.tv_sec * 1000000000LU) + ts.tv_nsec) typedef int page_array_t[ARRSIZE] __attribute__((aligned(4096))); static page_array_t thearray; #define helptext \ "Generate a predictable page access pattern to sanity check the thread\ placement framework.\n\n\ Usage: thread-schedule [ OPTIONS ]\n\ Options:\n\ -h : print help & exit\n\ -i num : number of iterations to run each access pattern (default: " XSTR(ITERS) ")\n\ -t num : number of threads to use\n" void parse_args(int argc, char **argv, size_t * threads, size_t * iters) { int c; while ((c = getopt(argc, argv, "hi:t:")) != -1) { switch (c) { default: printf("WARNING: Ignoring unknown argument '%c'\n", c); break; case 'h': printf(helptext); exit(0); break; case 'i': *iters = atoi(optarg); break; case 't': *threads = atoi(optarg); break; } } } void randomize(page_array_t array) { size_t i; #pragma omp parallel for for (i = 0; i < ARRSIZE; i++) array[i] = rand() % 1024; } /* * Pattern 1: groups of 4 consecutive threads should be mapped to the same * node. */ void add1(page_array_t array, const size_t iters) { size_t iter, i; struct timespec start, end; printf("Region 1: consecutive threads access the same page..."); fflush(stdout); clock_gettime(CLOCK_MONOTONIC, &start); #pragma omp parallel private(iter) { //TODO make this region 1 for (iter = 0; iter < iters; iter++) { #pragma omp for schedule(static, CHUNKSZ) for (i = 0; i < ARRSIZE; i++) array[i] += 1; } } /* end parallel */ clock_gettime(CLOCK_MONOTONIC, &end); printf("%lu ms\n", (NS(end) - NS(start)) / 1000000); } /* * Pattern 2: threads with the same parity should be mapped to the same node * (e.g., evens with evens, odds with odds). 
*/ void add2(page_array_t array, const size_t iters) { size_t iter, i; struct timespec start, end; printf("Region 2: threads with the same parity access the same page..."); fflush(stdout); clock_gettime(CLOCK_MONOTONIC, &start); #pragma omp parallel private(iter) { long offset; int thread = omp_get_thread_num() % 8; if (thread % 2) offset = (4 + (thread / 2) - thread) * CHUNKSZ; else offset = -((thread / 2) * CHUNKSZ); //TODO make this region 2 for (iter = 0; iter < iters; iter++) { #pragma omp for schedule(static, CHUNKSZ) for (i = 0; i < ARRSIZE; i++) array[i + offset] += 2; } } /* end parallel */ clock_gettime(CLOCK_MONOTONIC, &end); printf("%lu ms\n", (NS(end) - NS(start)) / 1000000); } int main(int argc, char **argv) { size_t threads = get_nprocs_conf(), iters = ITERS; struct timespec start, end; parse_args(argc, argv, &threads, &iters); omp_set_num_threads(threads); randomize(thearray); printf("--------------------\nTHREAD SCHEDULE TEST\n--------------------\n"); printf("Running %lu iterations with %lu threads...\n", iters, threads); clock_gettime(CLOCK_MONOTONIC, &start); add1(thearray, iters); add2(thearray, iters); clock_gettime(CLOCK_MONOTONIC, &end); printf("Total execution time: %lu ms\n", (NS(end) - NS(start)) / 1000000); }
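The offset arithmetic in add2 is easiest to check numerically: even threads are shifted back onto chunks 0..3 of one page and odd threads forward onto chunks 4..7 of the next, so threads of the same parity share pages. A sketch that prints the mapping for one round of 8 threads:

/* Sketch: effective first chunk touched by each thread in add2 above.
 * Even threads land in chunks 0..3 (one page), odd threads in chunks 4..7
 * (the next page). Offsets are in units of CHUNKSZ. */
#include <stdio.h>

int main(void)
{
  for (int t = 0; t < 8; t++) {
    int off = (t % 2) ? (4 + t / 2 - t) : -(t / 2);  /* same formula as add2 */
    printf("thread %d: first chunk %d -> page %d\n", t, t + off, (t + off) / 4);
  }
  return 0;
}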
imd_colrad.h
#include "imd.h" #include <sys/time.h> #include <gsl/gsl_integration.h> #include <gsl/gsl_errno.h> // #define USEFLOAT // hauptsächlich in der funktion genexptint. Profiling zeigte, dass // hier die meiste zeit verbraucht wird -> float verdoppelt performance #ifdef USEFLOAT typedef float Real; #define REALTYPE MPI_FLOAT #else typedef double Real; #define REALTYPE MPI_DOUBLE #endif #ifdef USEFLOAT #define EXPR expf //exp zu floaten ist eine ganz mieeese idee #define SQRTR sqrtf #define POWR powf #define LOGR logf #else #define EXPR exp #define SQRTR sqrt #define POWR pow #define LOGR log #endif // ********************************************************* // PHYSICAL CONSTANTS // ********************************************************* // const double eV2J=1.6021766E-19; const Real eV2H=0.03674932; //eV to Hartree const Real colrad_reltol=1e-5; const Real colrad_abstol=10.0; // const Real J2eV=6.2415091E18; const Real planck=6.62607004E-34; // J/s const Real bohr_radius=0.52917721067E-10; // m const Real bohr_radius_sq=2.800285202924816e-21; const Real hbar_cub=1.172812163789953e-102; //hbar^3 const Real double_emass_pow_3_2 = 2.459112949719466e-45; // (2*emass)^3/2 const int MAXLINE = 255; const Real pi=3.141592653589793; const Real pi_sq=9.869604401089358; const Real E_ion_H=13.6; // eV const Real E_ion_H_J=2.178960176000000e-18; // J const Real E_ion_H_sq_J=4.747867448593952e-36; const Real colrad_tequi=1e-12;//TEST// 1e-12; //bei initial equi ohne Temperatur-variation erst einmal //die Saha-besetzungsdichten equilibrieren //const double LIGHTSPEED=2.997925458e8; // m/s Real LASERFREQ; int colrad_ydot(double t, N_Vector u, N_Vector udot, void *user_data); void do_Saha(Real Te,Real totalc,Real ne,N_Vector y); int colrad_GetCoeffs(N_Vector y,Real It, void * user_data); // Die Zwei müssen nach Prototypes.h // void do_colrad(double dt); // void colrad_init(void); void colrad_read_states(void); void colrad_Saha_init(int i,int j,int k); // ****************************************************************************** // * CROSS SECTION INTEGRATION STUFF // ****************************************************************************** gsl_integration_workspace * winteg_inner=NULL; gsl_integration_workspace * winteg_outer=NULL; gsl_integration_workspace * winteg_fermi=NULL; gsl_integration_workspace * winteg_exc=NULL; //excitation gsl_integration_romberg_workspace * winteg_rb_inner=NULL; gsl_integration_romberg_workspace * winteg_rb_outer=NULL; struct my_f_params { Real ne; Real T;Real mu; Real E;Real DeltaE; int allowed;}; // struct my_f_params fparams_inner; //For inner integrand // struct my_f_params fparams_outer; //outer integrand // struct my_f_params fparams_fermi; // struct my_f_params fparams_exc; double inner_integrand_ionization(double x, void *p); // integrate along E' double outer_integrand_ionization(double x,void *p); // integrate along E Real double_integral_ionization(Real ne,Real T, Real mu, Real DeltaE); //evaluates double integral double inner_integrand_recombination(double x, void *p); double outer_integrand_recombination(double x,void *p); Real double_integral_recombination(Real ne,Real T, Real mu, Real DeltaE); double integrand_excitation(double x,void *p); Real eval_excitation_integral(Real ne,Real T,Real mu, Real DeltaE, int allowed); Real eval_dexcitation_integral(Real ne,Real T,Real mu, Real DeltaE, int allowed); double integrand_deexcitation(double x,void *p); double fermi_integrand(double x, void *p); Real eval_fermi_integrand(Real ne,Real T, Real mu); double 
double integrand_excitation_debug(double x, void *p);
double outer_integrand_ionization2(double x, struct my_f_params *p);
Real double_integral_ionization2(Real ne, Real T, Real mu, Real DeltaE); // evaluates double integral
double inner_integrand_ionization2(double x, struct my_f_params *p);

// **********************************************************************************************
// * PAR INTEGRAL STUFF
// **********************************************************************************************
int terminate_gkq;
int terminate_gkq_outer;
int terminate_gkq_inner;
int terminate_serial;
int gkq_iter_serial; // nr of iterations

const double gkq_alpha=0.816496580927726;
const double gkq_beta=0.447213595499958;
static const double xgkq[12] = {
  0.0,
  -0.942882415695480, -0.816496580927726, -0.641853342345781,
  -0.447213595499958, -0.236383199662150,
  0.0,
  0.236383199662150, 0.447213595499958, 0.641853342345781,
  0.816496580927726, 0.942882415695480
};

Real integral_simpson(Real (*f)(Real, void*), Real a, Real b, int n, void *p);
int simpson_error;
const Real tolmax=1e-20;
const Real simpson_itermax=120;

#define INITIAL_STACK_SIZE 128 /* initial size of new stacks */

/* the stack structure */
struct stack_s {
  int el_count;    /* count of elements on stack */
  int el_size;     /* size of an element */
  int mem_reserve; /* allocated memory for stack */
  void *elements;  /* pointer to begin of stack */
};

typedef struct _work_t {
  double a;
  double b;
  double tol;
  double S;
  double fa;
  double fb;
  double fm;
  double rec;
  int iter;
  struct my_f_params *p; // pointer to params
} work_t;

typedef struct _work_t_gkq {
  double a;
  double b;
  double toler;
  double I_13;
  double I_prev;
  double fa;
  double fb;
  struct my_f_params *p; // pointer to params
  short int iter;
} work_gkq;

typedef struct stack_s* stack_t;

double integral_simpson_par(double (*f)(double, struct my_f_params*), stack_t stack);
double gkq_adapt_OMP(double (*f)(double, struct my_f_params*), stack_t stack);
double gkq_OMP(double (*f)(double, struct my_f_params*), double a, double b, double TOL,
               struct my_f_params *p, stack_t stack);
double gkq_serial(double (*f)(double, struct my_f_params*), double a, double b, double TOL,
                  struct my_f_params *p);
double gkq_adapt_serial(double (*f)(double, struct my_f_params*), double a, double b,
                        double fa, double fb, double toler, double I_13,
                        struct my_f_params *p);
// void create_stack(stack_t* stack, int element_size);
// int empty_stack(stack_t stack);
// void push_stack(stack_t stack, void* element);
// void pop_stack(stack_t stack, void* element);

/******************************************
 * create new stack
 ******************************************/
void create_stack(stack_t *stack,   /* stack to create */
                  int element_size) /* size of a stack element */
{
  int initial_size = INITIAL_STACK_SIZE;
  /* allocate memory for new stack struct */
  (*stack) = (stack_t) malloc(sizeof(struct stack_s));
  if (!(*stack)) {
    char errstr[255];
    sprintf(errstr, "error: could not allocate memory for stack.. Abort.\n");
    error(errstr);
    // exit(1);
  }
  /* allocate memory for stack elements */
  (*stack)->elements = (void*) malloc(element_size * initial_size);
  (*stack)->mem_reserve = initial_size;
  if (!(*stack)->elements) {
    char errstr[255];
    sprintf(errstr, "error: could not allocate memory for stack.. Abort.\n");
    error(errstr);
  }
Abort.\n"); error(errstr); } (*stack)->el_size = element_size; (*stack)->el_count = 0; } /***************************************** * check if the stack is empty *****************************************/ int empty_stack(stack_t stack) { return stack->el_count <= 0; } /***************************************** * push a element on stack *****************************************/ void push_stack(stack_t stack, /* target stack */ void* element) /* element to push */ { int i, new_reserve; int log2_count; /* check if we need more memory for stack */ if (stack->el_count >= stack->mem_reserve) { /* calculate new size for the stack it should be a power of two */ for (i = stack->el_count, log2_count = 0; i > 0; i>>1, log2_count++); new_reserve = 1 << log2_count; /* reallocate memory for phase thread tables and nullify new values */ stack->elements = (void *) realloc(stack->elements, stack->el_size * new_reserve); if (!stack->elements){ char errstr [255]; sprintf(errstr, "error: can't reallocate stack.. Aborting\n"); error(errstr); // exit(1); } stack->mem_reserve = new_reserve; } /* now push the element on top of the stack */ memcpy((char*)stack->elements + stack->el_count*stack->el_size, element, stack->el_size); stack->el_count++; } /***************************************** * pop an element from stack *****************************************/ void pop_stack( stack_t stack, /* target stack */ void* element) /* where poped el. should be stored */ { if (stack->el_count <= 0){ char errstr[255]; sprintf(errstr, "error: trying to pop from empty stack.\n"); error(errstr); // exit(2); } stack->el_count--; memcpy(element, (char*)stack->elements + stack->el_count*stack->el_size, stack->el_size); } // *************************************************************************** // * Gauss-kronard quadrature, parallel // *************************************************************************** double gkq_OMP(double (*f)(double, struct my_f_params*), double a, double b, double TOL, struct my_f_params* p,stack_t stack) { //1st integration double result=0.0; // ********************************************* double m=0.5*(a+b); double h=0.5*(b-a); double y[13]; double fa=y[0]=f(a,p); double fb=y[12]=f(b,p); int i; for(i=1;i<12;i++) y[i]=f(m+xgkq[i]*h,p); double I_4= (h/6.0)*(y[0]+y[12]+5.0*(y[4]+y[8])); // 4-point gauss-lobatto double I_7= (h/1470.0)*(77.0*(y[0]+y[12])+432.0*(y[2]+y[10])+ // 7-point kronrod 625.0*(y[4]+y[8])+672.0*y[6]); double I_13= h*(0.0158271919734802*(y[0]+y[12])+0.0942738402188500*(y[1]+y[11])+0.155071987336585*(y[2]+y[10])+ 0.188821573960182*(y[3]+y[9])+0.199773405226859*(y[4]+y[8])+0.224926465333340*(y[5]+y[7])+ 0.242611071901408*y[6]); //13-point Kronrod double Err1=fabs(I_7-I_13); double Err2=fabs(I_4-I_13); double r=(Err2 != 0.0) ? Err1/Err2 : 1.0; double toler=(r > 0.0 && r < 1.0) ? 
  if (I_13 == 0) I_13 = b-a;
  I_13 = fabs(I_13);

  // Prepare work and push onto stack
  work_gkq work;
  work.a = a;
  work.b = b;
  work.toler = toler;
  work.I_13 = I_13;
  work.fa = fa;
  work.fb = fb;
  work.p = p;
  work.I_prev = I_7;
  work.iter = 0;
  // Guard against the following:
  // OUT OF TOLERANCE !!!, mll:3.0162e-18, a:3.0162e-18, b:3.0162e-18, mrr:3.0162e-18,
  // I_7-I_4:0.0000e+00, tol:1.6002e-315, I_13:7.0585e-313
  if (I_13 < 1e-150) return 0;
  push_stack(stack, &work);
  result = gkq_adapt_OMP(f, stack);
  return result;
}

double gkq_serial(double (*f)(double, struct my_f_params*), double a, double b,
                  double TOL, struct my_f_params *p)
{
  // 1st integration
  double result = 0.0;
  gkq_iter_serial = 0;
  // *********************************************
  double m = 0.5*(a+b);
  double h = 0.5*(b-a);
  double y[13];
  double fa = y[0] = f(a,p);
  double fb = y[12] = f(b,p);
  int i;
  for (i = 1; i < 12; i++)
    y[i] = f(m + xgkq[i]*h, p);
  double I_4 = (h/6.0)*(y[0]+y[12]+5.0*(y[4]+y[8])); // 4-point Gauss-Lobatto
  double I_7 = (h/1470.0)*(77.0*(y[0]+y[12])+432.0*(y[2]+y[10])+ // 7-point Kronrod
                           625.0*(y[4]+y[8])+672.0*y[6]);
  double I_13 = h*(0.0158271919734802*(y[0]+y[12])+0.0942738402188500*(y[1]+y[11])+
                   0.155071987336585*(y[2]+y[10])+0.188821573960182*(y[3]+y[9])+
                   0.199773405226859*(y[4]+y[8])+0.224926465333340*(y[5]+y[7])+
                   0.242611071901408*y[6]); // 13-point Kronrod
  double Err1 = fabs(I_7-I_13);
  double Err2 = fabs(I_4-I_13);
  double r = (Err2 != 0.0) ? Err1/Err2 : 1.0;
  double toler = (r > 0.0 && r < 1.0) ? TOL/r : TOL;
  if (I_13 == 0) I_13 = b-a;
  I_13 = fabs(I_13);
  result = gkq_adapt_serial(f, a, b, fa, fb, toler, I_13, p);
  return result;
}

// ***********************************************
// * RECURSIVE ADAPTATION ROUTINE FOR PARALLEL GK-QUADRATURE
// **********************************************
double gkq_adapt_OMP(double (*f)(double, struct my_f_params*), stack_t stack)
{
  work_gkq work;
  work.iter = 0;
  int ready, idle, busy;
  double integral_result = 0.0;
  busy = 0;
  terminate_gkq = 0;
#pragma omp parallel default(none) \
  shared(stack, integral_result, f, busy, terminate_gkq, myid) \
  private(work, idle, ready)
  {
    // printf("me:%d, err:%d\n", omp_get_thread_num(), simpson_error);
    ready = 0;
    idle = 1;
    while (!ready) // && !terminate_gkq) // && !simpson_error) <-- NOT like this!
    {
#pragma omp critical (stack)
      {
        if (!empty_stack(stack)) {
          /* we have new work */
          pop_stack(stack, &work);
          if (idle) {
            /* say others i'm busy */
            busy += 1;
            idle = 0;
          }
        } else {
          /* no new work on stack */
          if (!idle) {
            busy -= 1;
            idle = 1;
          }
          /* nobody has anything to do; let us leave the loop */
          if (busy == 0) {
            ready = 1;
          }
        }
      } /* end critical(stack) */
      if (idle) continue; // if ready==1 --> leave loop

      double I_prev = work.I_prev;
      double a = work.a;
      double b = work.b;
      double toler = work.toler;
      double I_13 = work.I_13;
      double fa = work.fa;
      double fb = work.fb;
      int iter = work.iter;
      // double *y = work.y; // not needed!
      struct my_f_params *p = work.p;
      double m = (a+b)/2;
      double h = (b-a)/2;
      double mll = m - gkq_alpha*h;
      double ml = m - gkq_beta*h;
      double mr = m + gkq_beta*h;
      double mrr = m + gkq_alpha*h;
      double fmll = f(mll,p);
      double fml = f(ml,p);
      double fm = f(m,p);
      double fmr = f(mr,p);
      double fmrr = f(mrr,p);
      double I_4 = h/6.0*(fa+fb+5.0*(fml+fmr)); // 4-point Gauss-Lobatto formula.
      double I_7 = h/1470.0*(77.0*(fa+fb)+432.0*(fmll+fmrr)+625.0*(fml+fmr)+672.0*fm);
      // if (myid == 1)
      //   printf("I_7:%.4e, I_13:%.4e, I_4:%.4e, minus:%.4e, to:%.4e\n",
      //          I_7, I_13, I_4, I_7-I_4, toler*I_13);
      int maxiter = 50; // max. subdivisions
subdivisions double abstol=1e-30; work.I_prev=I_7; // für abstolcheck in nächster recursion if (fabs(I_7-I_4) <= toler*I_13 || mll <= a || b <= mrr || iter > maxiter || fabs(I_7-I_prev) < abstol ) { if ((mll <= a || b <= mrr)) //Error { // out_of_tolerance=true; // Interval contains no more machine numbers // printf("OUT OF TOLERANCE !!!, mll:%.4e, a:%.4e, b:%.4e, mrr:%.4e,I_7-I_4:%.4e, tol:%.4e,I_13:%.4e\n", // mll,b,b,mrr,I_7-I_4, toler*I_13,I_13); terminate_gkq=1; } #pragma omp critical (integral_result) { integral_result += I_7; //Terminate recursion. } // printf("me ok:%d, a:%f,b:%f, tler:%.5e,I_4:%f,I_7:%f,ubteg;%.4e\n", omp_get_thread_num(), a,b,toler,I_4,I_7,integral_result); } else //subdivide interval and push new work on stack { #pragma omp critical (stack) { // printf("me NOOOO:%d, a:%f,b:%f, tler:%.5e,I_4:%f,I_7:%f\n", omp_get_thread_num(), a,b,toler,I_4,I_7); work.iter=iter+1; work.a=a; work.b=mll; work.fa=fa; work.fb=fmll; push_stack(stack, &work); work.a=mll; work.b=ml; work.fa=fmll; work.fb=fml; push_stack(stack, &work); work.a=ml; work.b=m; work.fa=fml; work.fb=fm; push_stack(stack, &work); work.a=m; work.b=mr; work.fa=fm; work.fb=fmr; push_stack(stack, &work); work.a=mr; work.b=mrr; work.fa=fmr; work.fb=fmrr; push_stack(stack, &work); work.a=mrr; work.b=b; work.fa=fmrr; work.fb=fb; push_stack(stack, &work); } // pragma critical stack } // else ..non-acceptable error } // while } /* end omp parallel */ return integral_result; } double gkq_adapt_serial(double (*f)(double, struct my_f_params*), double a, double b, double fa, double fb, double toler,double I_13, struct my_f_params* p) { double m = (a+b)/2; double h = (b -a)/2; double mll=m-gkq_alpha*h; double ml=m-gkq_beta*h; double mr=m+gkq_beta*h; double mrr=m+gkq_alpha*h; double fmll=f(mll,p); double fml=f(ml,p); double fm=f(m,p); double fmr=f(mr,p); double fmrr=f(mrr,p); double I_4=h/6.0*(fa+fb+5.0*(fml+fmr)); // 4-point Gauss-Lobatto formula. double I_7=h/1470.0*(77.0*(fa+fb)+432.0*(fmll+fmrr)+625.0*(fml+fmr)+672.0*fm); gkq_iter_serial++; if ( (fabs(I_7-I_4) <= toler*I_13 || mll <= a || b <= mrr) && gkq_iter_serial) { if ((mll <= a || b <= mrr) && !terminate_serial) //Error { // out_of_tolerance=true; // Interval contains no more machine numbers printf("OUT OF TOLERANCE !!!, mll:%.4e, a:%.4e, b:%.4e, mrr:%.4e\n", mll,b,b,mrr); terminate_serial=1; } // printf("me ok:%d, a:%f,b:%f, tler:%.5e,I_4:%f,I_7:%f\n", omp_get_thread_num(), a,b,toler,I_4,I_7); return I_7; } else { // printf("me NOOOO:%d, a:%f,b:%f, tler:%.5e,I_4:%f,I_7:%f\n", omp_get_thread_num(), a,b,toler,I_4,I_7); return gkq_adapt_serial(f, a,mll,fa,fmll,toler,I_13,p) + gkq_adapt_serial(f, mll,ml,fmll,fml,toler,I_13,p) + gkq_adapt_serial(f, ml,m,fml,fm,toler,I_13,p) + gkq_adapt_serial(f, m,mr,fm,fmr,toler,I_13,p) + gkq_adapt_serial(f, mr,mrr,fmr,fmrr,toler,I_13,p) + gkq_adapt_serial(f, mrr,b,fmrr,fb,toler,I_13,p); } }
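Both drivers above implement the same adaptation rule: evaluate the 4-point Gauss-Lobatto estimate I_4 and its 7-point Kronrod extension I_7 on shared nodes, accept I_7 when |I_7 - I_4| <= toler*|I_13|, and otherwise split [a,b] into six subintervals at those nodes. A minimal, self-contained serial sketch of just that rule follows; f_demo and the crude I_13 stand-in are assumptions for illustration, not the file's actual integrands or driver:

#include <stdio.h>
#include <math.h>

/* Gauss-Lobatto/Kronrod node offsets, same values as gkq_alpha/gkq_beta above */
static const double ALPHA = 0.816496580927726;  /* sqrt(2/3) */
static const double BETA  = 0.447213595499958;  /* 1/sqrt(5) */

static double f_demo(double x) { return exp(-x * x); }  /* hypothetical integrand */

/* One adaptation step: accept I_7 or split [a,b] into six subintervals. */
static double adapt(double a, double b, double fa, double fb,
                    double toler, double I13)
{
    double m = 0.5 * (a + b), h = 0.5 * (b - a);
    double mll = m - ALPHA * h, ml = m - BETA * h;
    double mr  = m + BETA * h,  mrr = m + ALPHA * h;
    double fmll = f_demo(mll), fml = f_demo(ml), fm = f_demo(m);
    double fmr = f_demo(mr), fmrr = f_demo(mrr);
    double I4 = h / 6.0 * (fa + fb + 5.0 * (fml + fmr));
    double I7 = h / 1470.0 * (77.0 * (fa + fb) + 432.0 * (fmll + fmrr)
                              + 625.0 * (fml + fmr) + 672.0 * fm);
    if (fabs(I7 - I4) <= toler * I13 || mll <= a || b <= mrr)
        return I7;                     /* accepted (or interval exhausted) */
    return adapt(a, mll, fa, fmll, toler, I13)
         + adapt(mll, ml, fmll, fml, toler, I13)
         + adapt(ml, m, fml, fm, toler, I13)
         + adapt(m, mr, fm, fmr, toler, I13)
         + adapt(mr, mrr, fmr, fmrr, toler, I13)
         + adapt(mrr, b, fmrr, fb, toler, I13);
}

int main(void)
{
    double a = 0.0, b = 1.0;
    double fa = f_demo(a), fb = f_demo(b);
    /* crude stand-in for the 13-point reference scale used by gkq_serial */
    double I13 = fabs(0.5 * (b - a) * (fa + fb));
    printf("integral ~ %.12f\n", adapt(a, b, fa, fb, 1e-10, I13));
    return 0;
}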
#include "imd.h" #include <sys/time.h> #include <gsl/gsl_integration.h> #include <gsl/gsl_errno.h> // #define USEFLOAT // hauptsächlich in der funktion genexptint. Profiling zeigte, dass // hier die meiste zeit verbraucht wird -> float verdoppelt performance #ifdef USEFLOAT typedef float Real; #define REALTYPE MPI_FLOAT #else typedef double Real; #define REALTYPE MPI_DOUBLE #endif #ifdef USEFLOAT #define EXPR expf //exp zu floaten ist eine ganz mieeese idee #define SQRTR sqrtf #define POWR powf #define LOGR logf #else #define EXPR exp #define SQRTR sqrt #define POWR pow #define LOGR log #endif // ********************************************************* // PHYSICAL CONSTANTS // ********************************************************* // const double eV2J=1.6021766E-19; const Real eV2H=0.03674932; //eV to Hartree const Real colrad_reltol=1e-5; const Real colrad_abstol=10.0; // const Real J2eV=6.2415091E18; const Real planck=6.62607004E-34; // J/s const Real bohr_radius=0.52917721067E-10; // m const Real bohr_radius_sq=2.800285202924816e-21; const Real hbar_cub=1.172812163789953e-102; //hbar^3 const Real double_emass_pow_3_2 = 2.459112949719466e-45; // (2*emass)^3/2 const int MAXLINE = 255; const Real pi=3.141592653589793; const Real pi_sq=9.869604401089358; const Real E_ion_H=13.6; // eV const Real E_ion_H_J=2.178960176000000e-18; // J const Real E_ion_H_sq_J=4.747867448593952e-36; const Real colrad_tequi=1e-12;//TEST// 1e-12; //bei initial equi ohne Temperatur-variation erst einmal //die Saha-besetzungsdichten equilibrieren //const double LIGHTSPEED=2.997925458e8; // m/s Real LASERFREQ; int colrad_ydot(double t, N_Vector u, N_Vector udot, void *user_data); void do_Saha(Real Te,Real totalc,Real ne,N_Vector y); int colrad_GetCoeffs(N_Vector y,Real It, void * user_data); // Die Zwei müssen nach Prototypes.h // void do_colrad(double dt); // void colrad_init(void); void colrad_read_states(void); void colrad_Saha_init(int i,int j,int k); // ****************************************************************************** // * CROSS SECTION INTEGRATION STUFF // ****************************************************************************** gsl_integration_workspace * winteg_inner=NULL; gsl_integration_workspace * winteg_outer=NULL; gsl_integration_workspace * winteg_fermi=NULL; gsl_integration_workspace * winteg_exc=NULL; //excitation gsl_integration_romberg_workspace * winteg_rb_inner=NULL; gsl_integration_romberg_workspace * winteg_rb_outer=NULL; struct my_f_params { Real ne; Real T;Real mu; Real E;Real DeltaE; int allowed;}; // struct my_f_params fparams_inner; //For inner integrand // struct my_f_params fparams_outer; //outer integrand // struct my_f_params fparams_fermi; // struct my_f_params fparams_exc; double inner_integrand_ionization(double x, void *p); // integrate along E' double outer_integrand_ionization(double x,void *p); // integrate along E Real double_integral_ionization(Real ne,Real T, Real mu, Real DeltaE); //evaluates double integral double inner_integrand_recombination(double x, void *p); double outer_integrand_recombination(double x,void *p); Real double_integral_recombination(Real ne,Real T, Real mu, Real DeltaE); double integrand_excitation(double x,void *p); Real eval_excitation_integral(Real ne,Real T,Real mu, Real DeltaE, int allowed); Real eval_dexcitation_integral(Real ne,Real T,Real mu, Real DeltaE, int allowed); double integrand_deexcitation(double x,void *p); double fermi_integrand(double x, void *p); Real eval_fermi_integrand(Real ne,Real T, Real mu); double 
integrand_excitation_debug(double x,void *p); double outer_integrand_ionization2(double x,struct my_f_params* p); Real double_integral_ionization2(Real ne,Real T, Real mu, Real DeltaE); //evaluates double integral double inner_integrand_ionization2(double x, struct my_f_params* p); // ********************************************************************************************** // * PAR INTEGRAL STUFF // ********************************************************************************************** int terminate_gkq; int terminate_gkq_outer; int terminate_gkq_inner; int terminate_serial; int gkq_iter_serial; // nr of iterations const double gkq_alpha=0.816496580927726; const double gkq_beta=0.447213595499958; static const double xgkq[12] = { 0.0, -0.942882415695480, -0.816496580927726, -0.641853342345781, -0.447213595499958, -0.236383199662150, 0.0, 0.236383199662150, 0.447213595499958, 0.641853342345781, 0.816496580927726, 0.942882415695480 }; Real integral_simpson(Real (*f)(Real, void*), Real a, Real b,int n,void* p); int simpson_error; const Real tolmax=1e-20; const Real simpson_itermax=120; #define INITIAL_STACK_SIZE 128 /* initial size of new stacks */ /* the stack structure */ struct stack_s{ int el_count; /* count of elements on stack */ int el_size; /* size of an element */ int mem_reserve; /* allocated memory for stack */ void* elements; /* pointer to begin of stack */ }; typedef struct _work_t{ double a; double b; double tol; double S; double fa; double fb; double fm; double rec; int iter; struct my_f_params * p; //pointer auf params } work_t; typedef struct _work_t_gkq{ double a; double b; double toler; double I_13; double I_prev; double fa; double fb; struct my_f_params * p; //pointer auf params shortint iter; } work_gkq; typedef struct stack_s* stack_t; double integral_simpson_par(double (*f)(double, struct my_f_params*), stack_t stack); double gkq_adapt_OMP(double (*f)(double, struct my_f_params*), stack_t stack); double gkq_OMP(double (*f)(double, struct my_f_params*), double a, double b, double TOL, struct my_f_params* p,stack_t stack); double gkq_serial(double (*f)(double, struct my_f_params*), double a, double b, double TOL, struct my_f_params* p); double gkq_adapt_serial(double (*f)(double, struct my_f_params*), double a, double b, double fa,double fb, double toler,double I_13, struct my_f_params* p); // void create_stack(stack_t* stack, int element_size); // int empty_stack(stack_t stack); // void push_stack(stack_t stack, void* element); // void pop_stack(stack_t stack, void* element); /****************************************** * create new stack ******************************************/ void create_stack( stack_t* stack, /* stack to create */ int element_size) /* size of a stack element */ { int initial_size = INITIAL_STACK_SIZE; /* allocate memory for new stack struct */ (*stack) = (stack_t) malloc(sizeof(struct stack_s)); if (!(*stack)){ char errstr[255]; sprintf(errstr, "error: could not allocate memory for stack.. Abort.\n"); error(errstr); // exit(1); } /* allocate memory for stack elements */ (*stack)->elements = (void*) malloc(element_size * initial_size); (*stack)->mem_reserve = initial_size; if (!(*stack)->elements){ char errstr[255]; sprintf(errstr, "error: could not allocate memory for stack.. 
Abort.\n"); error(errstr); } (*stack)->el_size = element_size; (*stack)->el_count = 0; } /***************************************** * check if the stack is empty *****************************************/ int empty_stack(stack_t stack) { return stack->el_count <= 0; } /***************************************** * push a element on stack *****************************************/ void push_stack(stack_t stack, /* target stack */ void* element) /* element to push */ { int i, new_reserve; int log2_count; /* check if we need more memory for stack */ if (stack->el_count >= stack->mem_reserve) { /* calculate new size for the stack it should be a power of two */ for (i = stack->el_count, log2_count = 0; i > 0; i>>1, log2_count++); new_reserve = 1 << log2_count; /* reallocate memory for phase thread tables and nullify new values */ stack->elements = (void *) realloc(stack->elements, stack->el_size * new_reserve); if (!stack->elements){ char errstr [255]; sprintf(errstr, "error: can't reallocate stack.. Aborting\n"); error(errstr); // exit(1); } stack->mem_reserve = new_reserve; } /* now push the element on top of the stack */ memcpy((char*)stack->elements + stack->el_count*stack->el_size, element, stack->el_size); stack->el_count++; } /***************************************** * pop an element from stack *****************************************/ void pop_stack( stack_t stack, /* target stack */ void* element) /* where poped el. should be stored */ { if (stack->el_count <= 0){ char errstr[255]; sprintf(errstr, "error: trying to pop from empty stack.\n"); error(errstr); // exit(2); } stack->el_count--; memcpy(element, (char*)stack->elements + stack->el_count*stack->el_size, stack->el_size); } // *************************************************************************** // * Gauss-kronard quadrature, parallel // *************************************************************************** double gkq_OMP(double (*f)(double, struct my_f_params*), double a, double b, double TOL, struct my_f_params* p,stack_t stack) { //1st integration double result=0.0; // ********************************************* double m=0.5*(a+b); double h=0.5*(b-a); double y[13]; double fa=y[0]=f(a,p); double fb=y[12]=f(b,p); int i; for(i=1;i<12;i++) y[i]=f(m+xgkq[i]*h,p); double I_4= (h/6.0)*(y[0]+y[12]+5.0*(y[4]+y[8])); // 4-point gauss-lobatto double I_7= (h/1470.0)*(77.0*(y[0]+y[12])+432.0*(y[2]+y[10])+ // 7-point kronrod 625.0*(y[4]+y[8])+672.0*y[6]); double I_13= h*(0.0158271919734802*(y[0]+y[12])+0.0942738402188500*(y[1]+y[11])+0.155071987336585*(y[2]+y[10])+ 0.188821573960182*(y[3]+y[9])+0.199773405226859*(y[4]+y[8])+0.224926465333340*(y[5]+y[7])+ 0.242611071901408*y[6]); //13-point Kronrod double Err1=fabs(I_7-I_13); double Err2=fabs(I_4-I_13); double r=(Err2 != 0.0) ? Err1/Err2 : 1.0; double toler=(r > 0.0 && r < 1.0) ? 
TOL/r : TOL; if(I_13 == 0) I_13=b-a; I_13=fabs(I_13); //Prepare work and push onto stack work_gkq work; work.a = a; work.b = b; work.toler = toler; work.I_13=I_13; work.fa=fa; work.fb=fb; work.p=p; work.I_prev=I_7; //ANTI-FOLGENDES: //OUT OF TOLERANCE !!!, mll:3.0162e-18, a:3.0162e-18, b:3.0162e-18, mrr:3.0162e-18,I_7-I_4:0.0000e+00, tol:1.6002e-315,I_13:7.0585e-313 if(I_13 < 1e-150) return 0; push_stack(stack, &work); result=gkq_adapt(f,stack); return result; } double gkq_serial(double (*f)(double, struct my_f_params*), double a, double b, double TOL, struct my_f_params* p) { //1st integration double result=0.0; gkq_iter_serial=0; // ********************************************* double m=0.5*(a+b); double h=0.5*(b-a); double y[13]; double fa=y[0]=f(a,p); double fb=y[12]=f(b,p); int i; for(i=1;i<12;i++) y[i]=f(m+xgkq[i]*h,p); double I_4= (h/6.0)*(y[0]+y[12]+5.0*(y[4]+y[8])); // 4-point gauss-lobatto double I_7= (h/1470.0)*(77.0*(y[0]+y[12])+432.0*(y[2]+y[10])+ // 7-point kronrod 625.0*(y[4]+y[8])+672.0*y[6]); double I_13= h*(0.0158271919734802*(y[0]+y[12])+0.0942738402188500*(y[1]+y[11])+0.155071987336585*(y[2]+y[10])+ 0.188821573960182*(y[3]+y[9])+0.199773405226859*(y[4]+y[8])+0.224926465333340*(y[5]+y[7])+ 0.242611071901408*y[6]); //13-point Kronrod double Err1=fabs(I_7-I_13); double Err2=fabs(I_4-I_13); double r=(Err2 != 0.0) ? Err1/Err2 : 1.0; double toler=(r > 0.0 && r < 1.0) ? TOL/r : TOL; if(I_13 == 0) I_13=b-a; I_13=fabs(I_13); result=gkq_adapt_serial(f,a,b,fa,fb,toler,I_13, p); return result; } // *********************************************** // * RECURSIVE ADAPTION ROUTINE FOR PARALLEL-GK-QUADRATURE // ********************************************** double gkq_adapt_OMP(double (*f)(double, struct my_f_params*), stack_t stack) { work_gkq work; work.iter=0; int ready, idle, busy; double integral_result = 0.0; busy = 0; terminate_gkq=0; shared(stack, integral_result,f,busy,terminate_gkq,myid) \ private(work, idle, ready) { // printf("me:%d, err:%d\n",omp_get_thread_num(),simpson_error); ready = 0; idle = 1; while(!ready) // && !terminate_gkq)// && !simpson_error) //<-- so NICHT! { if (!empty_stack(stack)) { /* we have new work */ pop_stack(stack, &work); if (idle) { /* say others i'm busy */ busy += 1; idle = 0; } } else { /* no new work on stack */ if (!idle){ busy -= 1; idle = 1; } /* nobody has anything to do; let us leave the loop */ if (busy == 0) { ready = 1; } } /* end critical(stack) */ if (idle) continue; //if ready==1 --> leave loop double I_prev=work.I_prev; double a = work.a; double b = work.b; double toler = work.toler; double I_13=work.I_13; double fa=work.fa; double fb=work.fb; int iter=work.iter; // double *y= work.y; // brauch ich nicht! struct my_f_params * p = work.p; double m = (a+b)/2; double h = (b -a)/2; double mll=m-gkq_alpha*h; double ml=m-gkq_beta*h; double mr=m+gkq_beta*h; double mrr=m+gkq_alpha*h; double fmll=f(mll,p); double fml=f(ml,p); double fm=f(m,p); double fmr=f(mr,p); double fmrr=f(mrr,p); double I_4=h/6.0*(fa+fb+5.0*(fml+fmr)); // 4-point Gauss-Lobatto formula. double I_7=h/1470.0*(77.0*(fa+fb)+432.0*(fmll+fmrr)+625.0*(fml+fmr)+672.0*fm); // if(myid==1) // printf("I_7:%.4e, I_13:%.4e,I_4:%.4e, minus:%.4e, to:%.4e\n",I_7,I_13,I_4,I_7-I_4, toler*I_13); int maxiter=50; //max. 
subdivisions double abstol=1e-30; work.I_prev=I_7; // für abstolcheck in nächster recursion if (fabs(I_7-I_4) <= toler*I_13 || mll <= a || b <= mrr || iter > maxiter || fabs(I_7-I_prev) < abstol ) { if ((mll <= a || b <= mrr)) //Error { // out_of_tolerance=true; // Interval contains no more machine numbers // printf("OUT OF TOLERANCE !!!, mll:%.4e, a:%.4e, b:%.4e, mrr:%.4e,I_7-I_4:%.4e, tol:%.4e,I_13:%.4e\n", // mll,b,b,mrr,I_7-I_4, toler*I_13,I_13); terminate_gkq=1; } integral_result += I_7; //Terminate recursion. // printf("me ok:%d, a:%f,b:%f, tler:%.5e,I_4:%f,I_7:%f,ubteg;%.4e\n", omp_get_thread_num(), a,b,toler,I_4,I_7,integral_result); } else //subdivide interval and push new work on stack { // printf("me NOOOO:%d, a:%f,b:%f, tler:%.5e,I_4:%f,I_7:%f\n", omp_get_thread_num(), a,b,toler,I_4,I_7); work.iter=iter+1; work.a=a; work.b=mll; work.fa=fa; work.fb=fmll; push_stack(stack, &work); work.a=mll; work.b=ml; work.fa=fmll; work.fb=fml; push_stack(stack, &work); work.a=ml; work.b=m; work.fa=fml; work.fb=fm; push_stack(stack, &work); work.a=m; work.b=mr; work.fa=fm; work.fb=fmr; push_stack(stack, &work); work.a=mr; work.b=mrr; work.fa=fmr; work.fb=fmrr; push_stack(stack, &work); work.a=mrr; work.b=b; work.fa=fmrr; work.fb=fb; push_stack(stack, &work); // pragma critical stack } // else ..non-acceptable error } // while } /* end omp parallel */ return integral_result; } double gkq_adapt_serial(double (*f)(double, struct my_f_params*), double a, double b, double fa, double fb, double toler,double I_13, struct my_f_params* p) { double m = (a+b)/2; double h = (b -a)/2; double mll=m-gkq_alpha*h; double ml=m-gkq_beta*h; double mr=m+gkq_beta*h; double mrr=m+gkq_alpha*h; double fmll=f(mll,p); double fml=f(ml,p); double fm=f(m,p); double fmr=f(mr,p); double fmrr=f(mrr,p); double I_4=h/6.0*(fa+fb+5.0*(fml+fmr)); // 4-point Gauss-Lobatto formula. double I_7=h/1470.0*(77.0*(fa+fb)+432.0*(fmll+fmrr)+625.0*(fml+fmr)+672.0*fm); gkq_iter_serial++; if ( (fabs(I_7-I_4) <= toler*I_13 || mll <= a || b <= mrr) && gkq_iter_serial) { if ((mll <= a || b <= mrr) && !terminate_serial) //Error { // out_of_tolerance=true; // Interval contains no more machine numbers printf("OUT OF TOLERANCE !!!, mll:%.4e, a:%.4e, b:%.4e, mrr:%.4e\n", mll,b,b,mrr); terminate_serial=1; } // printf("me ok:%d, a:%f,b:%f, tler:%.5e,I_4:%f,I_7:%f\n", omp_get_thread_num(), a,b,toler,I_4,I_7); return I_7; } else { // printf("me NOOOO:%d, a:%f,b:%f, tler:%.5e,I_4:%f,I_7:%f\n", omp_get_thread_num(), a,b,toler,I_4,I_7); return gkq_adapt_serial(f, a,mll,fa,fmll,toler,I_13,p) + gkq_adapt_serial(f, mll,ml,fmll,fml,toler,I_13,p) + gkq_adapt_serial(f, ml,m,fml,fm,toler,I_13,p) + gkq_adapt_serial(f, m,mr,fm,fmr,toler,I_13,p) + gkq_adapt_serial(f, mr,mrr,fmr,fmrr,toler,I_13,p) + gkq_adapt_serial(f, mrr,b,fmrr,fb,toler,I_13,p); } }
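Stripped of bookkeeping, the top-level error control computed once in gkq_OMP and gkq_serial can be written as

$$
r = \frac{|I_7 - I_{13}|}{|I_4 - I_{13}|}, \qquad
\mathrm{toler} =
\begin{cases}
\mathrm{TOL}/r, & 0 < r < 1,\\
\mathrm{TOL}, & \text{otherwise},
\end{cases}
$$

after which a subinterval is accepted as converged once $|I_7 - I_4| \le \mathrm{toler}\cdot|I_{13}|$. A ratio $r < 1$ means the Lobatto/Kronrod pair is converging toward the 13-point reference faster than expected, so the per-interval tolerance can safely be relaxed. The nodes ($\pm\sqrt{2/3}$, $\pm 1/\sqrt{5}$) and the 77/1470-style weights match Gander and Gautschi's adaptive Lobatto algorithm (BIT 40, 2000), which appears to be the basis of this code.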
#include "imd.h" #include <sys/time.h> #include <gsl/gsl_integration.h> #include <gsl/gsl_errno.h> // #define USEFLOAT // hauptsächlich in der funktion genexptint. Profiling zeigte, dass // hier die meiste zeit verbraucht wird -> float verdoppelt performance #ifdef USEFLOAT typedef float Real; #define REALTYPE MPI_FLOAT #else typedef double Real; #define REALTYPE MPI_DOUBLE #endif #ifdef USEFLOAT #define EXPR expf //exp zu floaten ist eine ganz mieeese idee #define SQRTR sqrtf #define POWR powf #define LOGR logf #else #define EXPR exp #define SQRTR sqrt #define POWR pow #define LOGR log #endif // ********************************************************* // PHYSICAL CONSTANTS // ********************************************************* // const double eV2J=1.6021766E-19; const Real eV2H=0.03674932; //eV to Hartree const Real colrad_reltol=1e-5; const Real colrad_abstol=10.0; // const Real J2eV=6.2415091E18; const Real planck=6.62607004E-34; // J/s const Real bohr_radius=0.52917721067E-10; // m const Real bohr_radius_sq=2.800285202924816e-21; const Real hbar_cub=1.172812163789953e-102; //hbar^3 const Real double_emass_pow_3_2 = 2.459112949719466e-45; // (2*emass)^3/2 const int MAXLINE = 255; const Real pi=3.141592653589793; const Real pi_sq=9.869604401089358; const Real E_ion_H=13.6; // eV const Real E_ion_H_J=2.178960176000000e-18; // J const Real E_ion_H_sq_J=4.747867448593952e-36; const Real colrad_tequi=1e-12;//TEST// 1e-12; //bei initial equi ohne Temperatur-variation erst einmal //die Saha-besetzungsdichten equilibrieren //const double LIGHTSPEED=2.997925458e8; // m/s Real LASERFREQ; int colrad_ydot(double t, N_Vector u, N_Vector udot, void *user_data); void do_Saha(Real Te,Real totalc,Real ne,N_Vector y); int colrad_GetCoeffs(N_Vector y,Real It, void * user_data); // Die Zwei müssen nach Prototypes.h // void do_colrad(double dt); // void colrad_init(void); void colrad_read_states(void); void colrad_Saha_init(int i,int j,int k); // ****************************************************************************** // * CROSS SECTION INTEGRATION STUFF // ****************************************************************************** gsl_integration_workspace * winteg_inner=NULL; gsl_integration_workspace * winteg_outer=NULL; gsl_integration_workspace * winteg_fermi=NULL; gsl_integration_workspace * winteg_exc=NULL; //excitation gsl_integration_romberg_workspace * winteg_rb_inner=NULL; gsl_integration_romberg_workspace * winteg_rb_outer=NULL; struct my_f_params { Real ne; Real T;Real mu; Real E;Real DeltaE; int allowed;}; // struct my_f_params fparams_inner; //For inner integrand // struct my_f_params fparams_outer; //outer integrand // struct my_f_params fparams_fermi; // struct my_f_params fparams_exc; double inner_integrand_ionization(double x, void *p); // integrate along E' double outer_integrand_ionization(double x,void *p); // integrate along E Real double_integral_ionization(Real ne,Real T, Real mu, Real DeltaE); //evaluates double integral double inner_integrand_recombination(double x, void *p); double outer_integrand_recombination(double x,void *p); Real double_integral_recombination(Real ne,Real T, Real mu, Real DeltaE); double integrand_excitation(double x,void *p); Real eval_excitation_integral(Real ne,Real T,Real mu, Real DeltaE, int allowed); Real eval_dexcitation_integral(Real ne,Real T,Real mu, Real DeltaE, int allowed); double integrand_deexcitation(double x,void *p); double fermi_integrand(double x, void *p); Real eval_fermi_integrand(Real ne,Real T, Real mu); double 
integrand_excitation_debug(double x,void *p); double outer_integrand_ionization2(double x,struct my_f_params* p); Real double_integral_ionization2(Real ne,Real T, Real mu, Real DeltaE); //evaluates double integral double inner_integrand_ionization2(double x, struct my_f_params* p); // ********************************************************************************************** // * PAR INTEGRAL STUFF // ********************************************************************************************** int terminate_gkq; int terminate_gkq_outer; int terminate_gkq_inner; int terminate_serial; int gkq_iter_serial; // nr of iterations const double gkq_alpha=0.816496580927726; const double gkq_beta=0.447213595499958; static const double xgkq[12] = { 0.0, -0.942882415695480, -0.816496580927726, -0.641853342345781, -0.447213595499958, -0.236383199662150, 0.0, 0.236383199662150, 0.447213595499958, 0.641853342345781, 0.816496580927726, 0.942882415695480 }; Real integral_simpson(Real (*f)(Real, void*), Real a, Real b,int n,void* p); int simpson_error; const Real tolmax=1e-20; const Real simpson_itermax=120; #define INITIAL_STACK_SIZE 128 /* initial size of new stacks */ /* the stack structure */ struct stack_s{ int el_count; /* count of elements on stack */ int el_size; /* size of an element */ int mem_reserve; /* allocated memory for stack */ void* elements; /* pointer to begin of stack */ }; typedef struct _work_t{ double a; double b; double tol; double S; double fa; double fb; double fm; double rec; int iter; struct my_f_params * p; //pointer auf params } work_t; typedef struct _work_t_gkq{ double a; double b; double toler; double I_13; double I_prev; double fa; double fb; struct my_f_params * p; //pointer auf params shortint iter; } work_gkq; typedef struct stack_s* stack_t; double integral_simpson_par(double (*f)(double, struct my_f_params*), stack_t stack); double gkq_adapt_OMP(double (*f)(double, struct my_f_params*), stack_t stack); double gkq_OMP(double (*f)(double, struct my_f_params*), double a, double b, double TOL, struct my_f_params* p,stack_t stack); double gkq_serial(double (*f)(double, struct my_f_params*), double a, double b, double TOL, struct my_f_params* p); double gkq_adapt_serial(double (*f)(double, struct my_f_params*), double a, double b, double fa,double fb, double toler,double I_13, struct my_f_params* p); // void create_stack(stack_t* stack, int element_size); // int empty_stack(stack_t stack); // void push_stack(stack_t stack, void* element); // void pop_stack(stack_t stack, void* element); /****************************************** * create new stack ******************************************/ void create_stack( stack_t* stack, /* stack to create */ int element_size) /* size of a stack element */ { int initial_size = INITIAL_STACK_SIZE; /* allocate memory for new stack struct */ (*stack) = (stack_t) malloc(sizeof(struct stack_s)); if (!(*stack)){ char errstr[255]; sprintf(errstr, "error: could not allocate memory for stack.. Abort.\n"); error(errstr); // exit(1); } /* allocate memory for stack elements */ (*stack)->elements = (void*) malloc(element_size * initial_size); (*stack)->mem_reserve = initial_size; if (!(*stack)->elements){ char errstr[255]; sprintf(errstr, "error: could not allocate memory for stack.. 
Abort.\n"); error(errstr); } (*stack)->el_size = element_size; (*stack)->el_count = 0; } /***************************************** * check if the stack is empty *****************************************/ int empty_stack(stack_t stack) { return stack->el_count <= 0; } /***************************************** * push a element on stack *****************************************/ void push_stack(stack_t stack, /* target stack */ void* element) /* element to push */ { int i, new_reserve; int log2_count; /* check if we need more memory for stack */ if (stack->el_count >= stack->mem_reserve) { /* calculate new size for the stack it should be a power of two */ for (i = stack->el_count, log2_count = 0; i > 0; i>>1, log2_count++); new_reserve = 1 << log2_count; /* reallocate memory for phase thread tables and nullify new values */ stack->elements = (void *) realloc(stack->elements, stack->el_size * new_reserve); if (!stack->elements){ char errstr [255]; sprintf(errstr, "error: can't reallocate stack.. Aborting\n"); error(errstr); // exit(1); } stack->mem_reserve = new_reserve; } /* now push the element on top of the stack */ memcpy((char*)stack->elements + stack->el_count*stack->el_size, element, stack->el_size); stack->el_count++; } /***************************************** * pop an element from stack *****************************************/ void pop_stack( stack_t stack, /* target stack */ void* element) /* where poped el. should be stored */ { if (stack->el_count <= 0){ char errstr[255]; sprintf(errstr, "error: trying to pop from empty stack.\n"); error(errstr); // exit(2); } stack->el_count--; memcpy(element, (char*)stack->elements + stack->el_count*stack->el_size, stack->el_size); } // *************************************************************************** // * Gauss-kronard quadrature, parallel // *************************************************************************** double gkq_OMP(double (*f)(double, struct my_f_params*), double a, double b, double TOL, struct my_f_params* p,stack_t stack) { //1st integration double result=0.0; // ********************************************* double m=0.5*(a+b); double h=0.5*(b-a); double y[13]; double fa=y[0]=f(a,p); double fb=y[12]=f(b,p); int i; for(i=1;i<12;i++) y[i]=f(m+xgkq[i]*h,p); double I_4= (h/6.0)*(y[0]+y[12]+5.0*(y[4]+y[8])); // 4-point gauss-lobatto double I_7= (h/1470.0)*(77.0*(y[0]+y[12])+432.0*(y[2]+y[10])+ // 7-point kronrod 625.0*(y[4]+y[8])+672.0*y[6]); double I_13= h*(0.0158271919734802*(y[0]+y[12])+0.0942738402188500*(y[1]+y[11])+0.155071987336585*(y[2]+y[10])+ 0.188821573960182*(y[3]+y[9])+0.199773405226859*(y[4]+y[8])+0.224926465333340*(y[5]+y[7])+ 0.242611071901408*y[6]); //13-point Kronrod double Err1=fabs(I_7-I_13); double Err2=fabs(I_4-I_13); double r=(Err2 != 0.0) ? Err1/Err2 : 1.0; double toler=(r > 0.0 && r < 1.0) ? 
TOL/r : TOL; if(I_13 == 0) I_13=b-a; I_13=fabs(I_13); //Prepare work and push onto stack work_gkq work; work.a = a; work.b = b; work.toler = toler; work.I_13=I_13; work.fa=fa; work.fb=fb; work.p=p; work.I_prev=I_7; //ANTI-FOLGENDES: //OUT OF TOLERANCE !!!, mll:3.0162e-18, a:3.0162e-18, b:3.0162e-18, mrr:3.0162e-18,I_7-I_4:0.0000e+00, tol:1.6002e-315,I_13:7.0585e-313 if(I_13 < 1e-150) return 0; push_stack(stack, &work); result=gkq_adapt(f,stack); return result; } double gkq_serial(double (*f)(double, struct my_f_params*), double a, double b, double TOL, struct my_f_params* p) { //1st integration double result=0.0; gkq_iter_serial=0; // ********************************************* double m=0.5*(a+b); double h=0.5*(b-a); double y[13]; double fa=y[0]=f(a,p); double fb=y[12]=f(b,p); int i; for(i=1;i<12;i++) y[i]=f(m+xgkq[i]*h,p); double I_4= (h/6.0)*(y[0]+y[12]+5.0*(y[4]+y[8])); // 4-point gauss-lobatto double I_7= (h/1470.0)*(77.0*(y[0]+y[12])+432.0*(y[2]+y[10])+ // 7-point kronrod 625.0*(y[4]+y[8])+672.0*y[6]); double I_13= h*(0.0158271919734802*(y[0]+y[12])+0.0942738402188500*(y[1]+y[11])+0.155071987336585*(y[2]+y[10])+ 0.188821573960182*(y[3]+y[9])+0.199773405226859*(y[4]+y[8])+0.224926465333340*(y[5]+y[7])+ 0.242611071901408*y[6]); //13-point Kronrod double Err1=fabs(I_7-I_13); double Err2=fabs(I_4-I_13); double r=(Err2 != 0.0) ? Err1/Err2 : 1.0; double toler=(r > 0.0 && r < 1.0) ? TOL/r : TOL; if(I_13 == 0) I_13=b-a; I_13=fabs(I_13); result=gkq_adapt_serial(f,a,b,fa,fb,toler,I_13, p); return result; } // *********************************************** // * RECURSIVE ADAPTION ROUTINE FOR PARALLEL-GK-QUADRATURE // ********************************************** double gkq_adapt_OMP(double (*f)(double, struct my_f_params*), stack_t stack) { work_gkq work; work.iter=0; int ready, idle, busy; double integral_result = 0.0; busy = 0; terminate_gkq=0; #pragma omp parallel default(none) \ shared(stack, integral_result,f,busy,terminate_gkq,myid) \ private(work, idle, ready) { // printf("me:%d, err:%d\n",omp_get_thread_num(),simpson_error); ready = 0; idle = 1; while(!ready) // && !terminate_gkq)// && !simpson_error) //<-- so NICHT! { #pragma omp critical (stack) { if (!empty_stack(stack)) { /* we have new work */ pop_stack(stack, &work); if (idle) { /* say others i'm busy */ busy += 1; idle = 0; } } else { /* no new work on stack */ if (!idle){ busy -= 1; idle = 1; } /* nobody has anything to do; let us leave the loop */ if (busy == 0) { ready = 1; } } } /* end critical(stack) */ if (idle) continue; //if ready==1 --> leave loop double I_prev=work.I_prev; double a = work.a; double b = work.b; double toler = work.toler; double I_13=work.I_13; double fa=work.fa; double fb=work.fb; int iter=work.iter; // double *y= work.y; // brauch ich nicht! struct my_f_params * p = work.p; double m = (a+b)/2; double h = (b -a)/2; double mll=m-gkq_alpha*h; double ml=m-gkq_beta*h; double mr=m+gkq_beta*h; double mrr=m+gkq_alpha*h; double fmll=f(mll,p); double fml=f(ml,p); double fm=f(m,p); double fmr=f(mr,p); double fmrr=f(mrr,p); double I_4=h/6.0*(fa+fb+5.0*(fml+fmr)); // 4-point Gauss-Lobatto formula. double I_7=h/1470.0*(77.0*(fa+fb)+432.0*(fmll+fmrr)+625.0*(fml+fmr)+672.0*fm); // if(myid==1) // printf("I_7:%.4e, I_13:%.4e,I_4:%.4e, minus:%.4e, to:%.4e\n",I_7,I_13,I_4,I_7-I_4, toler*I_13); int maxiter=50; //max. 
subdivisions double abstol=1e-30; work.I_prev=I_7; // für abstolcheck in nächster recursion if (fabs(I_7-I_4) <= toler*I_13 || mll <= a || b <= mrr || iter > maxiter || fabs(I_7-I_prev) < abstol ) { if ((mll <= a || b <= mrr)) //Error { // out_of_tolerance=true; // Interval contains no more machine numbers // printf("OUT OF TOLERANCE !!!, mll:%.4e, a:%.4e, b:%.4e, mrr:%.4e,I_7-I_4:%.4e, tol:%.4e,I_13:%.4e\n", // mll,b,b,mrr,I_7-I_4, toler*I_13,I_13); terminate_gkq=1; } #pragma omp critical (integral_result) { integral_result += I_7; //Terminate recursion. } // printf("me ok:%d, a:%f,b:%f, tler:%.5e,I_4:%f,I_7:%f,ubteg;%.4e\n", omp_get_thread_num(), a,b,toler,I_4,I_7,integral_result); } else //subdivide interval and push new work on stack { #pragma omp critical (stack) { // printf("me NOOOO:%d, a:%f,b:%f, tler:%.5e,I_4:%f,I_7:%f\n", omp_get_thread_num(), a,b,toler,I_4,I_7); work.iter=iter+1; work.a=a; work.b=mll; work.fa=fa; work.fb=fmll; push_stack(stack, &work); work.a=mll; work.b=ml; work.fa=fmll; work.fb=fml; push_stack(stack, &work); work.a=ml; work.b=m; work.fa=fml; work.fb=fm; push_stack(stack, &work); work.a=m; work.b=mr; work.fa=fm; work.fb=fmr; push_stack(stack, &work); work.a=mr; work.b=mrr; work.fa=fmr; work.fb=fmrr; push_stack(stack, &work); work.a=mrr; work.b=b; work.fa=fmrr; work.fb=fb; push_stack(stack, &work); } // pragma critical stack } // else ..non-acceptable error } // while } /* end omp parallel */ return integral_result; } double gkq_adapt_serial(double (*f)(double, struct my_f_params*), double a, double b, double fa, double fb, double toler,double I_13, struct my_f_params* p) { double m = (a+b)/2; double h = (b -a)/2; double mll=m-gkq_alpha*h; double ml=m-gkq_beta*h; double mr=m+gkq_beta*h; double mrr=m+gkq_alpha*h; double fmll=f(mll,p); double fml=f(ml,p); double fm=f(m,p); double fmr=f(mr,p); double fmrr=f(mrr,p); double I_4=h/6.0*(fa+fb+5.0*(fml+fmr)); // 4-point Gauss-Lobatto formula. double I_7=h/1470.0*(77.0*(fa+fb)+432.0*(fmll+fmrr)+625.0*(fml+fmr)+672.0*fm); gkq_iter_serial++; if ( (fabs(I_7-I_4) <= toler*I_13 || mll <= a || b <= mrr) && gkq_iter_serial) { if ((mll <= a || b <= mrr) && !terminate_serial) //Error { // out_of_tolerance=true; // Interval contains no more machine numbers printf("OUT OF TOLERANCE !!!, mll:%.4e, a:%.4e, b:%.4e, mrr:%.4e\n", mll,b,b,mrr); terminate_serial=1; } // printf("me ok:%d, a:%f,b:%f, tler:%.5e,I_4:%f,I_7:%f\n", omp_get_thread_num(), a,b,toler,I_4,I_7); return I_7; } else { // printf("me NOOOO:%d, a:%f,b:%f, tler:%.5e,I_4:%f,I_7:%f\n", omp_get_thread_num(), a,b,toler,I_4,I_7); return gkq_adapt_serial(f, a,mll,fa,fmll,toler,I_13,p) + gkq_adapt_serial(f, mll,ml,fmll,fml,toler,I_13,p) + gkq_adapt_serial(f, ml,m,fml,fm,toler,I_13,p) + gkq_adapt_serial(f, m,mr,fm,fmr,toler,I_13,p) + gkq_adapt_serial(f, mr,mrr,fmr,fmrr,toler,I_13,p) + gkq_adapt_serial(f, mrr,b,fmrr,fb,toler,I_13,p); } }
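The busy/idle counters in gkq_adapt_OMP implement a small distributed-termination protocol: a thread may only exit once the stack is empty and no other thread is busy (and could therefore still push new work). A minimal, self-contained sketch of just that protocol, with hypothetical countdown work items standing in for the quadrature intervals (an illustration, not the original driver; requires an OpenMP-capable compiler):

#include <stdio.h>
#include <omp.h>

#define MAX_ITEMS 1024

int main(void)
{
    int items[MAX_ITEMS];          /* trivial stand-in for the work stack */
    int top = 0;                   /* number of items on the stack */
    int busy = 0;
    long processed = 0;

    items[top++] = 8;              /* seed item: splits in two until it reaches 0 */

#pragma omp parallel default(none) shared(items, top, busy, processed)
    {
        int idle = 1, ready = 0, w = 0;
        while (!ready) {
#pragma omp critical(stack)
            {
                if (top > 0) {
                    w = items[--top];
                    if (idle) { busy++; idle = 0; }
                } else {
                    if (!idle) { busy--; idle = 1; }
                    if (busy == 0) ready = 1;  /* nobody can produce more work */
                }
            }
            if (idle) continue;
#pragma omp critical(stack)
            {
                if (w > 0 && top + 2 <= MAX_ITEMS) {  /* "subdivide" */
                    items[top++] = w - 1;
                    items[top++] = w - 1;
                } else {
                    processed++;                      /* "accept" a leaf */
                }
            }
        }
    }
    printf("leaves processed: %ld\n", processed);     /* expect 2^8 = 256 */
    return 0;
}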
SpatialSubSampling.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/SpatialSubSampling.c" #else static inline void THNN_(SpatialSubSampling_shapeCheck)( THTensor *input, THTensor *gradOutput, THTensor *weight, int kW, int kH) { int ndims = input->nDimension; THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input, "3D or 4D input tensor expected but got: %s"); THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous"); int nInputPlane = THTensor_(size)(weight, 0); int dimw = 2; int dimh = 1; long inputWidth; long inputHeight; if (input->nDimension == 4) { dimw++; dimh++; } inputWidth = input->size[dimw]; inputHeight = input->size[dimh]; THArgCheck(input->size[dimh-1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck(inputWidth >= kW && inputHeight >= kH, 2, "input image smaller than kernel size"); } void THNN_(SpatialSubSampling_updateOutput)( THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, int kW, int kH, int dW, int dH) { THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous"); real *weight_data = THTensor_(data)(weight); real *bias_data = THTensor_(data)(bias); real *output_data; real *input_data; int dimw = 2; int dimh = 1; long nbatch = 1; long inputWidth; long inputHeight; long outputWidth; long outputHeight; int nInputPlane = THTensor_(size)(weight,0); long k; THNN_(SpatialSubSampling_shapeCheck)(input, NULL, weight, kW, kH); if (input->nDimension == 4) { nbatch = input->size[0]; dimw++; dimh++; } inputWidth = input->size[dimw]; inputHeight = input->size[dimh]; outputWidth = (inputWidth - kW) / dW + 1; outputHeight = (inputHeight - kH) / dH + 1; if (input->nDimension == 3) THTensor_(resize3d)(output, nInputPlane, outputHeight, outputWidth); else THTensor_(resize4d)(output, input->size[0], nInputPlane, outputHeight, outputWidth); input = THTensor_(newContiguous)(input); input_data = THTensor_(data)(input); output_data = THTensor_(data)(output); #pragma omp parallel for private(k) for(k = 0; k < nInputPlane; k++) { long p; for(p = 0; p < nbatch; p++) { long xx, yy; /* For all output pixels... */ real *ptr_output = output_data + p*nInputPlane*outputWidth*outputHeight + k*outputWidth*outputHeight; /* Get the good mask for (k,i) (k out, i in) */ real the_weight = weight_data[k]; /* Initialize to the bias */ real z = bias_data[k]; long i; for(i = 0; i < outputWidth*outputHeight; i++) ptr_output[i] = z; for(yy = 0; yy < outputHeight; yy++) { for(xx = 0; xx < outputWidth; xx++) { /* Compute the mean of the input image... 
*/ real *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight + yy*dH*inputWidth+xx*dW; real sum = 0; long kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) sum += ptr_input[kx]; ptr_input += inputWidth; /* next input line */ } /* Update output */ *ptr_output++ += the_weight*sum; } } } } THTensor_(free)(input); } void THNN_(SpatialSubSampling_updateGradInput)( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *weight, int kW, int kH, int dW, int dH) { THNN_(SpatialSubSampling_shapeCheck)(input, gradOutput, weight, kW, kH); int dimw = 2; int dimh = 1; long nbatch = 1; long inputWidth; long inputHeight; long outputWidth; long outputHeight; int nInputPlane = THTensor_(size)(weight,0); real *weight_data; real *gradOutput_data; real *input_data, *gradInput_data; long k; if (input->nDimension == 4) { nbatch = input->size[0]; dimw++; dimh++; } inputWidth = input->size[dimw]; inputHeight = input->size[dimh]; outputWidth = (inputWidth - kW) / dW + 1; outputHeight = (inputHeight - kH) / dH + 1; weight_data = THTensor_(data)(weight); gradOutput = THTensor_(newContiguous)(gradOutput); gradOutput_data = THTensor_(data)(gradOutput); input_data = THTensor_(data)(input); THTensor_(resizeAs)(gradInput, input); gradInput_data = THTensor_(data)(gradInput); #pragma omp parallel for private(k) for(k = 0; k < nInputPlane; k++) { long p; for(p = 0; p < nbatch; p++) { real the_weight = weight_data[k]; real *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight; long xx, yy; real* ptr_gi = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight; long i; for(i=0; i<inputWidth*inputHeight; i++) ptr_gi[i] = 0.0; for(yy = 0; yy < outputHeight; yy++) { for(xx = 0; xx < outputWidth; xx++) { real *ptr_gradInput = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight + yy*dH*inputWidth+xx*dW; real z = *ptr_gradOutput++ * the_weight; long kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) ptr_gradInput[kx] += z; ptr_gradInput += inputWidth; } } } } } THTensor_(free)(gradOutput); } void THNN_(SpatialSubSampling_accGradParameters)( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, int kW, int kH, int dW, int dH, accreal scale_) { real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); THNN_(SpatialSubSampling_shapeCheck)(input, gradOutput, gradWeight, kW, kH); long nbatch = 1; long dimw = 2; long dimh = 1; long inputWidth; long inputHeight; long outputWidth; long outputHeight; int nInputPlane = THTensor_(size)(gradWeight,0); real *gradWeight_data; real *gradBias_data; real *gradOutput_data; real *input_data; long k; if (input->nDimension == 4) { dimw++; dimh++; nbatch = input->size[0]; } inputWidth = input->size[dimw]; inputHeight = input->size[dimh]; outputWidth = (inputWidth - kW) / dW + 1; outputHeight = (inputHeight - kH) / dH + 1; gradWeight_data = THTensor_(data)(gradWeight); gradBias_data = THTensor_(data)(gradBias); gradOutput = THTensor_(newContiguous)(gradOutput); gradOutput_data = THTensor_(data)(gradOutput); input = THTensor_(newContiguous)(input); input_data = THTensor_(data)(input); #pragma omp parallel for private(k) for(k = 0; k < nInputPlane; k++) { long p; for(p = 0; p < nbatch; p++) { real *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight; real sum; long xx, yy; long i; sum = 0; for(i = 0; i < 
outputWidth*outputHeight; i++) sum += ptr_gradOutput[i]; gradBias_data[k] += scale*sum; sum = 0; for(yy = 0; yy < outputHeight; yy++) { for(xx = 0; xx < outputWidth; xx++) { real *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight + yy*dH*inputWidth+xx*dW; real z = *ptr_gradOutput++; long kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) sum += z * ptr_input[kx]; ptr_input += inputWidth; } } } gradWeight_data[k] += scale*sum; } } THTensor_(free)(input); THTensor_(free)(gradOutput); } #endif
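For reference, all three kernels use the standard unpadded pooling arithmetic for the output geometry: outputWidth = (inputWidth - kW) / dW + 1, and analogously for the height. For example, kW = kH = 2 with dW = dH = 2 on a 3x32x32 input yields a 3x16x16 output, since (32 - 2)/2 + 1 = 16. Each output pixel holds weight[k] * (sum over its kW x kH input window) + bias[k], i.e. a learnable scaled subsample per plane rather than a plain average.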
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/SpatialSubSampling.c" #else static inline void THNN_(SpatialSubSampling_shapeCheck) ( THTensor * input, THTensor * gradOutput, THTensor * weight, int kW, int kH) { int ndims = input->nDimension; THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input, "3D or 4D input tensor expected but got: %s"); THArgCheck(THTensor_(isContiguous) (weight), 4, "weight must be contiguous"); int nInputPlane = THTensor_(size) (weight, 0); int dimw = 2; int dimh = 1; long inputWidth; long inputHeight; if (input->nDimension == 4) { dimw++; dimh++; } inputWidth = input->size[dimw]; inputHeight = input->size[dimh]; THArgCheck(input->size[dimh - 1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck(inputWidth >= kW && inputHeight >= kH, 2, "input image smaller than kernel size"); } void THNN_(SpatialSubSampling_updateOutput) ( THNNState * state, THTensor * input, THTensor * output, THTensor * weight, THTensor * bias, int kW, int kH, int dW, int dH) { THArgCheck(!bias || THTensor_(isContiguous) (bias), 5, "bias must be contiguous"); real *weight_data = THTensor_(data) (weight); real *bias_data = THTensor_(data) (bias); real *output_data; real *input_data; int dimw = 2; int dimh = 1; long nbatch = 1; long inputWidth; long inputHeight; long outputWidth; long outputHeight; int nInputPlane = THTensor_(size) (weight, 0); long k; THNN_(SpatialSubSampling_shapeCheck) (input, NULL, weight, kW, kH); if (input->nDimension == 4) { nbatch = input->size[0]; dimw++; dimh++; } inputWidth = input->size[dimw]; inputHeight = input->size[dimh]; outputWidth = (inputWidth - kW) / dW + 1; outputHeight = (inputHeight - kH) / dH + 1; if (input->nDimension == 3) THTensor_(resize3d) (output, nInputPlane, outputHeight, outputWidth); else THTensor_(resize4d) (output, input->size[0], nInputPlane, outputHeight, outputWidth); input = THTensor_(newContiguous) (input); input_data = THTensor_(data) (input); output_data = THTensor_(data) (output); for (k = 0; k < nInputPlane; k++) { long p; for (p = 0; p < nbatch; p++) { long xx, yy; /* For all output pixels... */ real *ptr_output = output_data + p * nInputPlane * outputWidth * outputHeight + k * outputWidth * outputHeight; /* Get the good mask for (k,i) (k out, i in) */ real the_weight = weight_data[k]; /* Initialize to the bias */ real z = bias_data[k]; long i; for (i = 0; i < outputWidth * outputHeight; i++) ptr_output[i] = z; for (yy = 0; yy < outputHeight; yy++) { for (xx = 0; xx < outputWidth; xx++) { /* Compute the mean of the input image... 
*/ real *ptr_input = input_data + p * nInputPlane * inputWidth * inputHeight + k * inputWidth * inputHeight + yy * dH * inputWidth + xx * dW; real sum = 0; long kx, ky; for (ky = 0; ky < kH; ky++) { for (kx = 0; kx < kW; kx++) sum += ptr_input[kx]; ptr_input += inputWidth; /* next input line */ } /* Update output */ *ptr_output++ += the_weight * sum; } } } } THTensor_(free) (input); } void THNN_(SpatialSubSampling_updateGradInput) ( THNNState * state, THTensor * input, THTensor * gradOutput, THTensor * gradInput, THTensor * weight, int kW, int kH, int dW, int dH) { THNN_(SpatialSubSampling_shapeCheck) (input, gradOutput, weight, kW, kH); int dimw = 2; int dimh = 1; long nbatch = 1; long inputWidth; long inputHeight; long outputWidth; long outputHeight; int nInputPlane = THTensor_(size) (weight, 0); real *weight_data; real *gradOutput_data; real *input_data, *gradInput_data; long k; if (input->nDimension == 4) { nbatch = input->size[0]; dimw++; dimh++; } inputWidth = input->size[dimw]; inputHeight = input->size[dimh]; outputWidth = (inputWidth - kW) / dW + 1; outputHeight = (inputHeight - kH) / dH + 1; weight_data = THTensor_(data) (weight); gradOutput = THTensor_(newContiguous) (gradOutput); gradOutput_data = THTensor_(data) (gradOutput); input_data = THTensor_(data) (input); THTensor_(resizeAs) (gradInput, input); gradInput_data = THTensor_(data) (gradInput); for (k = 0; k < nInputPlane; k++) { long p; for (p = 0; p < nbatch; p++) { real the_weight = weight_data[k]; real *ptr_gradOutput = gradOutput_data + p * nInputPlane * outputHeight * outputWidth + k * outputWidth * outputHeight; long xx, yy; real *ptr_gi = gradInput_data + p * nInputPlane * inputWidth * inputHeight + k * inputWidth * inputHeight; long i; for (i = 0; i < inputWidth * inputHeight; i++) ptr_gi[i] = 0.0; for (yy = 0; yy < outputHeight; yy++) { for (xx = 0; xx < outputWidth; xx++) { real *ptr_gradInput = gradInput_data + p * nInputPlane * inputWidth * inputHeight + k * inputWidth * inputHeight + yy * dH * inputWidth + xx * dW; real z = *ptr_gradOutput++ * the_weight; long kx, ky; for (ky = 0; ky < kH; ky++) { for (kx = 0; kx < kW; kx++) ptr_gradInput[kx] += z; ptr_gradInput += inputWidth; } } } } } THTensor_(free) (gradOutput); } void THNN_(SpatialSubSampling_accGradParameters) ( THNNState * state, THTensor * input, THTensor * gradOutput, THTensor * gradWeight, THTensor * gradBias, int kW, int kH, int dW, int dH, accreal scale_) { real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); THNN_(SpatialSubSampling_shapeCheck) (input, gradOutput, gradWeight, kW, kH); long nbatch = 1; long dimw = 2; long dimh = 1; long inputWidth; long inputHeight; long outputWidth; long outputHeight; int nInputPlane = THTensor_(size) (gradWeight, 0); real *gradWeight_data; real *gradBias_data; real *gradOutput_data; real *input_data; long k; if (input->nDimension == 4) { dimw++; dimh++; nbatch = input->size[0]; } inputWidth = input->size[dimw]; inputHeight = input->size[dimh]; outputWidth = (inputWidth - kW) / dW + 1; outputHeight = (inputHeight - kH) / dH + 1; gradWeight_data = THTensor_(data) (gradWeight); gradBias_data = THTensor_(data) (gradBias); gradOutput = THTensor_(newContiguous) (gradOutput); gradOutput_data = THTensor_(data) (gradOutput); input = THTensor_(newContiguous) (input); input_data = THTensor_(data) (input); for (k = 0; k < nInputPlane; k++) { long p; for (p = 0; p < nbatch; p++) { real *ptr_gradOutput = gradOutput_data + p * nInputPlane * outputHeight * outputWidth + k * outputWidth * outputHeight; real sum; long xx, yy; long i; 
sum = 0; for (i = 0; i < outputWidth * outputHeight; i++) sum += ptr_gradOutput[i]; gradBias_data[k] += scale * sum; sum = 0; for (yy = 0; yy < outputHeight; yy++) { for (xx = 0; xx < outputWidth; xx++) { real *ptr_input = input_data + p * nInputPlane * inputWidth * inputHeight + k * inputWidth * inputHeight + yy * dH * inputWidth + xx * dW; real z = *ptr_gradOutput++; long kx, ky; for (ky = 0; ky < kH; ky++) { for (kx = 0; kx < kW; kx++) sum += z * ptr_input[kx]; ptr_input += inputWidth; } } } gradWeight_data[k] += scale * sum; } } THTensor_(free) (input); THTensor_(free) (gradOutput); } #endif
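The two backward kernels follow directly from that forward rule by the chain rule: updateGradInput scatters gradOutput * weight[k] uniformly over each kW x kH window, while accGradParameters accumulates dL/dbias[k] = scale * sum(gradOutput) and dL/dweight[k] = scale * sum(gradOutput * window-sum-of-input), each summed over the batch and over all output positions of plane k.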
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/SpatialSubSampling.c" #else static inline void THNN_(SpatialSubSampling_shapeCheck) ( THTensor * input, THTensor * gradOutput, THTensor * weight, int kW, int kH) { int ndims = input->nDimension; THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input, "3D or 4D input tensor expected but got: %s"); THArgCheck(THTensor_(isContiguous) (weight), 4, "weight must be contiguous"); int nInputPlane = THTensor_(size) (weight, 0); int dimw = 2; int dimh = 1; long inputWidth; long inputHeight; if (input->nDimension == 4) { dimw++; dimh++; } inputWidth = input->size[dimw]; inputHeight = input->size[dimh]; THArgCheck(input->size[dimh - 1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck(inputWidth >= kW && inputHeight >= kH, 2, "input image smaller than kernel size"); } void THNN_(SpatialSubSampling_updateOutput) ( THNNState * state, THTensor * input, THTensor * output, THTensor * weight, THTensor * bias, int kW, int kH, int dW, int dH) { THArgCheck(!bias || THTensor_(isContiguous) (bias), 5, "bias must be contiguous"); real *weight_data = THTensor_(data) (weight); real *bias_data = THTensor_(data) (bias); real *output_data; real *input_data; int dimw = 2; int dimh = 1; long nbatch = 1; long inputWidth; long inputHeight; long outputWidth; long outputHeight; int nInputPlane = THTensor_(size) (weight, 0); long k; THNN_(SpatialSubSampling_shapeCheck) (input, NULL, weight, kW, kH); if (input->nDimension == 4) { nbatch = input->size[0]; dimw++; dimh++; } inputWidth = input->size[dimw]; inputHeight = input->size[dimh]; outputWidth = (inputWidth - kW) / dW + 1; outputHeight = (inputHeight - kH) / dH + 1; if (input->nDimension == 3) THTensor_(resize3d) (output, nInputPlane, outputHeight, outputWidth); else THTensor_(resize4d) (output, input->size[0], nInputPlane, outputHeight, outputWidth); input = THTensor_(newContiguous) (input); input_data = THTensor_(data) (input); output_data = THTensor_(data) (output); #pragma omp parallel for private(k) for (k = 0; k < nInputPlane; k++) { long p; for (p = 0; p < nbatch; p++) { long xx, yy; /* For all output pixels... */ real *ptr_output = output_data + p * nInputPlane * outputWidth * outputHeight + k * outputWidth * outputHeight; /* Get the good mask for (k,i) (k out, i in) */ real the_weight = weight_data[k]; /* Initialize to the bias */ real z = bias_data[k]; long i; for (i = 0; i < outputWidth * outputHeight; i++) ptr_output[i] = z; for (yy = 0; yy < outputHeight; yy++) { for (xx = 0; xx < outputWidth; xx++) { /* Compute the mean of the input image... 
*/ real *ptr_input = input_data + p * nInputPlane * inputWidth * inputHeight + k * inputWidth * inputHeight + yy * dH * inputWidth + xx * dW; real sum = 0; long kx, ky; for (ky = 0; ky < kH; ky++) { for (kx = 0; kx < kW; kx++) sum += ptr_input[kx]; ptr_input += inputWidth; /* next input line */ } /* Update output */ *ptr_output++ += the_weight * sum; } } } } THTensor_(free) (input); } void THNN_(SpatialSubSampling_updateGradInput) ( THNNState * state, THTensor * input, THTensor * gradOutput, THTensor * gradInput, THTensor * weight, int kW, int kH, int dW, int dH) { THNN_(SpatialSubSampling_shapeCheck) (input, gradOutput, weight, kW, kH); int dimw = 2; int dimh = 1; long nbatch = 1; long inputWidth; long inputHeight; long outputWidth; long outputHeight; int nInputPlane = THTensor_(size) (weight, 0); real *weight_data; real *gradOutput_data; real *input_data, *gradInput_data; long k; if (input->nDimension == 4) { nbatch = input->size[0]; dimw++; dimh++; } inputWidth = input->size[dimw]; inputHeight = input->size[dimh]; outputWidth = (inputWidth - kW) / dW + 1; outputHeight = (inputHeight - kH) / dH + 1; weight_data = THTensor_(data) (weight); gradOutput = THTensor_(newContiguous) (gradOutput); gradOutput_data = THTensor_(data) (gradOutput); input_data = THTensor_(data) (input); THTensor_(resizeAs) (gradInput, input); gradInput_data = THTensor_(data) (gradInput); #pragma omp parallel for private(k) for (k = 0; k < nInputPlane; k++) { long p; for (p = 0; p < nbatch; p++) { real the_weight = weight_data[k]; real *ptr_gradOutput = gradOutput_data + p * nInputPlane * outputHeight * outputWidth + k * outputWidth * outputHeight; long xx, yy; real *ptr_gi = gradInput_data + p * nInputPlane * inputWidth * inputHeight + k * inputWidth * inputHeight; long i; for (i = 0; i < inputWidth * inputHeight; i++) ptr_gi[i] = 0.0; for (yy = 0; yy < outputHeight; yy++) { for (xx = 0; xx < outputWidth; xx++) { real *ptr_gradInput = gradInput_data + p * nInputPlane * inputWidth * inputHeight + k * inputWidth * inputHeight + yy * dH * inputWidth + xx * dW; real z = *ptr_gradOutput++ * the_weight; long kx, ky; for (ky = 0; ky < kH; ky++) { for (kx = 0; kx < kW; kx++) ptr_gradInput[kx] += z; ptr_gradInput += inputWidth; } } } } } THTensor_(free) (gradOutput); } void THNN_(SpatialSubSampling_accGradParameters) ( THNNState * state, THTensor * input, THTensor * gradOutput, THTensor * gradWeight, THTensor * gradBias, int kW, int kH, int dW, int dH, accreal scale_) { real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); THNN_(SpatialSubSampling_shapeCheck) (input, gradOutput, gradWeight, kW, kH); long nbatch = 1; long dimw = 2; long dimh = 1; long inputWidth; long inputHeight; long outputWidth; long outputHeight; int nInputPlane = THTensor_(size) (gradWeight, 0); real *gradWeight_data; real *gradBias_data; real *gradOutput_data; real *input_data; long k; if (input->nDimension == 4) { dimw++; dimh++; nbatch = input->size[0]; } inputWidth = input->size[dimw]; inputHeight = input->size[dimh]; outputWidth = (inputWidth - kW) / dW + 1; outputHeight = (inputHeight - kH) / dH + 1; gradWeight_data = THTensor_(data) (gradWeight); gradBias_data = THTensor_(data) (gradBias); gradOutput = THTensor_(newContiguous) (gradOutput); gradOutput_data = THTensor_(data) (gradOutput); input = THTensor_(newContiguous) (input); input_data = THTensor_(data) (input); #pragma omp parallel for private(k) for (k = 0; k < nInputPlane; k++) { long p; for (p = 0; p < nbatch; p++) { real *ptr_gradOutput = gradOutput_data + p * nInputPlane * outputHeight * 
outputWidth + k * outputWidth * outputHeight; real sum; long xx, yy; long i; sum = 0; for (i = 0; i < outputWidth * outputHeight; i++) sum += ptr_gradOutput[i]; gradBias_data[k] += scale * sum; sum = 0; for (yy = 0; yy < outputHeight; yy++) { for (xx = 0; xx < outputWidth; xx++) { real *ptr_input = input_data + p * nInputPlane * inputWidth * inputHeight + k * inputWidth * inputHeight + yy * dH * inputWidth + xx * dW; real z = *ptr_gradOutput++; long kx, ky; for (ky = 0; ky < kH; ky++) { for (kx = 0; kx < kW; kx++) sum += z * ptr_input[kx]; ptr_input += inputWidth; } } } gradWeight_data[k] += scale * sum; } } THTensor_(free) (input); THTensor_(free) (gradOutput); } #endif
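All three kernels parallelize only over the plane index k (#pragma omp parallel for private(k)); that is race-free because each (plane, batch) pair reads and writes a disjoint slice of its buffers, so no reduction or critical section is needed. A minimal, self-contained sketch of the same pattern on a toy buffer (hypothetical sizes, not part of the original code; requires an OpenMP-capable compiler):

#include <stdio.h>
#include <omp.h>

#define NPLANES 4
#define PLANE_SIZE 6

int main(void)
{
    double out[NPLANES * PLANE_SIZE];
    long k;   /* declared outside and listed private(k), as in the kernels above */

#pragma omp parallel for private(k)
    for (k = 0; k < NPLANES; k++) {
        /* each iteration writes only its own plane: no races, no reduction */
        double *plane = out + k * PLANE_SIZE;
        for (int i = 0; i < PLANE_SIZE; i++)
            plane[i] = (double)k;
    }

    for (k = 0; k < NPLANES; k++)
        printf("plane %ld starts with %.0f\n", k, out[k * PLANE_SIZE]);
    return 0;
}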
c-omp.c
/* This file contains routines to construct OpenACC and OpenMP constructs,
   called from parsing in the C and C++ front ends.

   Copyright (C) 2005-2017 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>,
   Diego Novillo <dnovillo@redhat.com>.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "options.h"
#include "c-common.h"
#include "gimple-expr.h"
#include "c-pragma.h"
#include "omp-general.h"
#include "gomp-constants.h"


/* Complete a #pragma oacc wait construct.  LOC is the location of
   the #pragma.  */

tree
c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
{
  const int nparms = list_length (parms);
  tree stmt, t;
  vec<tree, va_gc> *args;

  vec_alloc (args, nparms + 2);
  stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);

  if (omp_find_clause (clauses, OMP_CLAUSE_ASYNC))
    t = OMP_CLAUSE_ASYNC_EXPR (clauses);
  else
    t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);
  args->quick_push (t);

  args->quick_push (build_int_cst (integer_type_node, nparms));

  for (t = parms; t; t = TREE_CHAIN (t))
    {
      if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
	args->quick_push (build_int_cst (integer_type_node,
			  TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
      else
	args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
    }

  stmt = build_call_expr_loc_vec (loc, stmt, args);

  vec_free (args);

  return stmt;
}

/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_master (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}

/* Complete a #pragma omp taskgroup construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_taskgroup (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_TASKGROUP, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}

/* Complete a #pragma omp critical construct.  BODY is the structured-block
   that follows the pragma, NAME is the identifier in the pragma, or null
   if it was omitted.  LOC is the location of the #pragma.  */

tree
c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses)
{
  tree stmt = make_node (OMP_CRITICAL);
  TREE_TYPE (stmt) = void_type_node;
  OMP_CRITICAL_BODY (stmt) = body;
  OMP_CRITICAL_NAME (stmt) = name;
  OMP_CRITICAL_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}

/* Complete a #pragma omp ordered construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_ordered (location_t loc, tree clauses, tree stmt)
{
  tree t = make_node (OMP_ORDERED);
  TREE_TYPE (t) = void_type_node;
  OMP_ORDERED_BODY (t) = stmt;
  OMP_ORDERED_CLAUSES (t) = clauses;
  SET_EXPR_LOCATION (t, loc);
  return add_stmt (t);
}


/* Complete a #pragma omp barrier construct.  LOC is the location of
   the #pragma.
*/ void c_finish_omp_barrier (location_t loc) { tree x; x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER); x = build_call_expr_loc (loc, x, 0); add_stmt (x); } /* Complete a #pragma omp taskwait construct. LOC is the location of the pragma. */ void c_finish_omp_taskwait (location_t loc) { tree x; x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT); x = build_call_expr_loc (loc, x, 0); add_stmt (x); } /* Complete a #pragma omp taskyield construct. LOC is the location of the pragma. */ void c_finish_omp_taskyield (location_t loc) { tree x; x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD); x = build_call_expr_loc (loc, x, 0); add_stmt (x); } /* Complete a #pragma omp atomic construct. For CODE OMP_ATOMIC the expression to be implemented atomically is LHS opcode= RHS. For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS opcode= RHS with the new or old content of LHS returned. LOC is the location of the atomic statement. The value returned is either error_mark_node (if the construct was erroneous) or an OMP_ATOMIC* node which should be added to the current statement tree with add_stmt. If TEST is set, avoid calling save_expr or create_tmp_var*. */ tree c_finish_omp_atomic (location_t loc, enum tree_code code, enum tree_code opcode, tree lhs, tree rhs, tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst, bool test) { tree x, type, addr, pre = NULL_TREE; if (lhs == error_mark_node || rhs == error_mark_node || v == error_mark_node || lhs1 == error_mark_node || rhs1 == error_mark_node) return error_mark_node; /* ??? According to one reading of the OpenMP spec, complex type are supported, but there are no atomic stores for any architecture. But at least icc 9.0 doesn't support complex types here either. And lets not even talk about vector types... */ type = TREE_TYPE (lhs); if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type) && !SCALAR_FLOAT_TYPE_P (type)) { error_at (loc, "invalid expression type for %<#pragma omp atomic%>"); return error_mark_node; } if (TYPE_ATOMIC (type)) { error_at (loc, "%<_Atomic%> expression in %<#pragma omp atomic%>"); return error_mark_node; } if (opcode == RDIV_EXPR) opcode = TRUNC_DIV_EXPR; /* ??? Validate that rhs does not overlap lhs. */ /* Take and save the address of the lhs. From then on we'll reference it via indirection. */ addr = build_unary_op (loc, ADDR_EXPR, lhs, false); if (addr == error_mark_node) return error_mark_node; if (!test) addr = save_expr (addr); if (!test && TREE_CODE (addr) != SAVE_EXPR && (TREE_CODE (addr) != ADDR_EXPR || !VAR_P (TREE_OPERAND (addr, 0)))) { /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize it even after unsharing function body. */ tree var = create_tmp_var_raw (TREE_TYPE (addr)); DECL_CONTEXT (var) = current_function_decl; addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL); } lhs = build_indirect_ref (loc, addr, RO_NULL); if (code == OMP_ATOMIC_READ) { x = build1 (OMP_ATOMIC_READ, type, addr); SET_EXPR_LOCATION (x, loc); OMP_ATOMIC_SEQ_CST (x) = seq_cst; return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR, loc, x, NULL_TREE); } /* There are lots of warnings, errors, and conversions that need to happen in the course of interpreting a statement. Use the normal mechanisms to do this, and then take it apart again. 
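   As a concrete illustration (assumed user code) of the CODE values this
   function handles:

       #pragma omp atomic update        - OMP_ATOMIC:              x binop= expr
         x += n;
       #pragma omp atomic read          - OMP_ATOMIC_READ:         v = x
         v = x;
       #pragma omp atomic capture       - OMP_ATOMIC_CAPTURE_NEW:  v = new x
         v = x += n;

   The capture-old variant returns the prior contents of x instead.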
*/ if (swapped) { rhs = build_binary_op (loc, opcode, rhs, lhs, 1); opcode = NOP_EXPR; } bool save = in_late_binary_op; in_late_binary_op = true; x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE); in_late_binary_op = save; if (x == error_mark_node) return error_mark_node; if (TREE_CODE (x) == COMPOUND_EXPR) { pre = TREE_OPERAND (x, 0); gcc_assert (TREE_CODE (pre) == SAVE_EXPR); x = TREE_OPERAND (x, 1); } gcc_assert (TREE_CODE (x) == MODIFY_EXPR); rhs = TREE_OPERAND (x, 1); /* Punt the actual generation of atomic operations to common code. */ if (code == OMP_ATOMIC) type = void_type_node; x = build2 (code, type, addr, rhs); SET_EXPR_LOCATION (x, loc); OMP_ATOMIC_SEQ_CST (x) = seq_cst; /* Generally it is hard to prove lhs1 and lhs are the same memory location, just diagnose different variables. */ if (rhs1 && VAR_P (rhs1) && VAR_P (lhs) && rhs1 != lhs && !test) { if (code == OMP_ATOMIC) error_at (loc, "%<#pragma omp atomic update%> uses two different " "variables for memory"); else error_at (loc, "%<#pragma omp atomic capture%> uses two different " "variables for memory"); return error_mark_node; } if (code != OMP_ATOMIC) { /* Generally it is hard to prove lhs1 and lhs are the same memory location, just diagnose different variables. */ if (lhs1 && VAR_P (lhs1) && VAR_P (lhs)) { if (lhs1 != lhs && !test) { error_at (loc, "%<#pragma omp atomic capture%> uses two " "different variables for memory"); return error_mark_node; } } x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR, loc, x, NULL_TREE); if (rhs1 && rhs1 != lhs) { tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false); if (rhs1addr == error_mark_node) return error_mark_node; x = omit_one_operand_loc (loc, type, x, rhs1addr); } if (lhs1 && lhs1 != lhs) { tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false); if (lhs1addr == error_mark_node) return error_mark_node; if (code == OMP_ATOMIC_CAPTURE_OLD) x = omit_one_operand_loc (loc, type, x, lhs1addr); else { if (!test) x = save_expr (x); x = omit_two_operands_loc (loc, type, x, x, lhs1addr); } } } else if (rhs1 && rhs1 != lhs) { tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false); if (rhs1addr == error_mark_node) return error_mark_node; x = omit_one_operand_loc (loc, type, x, rhs1addr); } if (pre) x = omit_one_operand_loc (loc, type, x, pre); return x; } /* Complete a #pragma omp flush construct. We don't do anything with the variable list that the syntax allows. LOC is the location of the #pragma. */ void c_finish_omp_flush (location_t loc) { tree x; x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE); x = build_call_expr_loc (loc, x, 0); add_stmt (x); } /* Check and canonicalize OMP_FOR increment expression. Helper function for c_finish_omp_for. 
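   In effect it extracts the loop step by rewriting EXP with DECL replaced by
   zero and folding.  For example (assumed inputs, iteration variable i):

       i + 4      yields step 4
       4 + i      yields step 4
       i - 2      yields step -2

   Anything not reducible to such a form yields error_mark_node.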
*/ static tree check_omp_for_incr_expr (location_t loc, tree exp, tree decl) { tree t; if (!INTEGRAL_TYPE_P (TREE_TYPE (exp)) || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl))) return error_mark_node; if (exp == decl) return build_int_cst (TREE_TYPE (exp), 0); switch (TREE_CODE (exp)) { CASE_CONVERT: t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl); if (t != error_mark_node) return fold_convert_loc (loc, TREE_TYPE (exp), t); break; case MINUS_EXPR: t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl); if (t != error_mark_node) return fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (exp), t, TREE_OPERAND (exp, 1)); break; case PLUS_EXPR: t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl); if (t != error_mark_node) return fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (exp), t, TREE_OPERAND (exp, 1)); t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl); if (t != error_mark_node) return fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (exp), TREE_OPERAND (exp, 0), t); break; case COMPOUND_EXPR: { /* cp_build_modify_expr forces preevaluation of the RHS to make sure that it is evaluated before the lvalue-rvalue conversion is applied to the LHS. Reconstruct the original expression. */ tree op0 = TREE_OPERAND (exp, 0); if (TREE_CODE (op0) == TARGET_EXPR && !VOID_TYPE_P (TREE_TYPE (op0))) { tree op1 = TREE_OPERAND (exp, 1); tree temp = TARGET_EXPR_SLOT (op0); if (BINARY_CLASS_P (op1) && TREE_OPERAND (op1, 1) == temp) { op1 = copy_node (op1); TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0); return check_omp_for_incr_expr (loc, op1, decl); } } break; } default: break; } return error_mark_node; } /* If the OMP_FOR increment expression in INCR is of pointer type, canonicalize it into an expression handled by gimplify_omp_for() and return it. DECL is the iteration variable. */ static tree c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr) { if (POINTER_TYPE_P (TREE_TYPE (decl)) && TREE_OPERAND (incr, 1)) { tree t = fold_convert_loc (loc, sizetype, TREE_OPERAND (incr, 1)); if (TREE_CODE (incr) == POSTDECREMENT_EXPR || TREE_CODE (incr) == PREDECREMENT_EXPR) t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t); t = fold_build_pointer_plus (decl, t); incr = build2 (MODIFY_EXPR, void_type_node, decl, t); } return incr; } /* Validate and generate OMP_FOR. DECLV is a vector of iteration variables, for each collapsed loop. ORIG_DECLV, if non-NULL, is a vector with the original iteration variables (prior to any transformations, by say, C++ iterators). INITV, CONDV and INCRV are vectors containing initialization expressions, controlling predicates and increment expressions. BODY is the body of the loop and PRE_BODY statements that go before the loop. 
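   For illustration (assumed user code), a conforming collapsed loop nest

       #pragma omp for collapse(2)
       for (i = 0; i < n; i++)
         for (j = 0; j < m; j++)
           body (i, j);

   arrives here with DECLV = {i, j}, INITV = {i = 0, j = 0},
   CONDV = {i < n, j < m} and INCRV = {i++, j++}, one element per loop.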
*/ tree c_finish_omp_for (location_t locus, enum tree_code code, tree declv, tree orig_declv, tree initv, tree condv, tree incrv, tree body, tree pre_body) { location_t elocus; bool fail = false; int i; if ((code == CILK_SIMD || code == CILK_FOR) && !c_check_cilk_loop (locus, TREE_VEC_ELT (declv, 0))) fail = true; gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv)); for (i = 0; i < TREE_VEC_LENGTH (declv); i++) { tree decl = TREE_VEC_ELT (declv, i); tree init = TREE_VEC_ELT (initv, i); tree cond = TREE_VEC_ELT (condv, i); tree incr = TREE_VEC_ELT (incrv, i); elocus = locus; if (EXPR_HAS_LOCATION (init)) elocus = EXPR_LOCATION (init); /* Validate the iteration variable. */ if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)) && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE) { error_at (elocus, "invalid type for iteration variable %qE", decl); fail = true; } else if (TYPE_ATOMIC (TREE_TYPE (decl))) { error_at (elocus, "%<_Atomic%> iteration variable %qE", decl); fail = true; /* _Atomic iterator confuses stuff too much, so we risk ICE trying to diagnose it further. */ continue; } /* In the case of "for (int i = 0...)", init will be a decl. It should have a DECL_INITIAL that we can turn into an assignment. */ if (init == decl) { elocus = DECL_SOURCE_LOCATION (decl); init = DECL_INITIAL (decl); if (init == NULL) { error_at (elocus, "%qE is not initialized", decl); init = integer_zero_node; fail = true; } DECL_INITIAL (decl) = NULL_TREE; init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR, /* FIXME diagnostics: This should be the location of the INIT. */ elocus, init, NULL_TREE); } if (init != error_mark_node) { gcc_assert (TREE_CODE (init) == MODIFY_EXPR); gcc_assert (TREE_OPERAND (init, 0) == decl); } if (cond == NULL_TREE) { error_at (elocus, "missing controlling predicate"); fail = true; } else { bool cond_ok = false; /* E.g. C sizeof (vla) could add COMPOUND_EXPRs with evaluation of the vla VAR_DECL. We need to readd them to the non-decl operand. See PR45784. */ while (TREE_CODE (cond) == COMPOUND_EXPR) cond = TREE_OPERAND (cond, 1); if (EXPR_HAS_LOCATION (cond)) elocus = EXPR_LOCATION (cond); if (TREE_CODE (cond) == LT_EXPR || TREE_CODE (cond) == LE_EXPR || TREE_CODE (cond) == GT_EXPR || TREE_CODE (cond) == GE_EXPR || TREE_CODE (cond) == NE_EXPR || TREE_CODE (cond) == EQ_EXPR) { tree op0 = TREE_OPERAND (cond, 0); tree op1 = TREE_OPERAND (cond, 1); /* 2.5.1. The comparison in the condition is computed in the type of DECL, otherwise the behavior is undefined. 
For example: long n; int i; i < n; according to ISO will be evaluated as: (long)i < n; We want to force: i < (int)n; */ if (TREE_CODE (op0) == NOP_EXPR && decl == TREE_OPERAND (op0, 0)) { TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0); TREE_OPERAND (cond, 1) = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl), TREE_OPERAND (cond, 1)); } else if (TREE_CODE (op1) == NOP_EXPR && decl == TREE_OPERAND (op1, 0)) { TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0); TREE_OPERAND (cond, 0) = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl), TREE_OPERAND (cond, 0)); } if (decl == TREE_OPERAND (cond, 0)) cond_ok = true; else if (decl == TREE_OPERAND (cond, 1)) { TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond))); TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0); TREE_OPERAND (cond, 0) = decl; cond_ok = true; } if (TREE_CODE (cond) == NE_EXPR || TREE_CODE (cond) == EQ_EXPR) { if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))) { if (code != CILK_SIMD && code != CILK_FOR) cond_ok = false; } else if (operand_equal_p (TREE_OPERAND (cond, 1), TYPE_MIN_VALUE (TREE_TYPE (decl)), 0)) TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR ? GT_EXPR : LE_EXPR); else if (operand_equal_p (TREE_OPERAND (cond, 1), TYPE_MAX_VALUE (TREE_TYPE (decl)), 0)) TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR ? LT_EXPR : GE_EXPR); else if (code != CILK_SIMD && code != CILK_FOR) cond_ok = false; } if (cond_ok && TREE_VEC_ELT (condv, i) != cond) { tree ce = NULL_TREE, *pce = &ce; tree type = TREE_TYPE (TREE_OPERAND (cond, 1)); for (tree c = TREE_VEC_ELT (condv, i); c != cond; c = TREE_OPERAND (c, 1)) { *pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0), TREE_OPERAND (cond, 1)); pce = &TREE_OPERAND (*pce, 1); } TREE_OPERAND (cond, 1) = ce; TREE_VEC_ELT (condv, i) = cond; } } if (!cond_ok) { error_at (elocus, "invalid controlling predicate"); fail = true; } } if (incr == NULL_TREE) { error_at (elocus, "missing increment expression"); fail = true; } else { bool incr_ok = false; if (EXPR_HAS_LOCATION (incr)) elocus = EXPR_LOCATION (incr); /* Check all the valid increment expressions: v++, v--, ++v, --v, v = v + incr, v = incr + v and v = v - incr. 
*/ switch (TREE_CODE (incr)) { case POSTINCREMENT_EXPR: case PREINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREDECREMENT_EXPR: if (TREE_OPERAND (incr, 0) != decl) break; incr_ok = true; incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr); break; case COMPOUND_EXPR: if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR) break; incr = TREE_OPERAND (incr, 1); /* FALLTHRU */ case MODIFY_EXPR: if (TREE_OPERAND (incr, 0) != decl) break; if (TREE_OPERAND (incr, 1) == decl) break; if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl)) incr_ok = true; else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR || (TREE_CODE (TREE_OPERAND (incr, 1)) == POINTER_PLUS_EXPR)) && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl) incr_ok = true; else { tree t = check_omp_for_incr_expr (elocus, TREE_OPERAND (incr, 1), decl); if (t != error_mark_node) { incr_ok = true; t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t); incr = build2 (MODIFY_EXPR, void_type_node, decl, t); } } break; default: break; } if (!incr_ok) { error_at (elocus, "invalid increment expression"); fail = true; } } TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (incrv, i) = incr; } if (fail) return NULL; else { tree t = make_node (code); TREE_TYPE (t) = void_type_node; OMP_FOR_INIT (t) = initv; OMP_FOR_COND (t) = condv; OMP_FOR_INCR (t) = incrv; OMP_FOR_BODY (t) = body; OMP_FOR_PRE_BODY (t) = pre_body; OMP_FOR_ORIG_DECLS (t) = orig_declv; SET_EXPR_LOCATION (t, locus); return t; } } /* Type for passing data in between c_omp_check_loop_iv and c_omp_check_loop_iv_r. */ struct c_omp_check_loop_iv_data { tree declv; bool fail; location_t stmt_loc; location_t expr_loc; int kind; walk_tree_lh lh; hash_set<tree> *ppset; }; /* Helper function called via walk_tree, to diagnose uses of associated loop IVs inside of lb, b and incr expressions of OpenMP loops. */ static tree c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data) { struct c_omp_check_loop_iv_data *d = (struct c_omp_check_loop_iv_data *) data; if (DECL_P (*tp)) { int i; for (i = 0; i < TREE_VEC_LENGTH (d->declv); i++) if (*tp == TREE_VEC_ELT (d->declv, i)) { location_t loc = d->expr_loc; if (loc == UNKNOWN_LOCATION) loc = d->stmt_loc; switch (d->kind) { case 0: error_at (loc, "initializer expression refers to " "iteration variable %qD", *tp); break; case 1: error_at (loc, "condition expression refers to " "iteration variable %qD", *tp); break; case 2: error_at (loc, "increment expression refers to " "iteration variable %qD", *tp); break; } d->fail = true; } } /* Don't walk dtors added by C++ wrap_cleanups_r. */ else if (TREE_CODE (*tp) == TRY_CATCH_EXPR && TRY_CATCH_IS_CLEANUP (*tp)) { *walk_subtrees = 0; return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data, d->ppset, d->lh); } return NULL_TREE; } /* Diagnose invalid references to loop iterators in lb, b and incr expressions. 
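   For example (assumed user code), with collapse(2) the bounds of the inner
   loop may not depend on the outer iteration variable, so

       #pragma omp for collapse(2)
       for (i = 0; i < n; i++)
         for (j = i; j < n; j++)
           work (i, j);

   is rejected with "initializer expression refers to iteration variable i".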
*/ bool c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh) { hash_set<tree> pset; struct c_omp_check_loop_iv_data data; int i; data.declv = declv; data.fail = false; data.stmt_loc = EXPR_LOCATION (stmt); data.lh = lh; data.ppset = &pset; for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++) { tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i); gcc_assert (TREE_CODE (init) == MODIFY_EXPR); tree decl = TREE_OPERAND (init, 0); tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i); gcc_assert (COMPARISON_CLASS_P (cond)); gcc_assert (TREE_OPERAND (cond, 0) == decl); tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i); data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1)); data.kind = 0; walk_tree_1 (&TREE_OPERAND (init, 1), c_omp_check_loop_iv_r, &data, &pset, lh); /* Don't warn for C++ random access iterators here, the expression then involves the subtraction and always refers to the original value. The C++ FE needs to warn on those earlier. */ if (decl == TREE_VEC_ELT (declv, i)) { data.expr_loc = EXPR_LOCATION (cond); data.kind = 1; walk_tree_1 (&TREE_OPERAND (cond, 1), c_omp_check_loop_iv_r, &data, &pset, lh); } if (TREE_CODE (incr) == MODIFY_EXPR) { gcc_assert (TREE_OPERAND (incr, 0) == decl); incr = TREE_OPERAND (incr, 1); data.kind = 2; if (TREE_CODE (incr) == PLUS_EXPR && TREE_OPERAND (incr, 1) == decl) { data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 0)); walk_tree_1 (&TREE_OPERAND (incr, 0), c_omp_check_loop_iv_r, &data, &pset, lh); } else { data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 1)); walk_tree_1 (&TREE_OPERAND (incr, 1), c_omp_check_loop_iv_r, &data, &pset, lh); } } } return !data.fail; } /* Similar, but allows to check the init or cond expressions individually. */ bool c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, tree decl, tree init, tree cond, walk_tree_lh lh) { hash_set<tree> pset; struct c_omp_check_loop_iv_data data; data.declv = declv; data.fail = false; data.stmt_loc = stmt_loc; data.lh = lh; data.ppset = &pset; if (init) { data.expr_loc = EXPR_LOCATION (init); data.kind = 0; walk_tree_1 (&init, c_omp_check_loop_iv_r, &data, &pset, lh); } if (cond) { gcc_assert (COMPARISON_CLASS_P (cond)); data.expr_loc = EXPR_LOCATION (init); data.kind = 1; if (TREE_OPERAND (cond, 0) == decl) walk_tree_1 (&TREE_OPERAND (cond, 1), c_omp_check_loop_iv_r, &data, &pset, lh); else walk_tree_1 (&TREE_OPERAND (cond, 0), c_omp_check_loop_iv_r, &data, &pset, lh); } return !data.fail; } /* This function splits clauses for OpenACC combined loop constructs. OpenACC combined loop constructs are: #pragma acc kernels loop #pragma acc parallel loop */ tree c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses, bool is_parallel) { tree next, loop_clauses, nc; loop_clauses = *not_loop_clauses = NULL_TREE; for (; clauses ; clauses = next) { next = OMP_CLAUSE_CHAIN (clauses); switch (OMP_CLAUSE_CODE (clauses)) { /* Loop clauses. */ case OMP_CLAUSE_COLLAPSE: case OMP_CLAUSE_TILE: case OMP_CLAUSE_GANG: case OMP_CLAUSE_WORKER: case OMP_CLAUSE_VECTOR: case OMP_CLAUSE_AUTO: case OMP_CLAUSE_SEQ: case OMP_CLAUSE_INDEPENDENT: case OMP_CLAUSE_PRIVATE: OMP_CLAUSE_CHAIN (clauses) = loop_clauses; loop_clauses = clauses; break; /* Reductions must be duplicated on both constructs. 
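   For illustration (assumed user code), when splitting

       #pragma acc parallel loop reduction(+:sum)
       for (i = 0; i < n; i++)
         sum += a[i];

   the reduction clause stays on the loop construct and, because IS_PARALLEL
   is set, a copy is also chained onto the enclosing parallel construct.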
*/ case OMP_CLAUSE_REDUCTION: if (is_parallel) { nc = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_REDUCTION); OMP_CLAUSE_DECL (nc) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_REDUCTION_CODE (nc) = OMP_CLAUSE_REDUCTION_CODE (clauses); OMP_CLAUSE_CHAIN (nc) = *not_loop_clauses; *not_loop_clauses = nc; } OMP_CLAUSE_CHAIN (clauses) = loop_clauses; loop_clauses = clauses; break; /* Parallel/kernels clauses. */ default: OMP_CLAUSE_CHAIN (clauses) = *not_loop_clauses; *not_loop_clauses = clauses; break; } } return loop_clauses; } /* This function attempts to split or duplicate clauses for OpenMP combined/composite constructs. Right now there are 21 different constructs. CODE is the innermost construct in the combined construct, and MASK allows to determine which constructs are combined together, as every construct has at least one clause that no other construct has (except for OMP_SECTIONS, but that can be only combined with parallel). OpenMP combined/composite constructs are: #pragma omp distribute parallel for #pragma omp distribute parallel for simd #pragma omp distribute simd #pragma omp for simd #pragma omp parallel for #pragma omp parallel for simd #pragma omp parallel sections #pragma omp target parallel #pragma omp target parallel for #pragma omp target parallel for simd #pragma omp target teams #pragma omp target teams distribute #pragma omp target teams distribute parallel for #pragma omp target teams distribute parallel for simd #pragma omp target teams distribute simd #pragma omp target simd #pragma omp taskloop simd #pragma omp teams distribute #pragma omp teams distribute parallel for #pragma omp teams distribute parallel for simd #pragma omp teams distribute simd */ void c_omp_split_clauses (location_t loc, enum tree_code code, omp_clause_mask mask, tree clauses, tree *cclauses) { tree next, c; enum c_omp_clause_split s; int i; for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++) cclauses[i] = NULL; /* Add implicit nowait clause on #pragma omp parallel {for,for simd,sections}. */ if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) switch (code) { case OMP_FOR: case OMP_SIMD: cclauses[C_OMP_CLAUSE_SPLIT_FOR] = build_omp_clause (loc, OMP_CLAUSE_NOWAIT); break; case OMP_SECTIONS: cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS] = build_omp_clause (loc, OMP_CLAUSE_NOWAIT); break; default: break; } for (; clauses ; clauses = next) { next = OMP_CLAUSE_CHAIN (clauses); switch (OMP_CLAUSE_CODE (clauses)) { /* First the clauses that are unique to some constructs. */ case OMP_CLAUSE_DEVICE: case OMP_CLAUSE_MAP: case OMP_CLAUSE_IS_DEVICE_PTR: case OMP_CLAUSE_DEFAULTMAP: case OMP_CLAUSE_DEPEND: s = C_OMP_CLAUSE_SPLIT_TARGET; break; case OMP_CLAUSE_NUM_TEAMS: case OMP_CLAUSE_THREAD_LIMIT: s = C_OMP_CLAUSE_SPLIT_TEAMS; break; case OMP_CLAUSE_DIST_SCHEDULE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_PROC_BIND: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; case OMP_CLAUSE_ORDERED: s = C_OMP_CLAUSE_SPLIT_FOR; break; case OMP_CLAUSE_SCHEDULE: s = C_OMP_CLAUSE_SPLIT_FOR; if (code != OMP_SIMD) OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0; break; case OMP_CLAUSE_SAFELEN: case OMP_CLAUSE_SIMDLEN: case OMP_CLAUSE_ALIGNED: s = C_OMP_CLAUSE_SPLIT_SIMD; break; case OMP_CLAUSE_GRAINSIZE: case OMP_CLAUSE_NUM_TASKS: case OMP_CLAUSE_FINAL: case OMP_CLAUSE_UNTIED: case OMP_CLAUSE_MERGEABLE: case OMP_CLAUSE_NOGROUP: case OMP_CLAUSE_PRIORITY: s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break; /* Duplicate this to all of taskloop, distribute, for and simd. 
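   For illustration (assumed user code), in

       #pragma omp target teams distribute parallel for simd collapse(2)

   the collapse clause ends up on the distribute, for and simd leaf
   constructs; for taskloop simd it goes to both taskloop and simd.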
*/ case OMP_CLAUSE_COLLAPSE: if (code == OMP_SIMD) { if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_COLLAPSE); OMP_CLAUSE_COLLAPSE_EXPR (c) = OMP_CLAUSE_COLLAPSE_EXPR (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; } else { /* This must be #pragma omp target simd */ s = C_OMP_CLAUSE_SPLIT_SIMD; break; } } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_COLLAPSE); OMP_CLAUSE_COLLAPSE_EXPR (c) = OMP_CLAUSE_COLLAPSE_EXPR (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR]; cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c; s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; } else s = C_OMP_CLAUSE_SPLIT_FOR; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) s = C_OMP_CLAUSE_SPLIT_TASKLOOP; else s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; /* Private clause is supported on all constructs, it is enough to put it on the innermost one. For #pragma omp {for,sections} put it on parallel though, as that's what we did for OpenMP 3.1. */ case OMP_CLAUSE_PRIVATE: switch (code) { case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break; case OMP_FOR: case OMP_SECTIONS: case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break; default: gcc_unreachable (); } break; /* Firstprivate clause is supported on all constructs but simd. Put it on the outermost of those and duplicate on teams and parallel. */ case OMP_CLAUSE_FIRSTPRIVATE: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0) { if (code == OMP_SIMD && (mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0) { /* This must be #pragma omp target simd. */ s = C_OMP_CLAUSE_SPLIT_TARGET; break; } c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET]; cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) { if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c; if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) s = C_OMP_CLAUSE_SPLIT_TEAMS; else s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; } else /* This must be #pragma omp parallel{, for{, simd}, sections} or #pragma omp target parallel. */ s = C_OMP_CLAUSE_SPLIT_PARALLEL; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) { /* This must be one of #pragma omp {,target }teams distribute #pragma omp target teams #pragma omp {,target }teams distribute simd. 
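   For illustration (assumed user code), in

       #pragma omp target teams distribute parallel for firstprivate(x)

   copies of firstprivate(x) are chained onto the target and parallel parts
   and the original clause is placed on teams.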
*/ gcc_assert (code == OMP_DISTRIBUTE || code == OMP_TEAMS || code == OMP_SIMD); s = C_OMP_CLAUSE_SPLIT_TEAMS; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) { /* This must be #pragma omp distribute simd. */ gcc_assert (code == OMP_SIMD); s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) { /* This must be #pragma omp taskloop simd. */ gcc_assert (code == OMP_SIMD); s = C_OMP_CLAUSE_SPLIT_TASKLOOP; } else { /* This must be #pragma omp for simd. */ gcc_assert (code == OMP_SIMD); s = C_OMP_CLAUSE_SPLIT_FOR; } break; /* Lastprivate is allowed on distribute, for, sections and simd. In parallel {for{, simd},sections} we actually want to put it on parallel rather than for or sections. */ case OMP_CLAUSE_LASTPRIVATE: if (code == OMP_DISTRIBUTE) { s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_LASTPRIVATE); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE]; cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c; } if (code == OMP_FOR || code == OMP_SECTIONS) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else s = C_OMP_CLAUSE_SPLIT_FOR; break; } gcc_assert (code == OMP_SIMD); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_LASTPRIVATE); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else s = C_OMP_CLAUSE_SPLIT_FOR; OMP_CLAUSE_CHAIN (c) = cclauses[s]; cclauses[s] = c; } s = C_OMP_CLAUSE_SPLIT_SIMD; break; /* Shared and default clauses are allowed on parallel, teams and taskloop. */ case OMP_CLAUSE_SHARED: case OMP_CLAUSE_DEFAULT: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) { s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0) { s = C_OMP_CLAUSE_SPLIT_TEAMS; break; } c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_CODE (clauses)); if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED) OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); else OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_KIND (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS]; cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c; } s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; /* Reduction is allowed on simd, for, parallel, sections and teams. Duplicate it on all of them, but omit on for or sections if parallel is present. 
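   For illustration (assumed user code), in

       #pragma omp teams distribute parallel for simd reduction(+:s)

   the reduction is duplicated onto simd and parallel and the original clause
   is placed on teams; nothing goes on for, since parallel is present.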
*/ case OMP_CLAUSE_REDUCTION: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) { if (code == OMP_SIMD) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_REDUCTION); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_REDUCTION_CODE (c) = OMP_CLAUSE_REDUCTION_CODE (clauses); OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses); OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_REDUCTION); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_REDUCTION_CODE (c) = OMP_CLAUSE_REDUCTION_CODE (clauses); OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses); OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c; s = C_OMP_CLAUSE_SPLIT_TEAMS; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else s = C_OMP_CLAUSE_SPLIT_FOR; } else if (code == OMP_SECTIONS || code == OMP_PARALLEL) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else if (code == OMP_SIMD) s = C_OMP_CLAUSE_SPLIT_SIMD; else s = C_OMP_CLAUSE_SPLIT_TEAMS; break; case OMP_CLAUSE_IF: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) s = C_OMP_CLAUSE_SPLIT_TASKLOOP; else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0) { if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_PARALLEL) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_TARGET) s = C_OMP_CLAUSE_SPLIT_TARGET; else if (OMP_CLAUSE_IF_MODIFIER (clauses) == ERROR_MARK) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_IF); OMP_CLAUSE_IF_MODIFIER (c) = OMP_CLAUSE_IF_MODIFIER (clauses); OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET]; cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c; s = C_OMP_CLAUSE_SPLIT_PARALLEL; } else { error_at (OMP_CLAUSE_LOCATION (clauses), "expected %<parallel%> or %<target%> %<if%> " "clause modifier"); continue; } } else s = C_OMP_CLAUSE_SPLIT_PARALLEL; } else s = C_OMP_CLAUSE_SPLIT_TARGET; break; case OMP_CLAUSE_LINEAR: /* Linear clause is allowed on simd and for. Put it on the innermost construct. */ if (code == OMP_SIMD) s = C_OMP_CLAUSE_SPLIT_SIMD; else s = C_OMP_CLAUSE_SPLIT_FOR; break; case OMP_CLAUSE_NOWAIT: /* Nowait clause is allowed on target, for and sections, but is not allowed on parallel for or parallel sections. Therefore, put it on target construct if present, because that can only be combined with parallel for{, simd} and not with for{, simd}, otherwise to the worksharing construct. 
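   For illustration (assumed user code), in

       #pragma omp target parallel for nowait

   the nowait clause is placed on the target construct, whereas in a plain

       #pragma omp for nowait

   it stays on the worksharing loop.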
*/ if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0) s = C_OMP_CLAUSE_SPLIT_TARGET; else s = C_OMP_CLAUSE_SPLIT_FOR; break; default: gcc_unreachable (); } OMP_CLAUSE_CHAIN (clauses) = cclauses[s]; cclauses[s] = clauses; } if (!flag_checking) return; if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE); if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0 && code != OMP_SECTIONS) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE); if (code != OMP_SIMD) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE); } /* qsort callback to compare #pragma omp declare simd clauses. */ static int c_omp_declare_simd_clause_cmp (const void *p, const void *q) { tree a = *(const tree *) p; tree b = *(const tree *) q; if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b)) { if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b)) return -1; return 1; } if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH) { int c = tree_to_shwi (OMP_CLAUSE_DECL (a)); int d = tree_to_shwi (OMP_CLAUSE_DECL (b)); if (c < d) return 1; if (c > d) return -1; } return 0; } /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd CLAUSES on FNDECL into argument indexes and sort them. */ tree c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses) { tree c; vec<tree> clvec = vNULL; for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH) { tree decl = OMP_CLAUSE_DECL (c); tree arg; int idx; for (arg = parms, idx = 0; arg; arg = TREE_CHAIN (arg), idx++) if (arg == decl) break; if (arg == NULL_TREE) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is not an function argument", decl); continue; } OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx); if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c)) { decl = OMP_CLAUSE_LINEAR_STEP (c); for (arg = parms, idx = 0; arg; arg = TREE_CHAIN (arg), idx++) if (arg == decl) break; if (arg == NULL_TREE) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is not an function argument", decl); continue; } OMP_CLAUSE_LINEAR_STEP (c) = build_int_cst (integer_type_node, idx); } } clvec.safe_push (c); } if (!clvec.is_empty ()) { unsigned int len = clvec.length (), i; clvec.qsort (c_omp_declare_simd_clause_cmp); clauses = clvec[0]; for (i = 0; i < len; i++) OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE; } else clauses = NULL_TREE; clvec.release (); return clauses; } /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. 
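   For illustration (assumed user code), for

       #pragma omp declare simd uniform(scale) linear(i:1)
       float axpy (float scale, int i);

   c_omp_declare_simd_clauses_to_numbers stores the clause decls as the
   argument indexes 0 and 1; this function maps those indexes back to the
   PARM_DECLs for scale and i.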
*/ void c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses) { tree c; for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH) { int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i; tree arg; for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg; arg = TREE_CHAIN (arg), i++) if (i == idx) break; gcc_assert (arg); OMP_CLAUSE_DECL (c) = arg; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c)) { idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c)); for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg; arg = TREE_CHAIN (arg), i++) if (i == idx) break; gcc_assert (arg); OMP_CLAUSE_LINEAR_STEP (c) = arg; } } } /* True if OpenMP sharing attribute of DECL is predetermined. */ enum omp_clause_default_kind c_omp_predetermined_sharing (tree decl) { /* Variables with const-qualified type having no mutable member are predetermined shared. */ if (TREE_READONLY (decl)) return OMP_CLAUSE_DEFAULT_SHARED; return OMP_CLAUSE_DEFAULT_UNSPECIFIED; }
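/* Illustration (assumed user code) of the predetermined-sharing rule
   implemented just above: a const-qualified variable is predetermined
   shared, so it needs no explicit data-sharing clause even under
   default(none):

       const int n = 100;
       int s = 0;
     #pragma omp parallel for default(none) reduction(+:s)
       for (int i = 0; i < n; i++)
         s += i;                        n may be read without a clause
*/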
#include "config.h" #include "system.h" #include "coretypes.h" #include "options.h" #include "c-common.h" #include "gimple-expr.h" #include "c-pragma.h" #include "omp-general.h" #include "gomp-constants.h" /* Complete a #pragma oacc wait construct. LOC is the location of the #pragma. */ tree c_finish_oacc_wait (location_t loc, tree parms, tree clauses) { const int nparms = list_length (parms); tree stmt, t; vec<tree, va_gc> *args; vec_alloc (args, nparms + 2); stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT); if (omp_find_clause (clauses, OMP_CLAUSE_ASYNC)) t = OMP_CLAUSE_ASYNC_EXPR (clauses); else t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC); args->quick_push (t); args->quick_push (build_int_cst (integer_type_node, nparms)); for (t = parms; t; t = TREE_CHAIN (t)) { if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST) args->quick_push (build_int_cst (integer_type_node, TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t)))); else args->quick_push (OMP_CLAUSE_WAIT_EXPR (t)); } stmt = build_call_expr_loc_vec (loc, stmt, args); vec_free (args); return stmt; } /* Complete a that follows the pragma. LOC is the l*/ tree c_finish_omp_master (location_t loc, tree stmt) { tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt)); SET_EXPR_LOCATION (t, loc); return t; } /* Complete a that follows the pragma. LOC is the l*/ tree c_finish_omp_taskgroup (location_t loc, tree stmt) { tree t = add_stmt (build1 (OMP_TASKGROUP, void_type_node, stmt)); SET_EXPR_LOCATION (t, loc); return t; } /* Complete a that follows the pragma, NAME is the identifier in the pragma, or null if it was omitted. LOC is the location of the #pragma. */ tree c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses) { tree stmt = make_node (OMP_CRITICAL); TREE_TYPE (stmt) = void_type_node; OMP_CRITICAL_BODY (stmt) = body; OMP_CRITICAL_NAME (stmt) = name; OMP_CRITICAL_CLAUSES (stmt) = clauses; SET_EXPR_LOCATION (stmt, loc); return add_stmt (stmt); } /* Complete a that follows the pragma. LOC is the location of the #pragma. */ tree c_finish_omp_ordered (location_t loc, tree clauses, tree stmt) { tree t = make_node (OMP_ORDERED); TREE_TYPE (t) = void_type_node; OMP_ORDERED_BODY (t) = stmt; OMP_ORDERED_CLAUSES (t) = clauses; SET_EXPR_LOCATION (t, loc); return add_stmt (t); } /* Complete a the #pragma. */ void c_finish_omp_barrier (location_t loc) { tree x; x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER); x = build_call_expr_loc (loc, x, 0); add_stmt (x); } /* Complete a pragma. */ void c_finish_omp_taskwait (location_t loc) { tree x; x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT); x = build_call_expr_loc (loc, x, 0); add_stmt (x); } /* Complete a pragma. */ void c_finish_omp_taskyield (location_t loc) { tree x; x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD); x = build_call_expr_loc (loc, x, 0); add_stmt (x); } /* Complete a the expression to be implemented atomically is LHS opcode= RHS. For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS opcode= RHS with the new or old content of LHS returned. LOC is the location of the atomic statement. The value returned is either error_mark_node (if the construct was erroneous) or an OMP_ATOMIC* node which should be added to the current statement tree with add_stmt. If TEST is set, avoid calling save_expr or create_tmp_var*. 
*/ tree c_finish_omp_atomic (location_t loc, enum tree_code code, enum tree_code opcode, tree lhs, tree rhs, tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst, bool test) { tree x, type, addr, pre = NULL_TREE; if (lhs == error_mark_node || rhs == error_mark_node || v == error_mark_node || lhs1 == error_mark_node || rhs1 == error_mark_node) return error_mark_node; /* ??? According to one reading of the OpenMP spec, complex type are supported, but there are no atomic stores for any architecture. But at least icc 9.0 doesn't support complex types here either. And lets not even talk about vector types... */ type = TREE_TYPE (lhs); if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type) && !SCALAR_FLOAT_TYPE_P (type)) { error_at (loc, "invalid expression type for %< return error_mark_node; } if (TYPE_ATOMIC (type)) { error_at (loc, "%<_Atomic%> expression in %< return error_mark_node; } if (opcode == RDIV_EXPR) opcode = TRUNC_DIV_EXPR; /* ??? Validate that rhs does not overlap lhs. */ /* Take and save the address of the lhs. From then on we'll reference it via indirection. */ addr = build_unary_op (loc, ADDR_EXPR, lhs, false); if (addr == error_mark_node) return error_mark_node; if (!test) addr = save_expr (addr); if (!test && TREE_CODE (addr) != SAVE_EXPR && (TREE_CODE (addr) != ADDR_EXPR || !VAR_P (TREE_OPERAND (addr, 0)))) { /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize it even after unsharing function body. */ tree var = create_tmp_var_raw (TREE_TYPE (addr)); DECL_CONTEXT (var) = current_function_decl; addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL); } lhs = build_indirect_ref (loc, addr, RO_NULL); if (code == OMP_ATOMIC_READ) { x = build1 (OMP_ATOMIC_READ, type, addr); SET_EXPR_LOCATION (x, loc); OMP_ATOMIC_SEQ_CST (x) = seq_cst; return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR, loc, x, NULL_TREE); } /* There are lots of warnings, errors, and conversions that need to happen in the course of interpreting a statement. Use the normal mechanisms to do this, and then take it apart again. */ if (swapped) { rhs = build_binary_op (loc, opcode, rhs, lhs, 1); opcode = NOP_EXPR; } bool save = in_late_binary_op; in_late_binary_op = true; x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE); in_late_binary_op = save; if (x == error_mark_node) return error_mark_node; if (TREE_CODE (x) == COMPOUND_EXPR) { pre = TREE_OPERAND (x, 0); gcc_assert (TREE_CODE (pre) == SAVE_EXPR); x = TREE_OPERAND (x, 1); } gcc_assert (TREE_CODE (x) == MODIFY_EXPR); rhs = TREE_OPERAND (x, 1); /* Punt the actual generation of atomic operations to common code. */ if (code == OMP_ATOMIC) type = void_type_node; x = build2 (code, type, addr, rhs); SET_EXPR_LOCATION (x, loc); OMP_ATOMIC_SEQ_CST (x) = seq_cst; /* Generally it is hard to prove lhs1 and lhs are the same memory location, just diagnose different variables. */ if (rhs1 && VAR_P (rhs1) && VAR_P (lhs) && rhs1 != lhs && !test) { if (code == OMP_ATOMIC) error_at (loc, "%< "variables for memory"); else error_at (loc, "%< "variables for memory"); return error_mark_node; } if (code != OMP_ATOMIC) { /* Generally it is hard to prove lhs1 and lhs are the same memory location, just diagnose different variables. 
*/ if (lhs1 && VAR_P (lhs1) && VAR_P (lhs)) { if (lhs1 != lhs && !test) { error_at (loc, "%< "different variables for memory"); return error_mark_node; } } x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR, loc, x, NULL_TREE); if (rhs1 && rhs1 != lhs) { tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false); if (rhs1addr == error_mark_node) return error_mark_node; x = omit_one_operand_loc (loc, type, x, rhs1addr); } if (lhs1 && lhs1 != lhs) { tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false); if (lhs1addr == error_mark_node) return error_mark_node; if (code == OMP_ATOMIC_CAPTURE_OLD) x = omit_one_operand_loc (loc, type, x, lhs1addr); else { if (!test) x = save_expr (x); x = omit_two_operands_loc (loc, type, x, x, lhs1addr); } } } else if (rhs1 && rhs1 != lhs) { tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false); if (rhs1addr == error_mark_node) return error_mark_node; x = omit_one_operand_loc (loc, type, x, rhs1addr); } if (pre) x = omit_one_operand_loc (loc, type, x, pre); return x; } /* Complete a the variable list that the syntax allows. LOC is the location of the #pragma. */ void c_finish_omp_flush (location_t loc) { tree x; x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE); x = build_call_expr_loc (loc, x, 0); add_stmt (x); } /* Check and canonicalize OMP_FOR increment expression. Helper function for c_finish_omp_for. */ static tree check_omp_for_incr_expr (location_t loc, tree exp, tree decl) { tree t; if (!INTEGRAL_TYPE_P (TREE_TYPE (exp)) || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl))) return error_mark_node; if (exp == decl) return build_int_cst (TREE_TYPE (exp), 0); switch (TREE_CODE (exp)) { CASE_CONVERT: t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl); if (t != error_mark_node) return fold_convert_loc (loc, TREE_TYPE (exp), t); break; case MINUS_EXPR: t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl); if (t != error_mark_node) return fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (exp), t, TREE_OPERAND (exp, 1)); break; case PLUS_EXPR: t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl); if (t != error_mark_node) return fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (exp), t, TREE_OPERAND (exp, 1)); t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl); if (t != error_mark_node) return fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (exp), TREE_OPERAND (exp, 0), t); break; case COMPOUND_EXPR: { /* cp_build_modify_expr forces preevaluation of the RHS to make sure that it is evaluated before the lvalue-rvalue conversion is applied to the LHS. Reconstruct the original expression. */ tree op0 = TREE_OPERAND (exp, 0); if (TREE_CODE (op0) == TARGET_EXPR && !VOID_TYPE_P (TREE_TYPE (op0))) { tree op1 = TREE_OPERAND (exp, 1); tree temp = TARGET_EXPR_SLOT (op0); if (BINARY_CLASS_P (op1) && TREE_OPERAND (op1, 1) == temp) { op1 = copy_node (op1); TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0); return check_omp_for_incr_expr (loc, op1, decl); } } break; } default: break; } return error_mark_node; } /* If the OMP_FOR increment expression in INCR is of pointer type, canonicalize it into an expression handled by gimplify_omp_for() and return it. DECL is the iteration variable. 
*/ static tree c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr) { if (POINTER_TYPE_P (TREE_TYPE (decl)) && TREE_OPERAND (incr, 1)) { tree t = fold_convert_loc (loc, sizetype, TREE_OPERAND (incr, 1)); if (TREE_CODE (incr) == POSTDECREMENT_EXPR || TREE_CODE (incr) == PREDECREMENT_EXPR) t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t); t = fold_build_pointer_plus (decl, t); incr = build2 (MODIFY_EXPR, void_type_node, decl, t); } return incr; } /* Validate and generate OMP_FOR. DECLV is a vector of iteration variables, for each collapsed loop. ORIG_DECLV, if non-NULL, is a vector with the original iteration variables (prior to any transformations, by say, C++ iterators). INITV, CONDV and INCRV are vectors containing initialization expressions, controlling predicates and increment expressions. BODY is the body of the loop and PRE_BODY statements that go before the loop. */ tree c_finish_omp_for (location_t locus, enum tree_code code, tree declv, tree orig_declv, tree initv, tree condv, tree incrv, tree body, tree pre_body) { location_t elocus; bool fail = false; int i; if ((code == CILK_SIMD || code == CILK_FOR) && !c_check_cilk_loop (locus, TREE_VEC_ELT (declv, 0))) fail = true; gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv)); for (i = 0; i < TREE_VEC_LENGTH (declv); i++) { tree decl = TREE_VEC_ELT (declv, i); tree init = TREE_VEC_ELT (initv, i); tree cond = TREE_VEC_ELT (condv, i); tree incr = TREE_VEC_ELT (incrv, i); elocus = locus; if (EXPR_HAS_LOCATION (init)) elocus = EXPR_LOCATION (init); /* Validate the iteration variable. */ if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)) && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE) { error_at (elocus, "invalid type for iteration variable %qE", decl); fail = true; } else if (TYPE_ATOMIC (TREE_TYPE (decl))) { error_at (elocus, "%<_Atomic%> iteration variable %qE", decl); fail = true; /* _Atomic iterator confuses stuff too much, so we risk ICE trying to diagnose it further. */ continue; } /* In the case of "for (int i = 0...)", init will be a decl. It should have a DECL_INITIAL that we can turn into an assignment. */ if (init == decl) { elocus = DECL_SOURCE_LOCATION (decl); init = DECL_INITIAL (decl); if (init == NULL) { error_at (elocus, "%qE is not initialized", decl); init = integer_zero_node; fail = true; } DECL_INITIAL (decl) = NULL_TREE; init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR, /* FIXME diagnostics: This should be the location of the INIT. */ elocus, init, NULL_TREE); } if (init != error_mark_node) { gcc_assert (TREE_CODE (init) == MODIFY_EXPR); gcc_assert (TREE_OPERAND (init, 0) == decl); } if (cond == NULL_TREE) { error_at (elocus, "missing controlling predicate"); fail = true; } else { bool cond_ok = false; /* E.g. C sizeof (vla) could add COMPOUND_EXPRs with evaluation of the vla VAR_DECL. We need to readd them to the non-decl operand. See PR45784. */ while (TREE_CODE (cond) == COMPOUND_EXPR) cond = TREE_OPERAND (cond, 1); if (EXPR_HAS_LOCATION (cond)) elocus = EXPR_LOCATION (cond); if (TREE_CODE (cond) == LT_EXPR || TREE_CODE (cond) == LE_EXPR || TREE_CODE (cond) == GT_EXPR || TREE_CODE (cond) == GE_EXPR || TREE_CODE (cond) == NE_EXPR || TREE_CODE (cond) == EQ_EXPR) { tree op0 = TREE_OPERAND (cond, 0); tree op1 = TREE_OPERAND (cond, 1); /* 2.5.1. The comparison in the condition is computed in the type of DECL, otherwise the behavior is undefined. 
For example: long n; int i; i < n; according to ISO will be evaluated as: (long)i < n; We want to force: i < (int)n; */ if (TREE_CODE (op0) == NOP_EXPR && decl == TREE_OPERAND (op0, 0)) { TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0); TREE_OPERAND (cond, 1) = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl), TREE_OPERAND (cond, 1)); } else if (TREE_CODE (op1) == NOP_EXPR && decl == TREE_OPERAND (op1, 0)) { TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0); TREE_OPERAND (cond, 0) = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl), TREE_OPERAND (cond, 0)); } if (decl == TREE_OPERAND (cond, 0)) cond_ok = true; else if (decl == TREE_OPERAND (cond, 1)) { TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond))); TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0); TREE_OPERAND (cond, 0) = decl; cond_ok = true; } if (TREE_CODE (cond) == NE_EXPR || TREE_CODE (cond) == EQ_EXPR) { if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))) { if (code != CILK_SIMD && code != CILK_FOR) cond_ok = false; } else if (operand_equal_p (TREE_OPERAND (cond, 1), TYPE_MIN_VALUE (TREE_TYPE (decl)), 0)) TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR ? GT_EXPR : LE_EXPR); else if (operand_equal_p (TREE_OPERAND (cond, 1), TYPE_MAX_VALUE (TREE_TYPE (decl)), 0)) TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR ? LT_EXPR : GE_EXPR); else if (code != CILK_SIMD && code != CILK_FOR) cond_ok = false; } if (cond_ok && TREE_VEC_ELT (condv, i) != cond) { tree ce = NULL_TREE, *pce = &ce; tree type = TREE_TYPE (TREE_OPERAND (cond, 1)); for (tree c = TREE_VEC_ELT (condv, i); c != cond; c = TREE_OPERAND (c, 1)) { *pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0), TREE_OPERAND (cond, 1)); pce = &TREE_OPERAND (*pce, 1); } TREE_OPERAND (cond, 1) = ce; TREE_VEC_ELT (condv, i) = cond; } } if (!cond_ok) { error_at (elocus, "invalid controlling predicate"); fail = true; } } if (incr == NULL_TREE) { error_at (elocus, "missing increment expression"); fail = true; } else { bool incr_ok = false; if (EXPR_HAS_LOCATION (incr)) elocus = EXPR_LOCATION (incr); /* Check all the valid increment expressions: v++, v--, ++v, --v, v = v + incr, v = incr + v and v = v - incr. 
*/ switch (TREE_CODE (incr)) { case POSTINCREMENT_EXPR: case PREINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREDECREMENT_EXPR: if (TREE_OPERAND (incr, 0) != decl) break; incr_ok = true; incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr); break; case COMPOUND_EXPR: if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR) break; incr = TREE_OPERAND (incr, 1); /* FALLTHRU */ case MODIFY_EXPR: if (TREE_OPERAND (incr, 0) != decl) break; if (TREE_OPERAND (incr, 1) == decl) break; if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl)) incr_ok = true; else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR || (TREE_CODE (TREE_OPERAND (incr, 1)) == POINTER_PLUS_EXPR)) && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl) incr_ok = true; else { tree t = check_omp_for_incr_expr (elocus, TREE_OPERAND (incr, 1), decl); if (t != error_mark_node) { incr_ok = true; t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t); incr = build2 (MODIFY_EXPR, void_type_node, decl, t); } } break; default: break; } if (!incr_ok) { error_at (elocus, "invalid increment expression"); fail = true; } } TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (incrv, i) = incr; } if (fail) return NULL; else { tree t = make_node (code); TREE_TYPE (t) = void_type_node; OMP_FOR_INIT (t) = initv; OMP_FOR_COND (t) = condv; OMP_FOR_INCR (t) = incrv; OMP_FOR_BODY (t) = body; OMP_FOR_PRE_BODY (t) = pre_body; OMP_FOR_ORIG_DECLS (t) = orig_declv; SET_EXPR_LOCATION (t, locus); return t; } } /* Type for passing data in between c_omp_check_loop_iv and c_omp_check_loop_iv_r. */ struct c_omp_check_loop_iv_data { tree declv; bool fail; location_t stmt_loc; location_t expr_loc; int kind; walk_tree_lh lh; hash_set<tree> *ppset; }; /* Helper function called via walk_tree, to diagnose uses of associated loop IVs inside of lb, b and incr expressions of OpenMP loops. */ static tree c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data) { struct c_omp_check_loop_iv_data *d = (struct c_omp_check_loop_iv_data *) data; if (DECL_P (*tp)) { int i; for (i = 0; i < TREE_VEC_LENGTH (d->declv); i++) if (*tp == TREE_VEC_ELT (d->declv, i)) { location_t loc = d->expr_loc; if (loc == UNKNOWN_LOCATION) loc = d->stmt_loc; switch (d->kind) { case 0: error_at (loc, "initializer expression refers to " "iteration variable %qD", *tp); break; case 1: error_at (loc, "condition expression refers to " "iteration variable %qD", *tp); break; case 2: error_at (loc, "increment expression refers to " "iteration variable %qD", *tp); break; } d->fail = true; } } /* Don't walk dtors added by C++ wrap_cleanups_r. */ else if (TREE_CODE (*tp) == TRY_CATCH_EXPR && TRY_CATCH_IS_CLEANUP (*tp)) { *walk_subtrees = 0; return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data, d->ppset, d->lh); } return NULL_TREE; } /* Diagnose invalid references to loop iterators in lb, b and incr expressions. 
*/ bool c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh) { hash_set<tree> pset; struct c_omp_check_loop_iv_data data; int i; data.declv = declv; data.fail = false; data.stmt_loc = EXPR_LOCATION (stmt); data.lh = lh; data.ppset = &pset; for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++) { tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i); gcc_assert (TREE_CODE (init) == MODIFY_EXPR); tree decl = TREE_OPERAND (init, 0); tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i); gcc_assert (COMPARISON_CLASS_P (cond)); gcc_assert (TREE_OPERAND (cond, 0) == decl); tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i); data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1)); data.kind = 0; walk_tree_1 (&TREE_OPERAND (init, 1), c_omp_check_loop_iv_r, &data, &pset, lh); /* Don't warn for C++ random access iterators here, the expression then involves the subtraction and always refers to the original value. The C++ FE needs to warn on those earlier. */ if (decl == TREE_VEC_ELT (declv, i)) { data.expr_loc = EXPR_LOCATION (cond); data.kind = 1; walk_tree_1 (&TREE_OPERAND (cond, 1), c_omp_check_loop_iv_r, &data, &pset, lh); } if (TREE_CODE (incr) == MODIFY_EXPR) { gcc_assert (TREE_OPERAND (incr, 0) == decl); incr = TREE_OPERAND (incr, 1); data.kind = 2; if (TREE_CODE (incr) == PLUS_EXPR && TREE_OPERAND (incr, 1) == decl) { data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 0)); walk_tree_1 (&TREE_OPERAND (incr, 0), c_omp_check_loop_iv_r, &data, &pset, lh); } else { data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 1)); walk_tree_1 (&TREE_OPERAND (incr, 1), c_omp_check_loop_iv_r, &data, &pset, lh); } } } return !data.fail; } /* Similar, but allows to check the init or cond expressions individually. */ bool c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, tree decl, tree init, tree cond, walk_tree_lh lh) { hash_set<tree> pset; struct c_omp_check_loop_iv_data data; data.declv = declv; data.fail = false; data.stmt_loc = stmt_loc; data.lh = lh; data.ppset = &pset; if (init) { data.expr_loc = EXPR_LOCATION (init); data.kind = 0; walk_tree_1 (&init, c_omp_check_loop_iv_r, &data, &pset, lh); } if (cond) { gcc_assert (COMPARISON_CLASS_P (cond)); data.expr_loc = EXPR_LOCATION (init); data.kind = 1; if (TREE_OPERAND (cond, 0) == decl) walk_tree_1 (&TREE_OPERAND (cond, 1), c_omp_check_loop_iv_r, &data, &pset, lh); else walk_tree_1 (&TREE_OPERAND (cond, 0), c_omp_check_loop_iv_r, &data, &pset, lh); } return !data.fail; } /* This function splits clauses for OpenACC combined loop constructs. OpenACC combined loop constructs are: #pragma acc kernels loop #pragma acc parallel loop */ tree c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses, bool is_parallel) { tree next, loop_clauses, nc; loop_clauses = *not_loop_clauses = NULL_TREE; for (; clauses ; clauses = next) { next = OMP_CLAUSE_CHAIN (clauses); switch (OMP_CLAUSE_CODE (clauses)) { /* Loop clauses. */ case OMP_CLAUSE_COLLAPSE: case OMP_CLAUSE_TILE: case OMP_CLAUSE_GANG: case OMP_CLAUSE_WORKER: case OMP_CLAUSE_VECTOR: case OMP_CLAUSE_AUTO: case OMP_CLAUSE_SEQ: case OMP_CLAUSE_INDEPENDENT: case OMP_CLAUSE_PRIVATE: OMP_CLAUSE_CHAIN (clauses) = loop_clauses; loop_clauses = clauses; break; /* Reductions must be duplicated on both constructs. 
*/ case OMP_CLAUSE_REDUCTION: if (is_parallel) { nc = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_REDUCTION); OMP_CLAUSE_DECL (nc) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_REDUCTION_CODE (nc) = OMP_CLAUSE_REDUCTION_CODE (clauses); OMP_CLAUSE_CHAIN (nc) = *not_loop_clauses; *not_loop_clauses = nc; } OMP_CLAUSE_CHAIN (clauses) = loop_clauses; loop_clauses = clauses; break; /* Parallel/kernels clauses. */ default: OMP_CLAUSE_CHAIN (clauses) = *not_loop_clauses; *not_loop_clauses = clauses; break; } } return loop_clauses; } /* This function attempts to split or duplicate clauses for OpenMP combined/composite constructs. Right now there are 21 different constructs. CODE is the innermost construct in the combined construct, and MASK allows to determine which constructs are combined together, as every construct has at least one clause that no other construct has (except for OMP_SECTIONS, but that can be only combined with parallel). OpenMP combined/composite constructs are: #pragma omp distribute parallel for #pragma omp distribute parallel for simd #pragma omp distribute simd #pragma omp for simd #pragma omp parallel for #pragma omp parallel for simd #pragma omp parallel sections #pragma omp target parallel #pragma omp target parallel for #pragma omp target parallel for simd #pragma omp target teams #pragma omp target teams distribute #pragma omp target teams distribute parallel for #pragma omp target teams distribute parallel for simd #pragma omp target teams distribute simd #pragma omp target simd #pragma omp taskloop simd #pragma omp teams distribute #pragma omp teams distribute parallel for #pragma omp teams distribute parallel for simd #pragma omp teams distribute simd */ void c_omp_split_clauses (location_t loc, enum tree_code code, omp_clause_mask mask, tree clauses, tree *cclauses) { tree next, c; enum c_omp_clause_split s; int i; for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++) cclauses[i] = NULL; /* Add implicit nowait clause on #pragma omp parallel {for,for simd,sections}. */ if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) switch (code) { case OMP_FOR: case OMP_SIMD: cclauses[C_OMP_CLAUSE_SPLIT_FOR] = build_omp_clause (loc, OMP_CLAUSE_NOWAIT); break; case OMP_SECTIONS: cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS] = build_omp_clause (loc, OMP_CLAUSE_NOWAIT); break; default: break; } for (; clauses ; clauses = next) { next = OMP_CLAUSE_CHAIN (clauses); switch (OMP_CLAUSE_CODE (clauses)) { /* First the clauses that are unique to some constructs. */ case OMP_CLAUSE_DEVICE: case OMP_CLAUSE_MAP: case OMP_CLAUSE_IS_DEVICE_PTR: case OMP_CLAUSE_DEFAULTMAP: case OMP_CLAUSE_DEPEND: s = C_OMP_CLAUSE_SPLIT_TARGET; break; case OMP_CLAUSE_NUM_TEAMS: case OMP_CLAUSE_THREAD_LIMIT: s = C_OMP_CLAUSE_SPLIT_TEAMS; break; case OMP_CLAUSE_DIST_SCHEDULE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_PROC_BIND: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; case OMP_CLAUSE_ORDERED: s = C_OMP_CLAUSE_SPLIT_FOR; break; case OMP_CLAUSE_SCHEDULE: s = C_OMP_CLAUSE_SPLIT_FOR; if (code != OMP_SIMD) OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0; break; case OMP_CLAUSE_SAFELEN: case OMP_CLAUSE_SIMDLEN: case OMP_CLAUSE_ALIGNED: s = C_OMP_CLAUSE_SPLIT_SIMD; break; case OMP_CLAUSE_GRAINSIZE: case OMP_CLAUSE_NUM_TASKS: case OMP_CLAUSE_FINAL: case OMP_CLAUSE_UNTIED: case OMP_CLAUSE_MERGEABLE: case OMP_CLAUSE_NOGROUP: case OMP_CLAUSE_PRIORITY: s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break; /* Duplicate this to all of taskloop, distribute, for and simd.
*/ case OMP_CLAUSE_COLLAPSE: if (code == OMP_SIMD) { if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_COLLAPSE); OMP_CLAUSE_COLLAPSE_EXPR (c) = OMP_CLAUSE_COLLAPSE_EXPR (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; } else { /* This must be #pragma omp target simd. */ s = C_OMP_CLAUSE_SPLIT_SIMD; break; } } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_COLLAPSE); OMP_CLAUSE_COLLAPSE_EXPR (c) = OMP_CLAUSE_COLLAPSE_EXPR (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR]; cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c; s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; } else s = C_OMP_CLAUSE_SPLIT_FOR; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) s = C_OMP_CLAUSE_SPLIT_TASKLOOP; else s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; /* Private clause is supported on all constructs, it is enough to put it on the innermost one. For #pragma omp {for,sections} put it on parallel though, as that's what we did for OpenMP 3.1. */ case OMP_CLAUSE_PRIVATE: switch (code) { case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break; case OMP_FOR: case OMP_SECTIONS: case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break; default: gcc_unreachable (); } break; /* Firstprivate clause is supported on all constructs but simd. Put it on the outermost of those and duplicate on teams and parallel. */ case OMP_CLAUSE_FIRSTPRIVATE: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0) { if (code == OMP_SIMD && (mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0) { /* This must be #pragma omp target simd. */ s = C_OMP_CLAUSE_SPLIT_TARGET; break; } c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET]; cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) { if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c; if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) s = C_OMP_CLAUSE_SPLIT_TEAMS; else s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; } else /* This must be #pragma omp parallel{, for{, simd}, sections} or #pragma omp target parallel. */ s = C_OMP_CLAUSE_SPLIT_PARALLEL; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) { /* This must be one of #pragma omp {,target }teams distribute #pragma omp target teams #pragma omp {,target }teams distribute simd. */ gcc_assert (code == OMP_DISTRIBUTE || code == OMP_TEAMS || code == OMP_SIMD); s = C_OMP_CLAUSE_SPLIT_TEAMS; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) { /* This must be #pragma omp distribute simd. */ gcc_assert (code == OMP_SIMD); s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) { /* This must be #pragma omp taskloop simd. */ gcc_assert (code == OMP_SIMD); s = C_OMP_CLAUSE_SPLIT_TASKLOOP; } else { /* This must be #pragma omp for simd. */ gcc_assert (code == OMP_SIMD); s = C_OMP_CLAUSE_SPLIT_FOR; }
break; /* Lastprivate is allowed on distribute, for, sections and simd. In parallel {for{, simd},sections} we actually want to put it on parallel rather than for or sections. */ case OMP_CLAUSE_LASTPRIVATE: if (code == OMP_DISTRIBUTE) { s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_LASTPRIVATE); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE]; cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c; } if (code == OMP_FOR || code == OMP_SECTIONS) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else s = C_OMP_CLAUSE_SPLIT_FOR; break; } gcc_assert (code == OMP_SIMD); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_LASTPRIVATE); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else s = C_OMP_CLAUSE_SPLIT_FOR; OMP_CLAUSE_CHAIN (c) = cclauses[s]; cclauses[s] = c; } s = C_OMP_CLAUSE_SPLIT_SIMD; break; /* Shared and default clauses are allowed on parallel, teams and taskloop. */ case OMP_CLAUSE_SHARED: case OMP_CLAUSE_DEFAULT: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) { s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0) { s = C_OMP_CLAUSE_SPLIT_TEAMS; break; } c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_CODE (clauses)); if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED) OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); else OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_KIND (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS]; cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c; } s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; /* Reduction is allowed on simd, for, parallel, sections and teams. Duplicate it on all of them, but omit on for or sections if parallel is present. 
*/ case OMP_CLAUSE_REDUCTION: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) { if (code == OMP_SIMD) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_REDUCTION); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_REDUCTION_CODE (c) = OMP_CLAUSE_REDUCTION_CODE (clauses); OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses); OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_REDUCTION); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses); OMP_CLAUSE_REDUCTION_CODE (c) = OMP_CLAUSE_REDUCTION_CODE (clauses); OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses); OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c; s = C_OMP_CLAUSE_SPLIT_TEAMS; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else s = C_OMP_CLAUSE_SPLIT_FOR; } else if (code == OMP_SECTIONS || code == OMP_PARALLEL) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else if (code == OMP_SIMD) s = C_OMP_CLAUSE_SPLIT_SIMD; else s = C_OMP_CLAUSE_SPLIT_TEAMS; break; case OMP_CLAUSE_IF: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) s = C_OMP_CLAUSE_SPLIT_TASKLOOP; else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0) { if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_PARALLEL) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else if (OMP_CLAUSE_IF_MODIFIER (clauses) == OMP_TARGET) s = C_OMP_CLAUSE_SPLIT_TARGET; else if (OMP_CLAUSE_IF_MODIFIER (clauses) == ERROR_MARK) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses), OMP_CLAUSE_IF); OMP_CLAUSE_IF_MODIFIER (c) = OMP_CLAUSE_IF_MODIFIER (clauses); OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses); OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET]; cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c; s = C_OMP_CLAUSE_SPLIT_PARALLEL; } else { error_at (OMP_CLAUSE_LOCATION (clauses), "expected %<parallel%> or %<target%> %<if%> " "clause modifier"); continue; } } else s = C_OMP_CLAUSE_SPLIT_PARALLEL; } else s = C_OMP_CLAUSE_SPLIT_TARGET; break; case OMP_CLAUSE_LINEAR: /* Linear clause is allowed on simd and for. Put it on the innermost construct. */ if (code == OMP_SIMD) s = C_OMP_CLAUSE_SPLIT_SIMD; else s = C_OMP_CLAUSE_SPLIT_FOR; break; case OMP_CLAUSE_NOWAIT: /* Nowait clause is allowed on target, for and sections, but is not allowed on parallel for or parallel sections. Therefore, put it on target construct if present, because that can only be combined with parallel for{, simd} and not with for{, simd}, otherwise to the worksharing construct. 
*/ if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0) s = C_OMP_CLAUSE_SPLIT_TARGET; else s = C_OMP_CLAUSE_SPLIT_FOR; break; default: gcc_unreachable (); } OMP_CLAUSE_CHAIN (clauses) = cclauses[s]; cclauses[s] = clauses; } if (!flag_checking) return; if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE); if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0 && code != OMP_SECTIONS) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE); if (code != OMP_SIMD) gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE); } /* qsort callback to compare #pragma omp declare simd clauses. */ static int c_omp_declare_simd_clause_cmp (const void *p, const void *q) { tree a = *(const tree *) p; tree b = *(const tree *) q; if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b)) { if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b)) return -1; return 1; } if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH) { int c = tree_to_shwi (OMP_CLAUSE_DECL (a)); int d = tree_to_shwi (OMP_CLAUSE_DECL (b)); if (c < d) return 1; if (c > d) return -1; } return 0; } /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd CLAUSES on FNDECL into argument indexes and sort them. */ tree c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses) { tree c; vec<tree> clvec = vNULL; for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH) { tree decl = OMP_CLAUSE_DECL (c); tree arg; int idx; for (arg = parms, idx = 0; arg; arg = TREE_CHAIN (arg), idx++) if (arg == decl) break; if (arg == NULL_TREE) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a function argument", decl); continue; } OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx); if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c)) { decl = OMP_CLAUSE_LINEAR_STEP (c); for (arg = parms, idx = 0; arg; arg = TREE_CHAIN (arg), idx++) if (arg == decl) break; if (arg == NULL_TREE) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a function argument", decl); continue; } OMP_CLAUSE_LINEAR_STEP (c) = build_int_cst (integer_type_node, idx); } } clvec.safe_push (c); } if (!clvec.is_empty ()) { unsigned int len = clvec.length (), i; clvec.qsort (c_omp_declare_simd_clause_cmp); clauses = clvec[0]; for (i = 0; i < len; i++) OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE; } else clauses = NULL_TREE; clvec.release (); return clauses; } /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs.
*/ void c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses) { tree c; for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH) { int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i; tree arg; for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg; arg = TREE_CHAIN (arg), i++) if (i == idx) break; gcc_assert (arg); OMP_CLAUSE_DECL (c) = arg; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c)) { idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c)); for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg; arg = TREE_CHAIN (arg), i++) if (i == idx) break; gcc_assert (arg); OMP_CLAUSE_LINEAR_STEP (c) = arg; } } } /* True if OpenMP sharing attribute of DECL is predetermined. */ enum omp_clause_default_kind c_omp_predetermined_sharing (tree decl) { /* Variables with const-qualified type having no mutable member are predetermined shared. */ if (TREE_READONLY (decl)) return OMP_CLAUSE_DEFAULT_SHARED; return OMP_CLAUSE_DEFAULT_UNSPECIFIED; }
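To make the clause-splitting logic in c_omp_split_clauses above concrete, here is a small, self-contained OpenMP program. It is an illustrative sketch, not part of the GCC sources; the loop bound and the particular clause mix are arbitrary. When the front end sees the combined construct below, num_threads is routed to the parallel construct, schedule to the for construct, and reduction is duplicated as described in the OMP_CLAUSE_REDUCTION case of the switch.

#include <stdio.h>

int main (void)
{
  double sum = 0.0;
  /* num_threads -> C_OMP_CLAUSE_SPLIT_PARALLEL, schedule -> C_OMP_CLAUSE_SPLIT_FOR,
     reduction -> duplicated per the OMP_CLAUSE_REDUCTION case above.  */
#pragma omp parallel for num_threads(4) schedule(static) reduction(+:sum)
  for (int i = 0; i < 1000; i++)
    sum += 0.5 * i;
  printf ("sum = %f\n", sum);
  return 0;
}

Compiled with -fopenmp the pragma is honored and the clauses are split as described; without -fopenmp the program is still valid serial C, which is why clause splitting lives in the front end rather than in user code.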
#include "config.h" #include "system.h" #include "coretypes.h" #include "options.h" #include "c-common.h" #include "gimple-expr.h" #include "c-pragma.h" #include "omp-general.h" #include "gomp-constants.h" /* * Complete a #pragma oacc wait construct. LOC is the location of the * #pragma. */ tree c_finish_oacc_wait(location_t loc, tree parms, tree clauses) { const int nparms = list_length(parms); tree stmt, t; vec < tree, va_gc > *args; vec_alloc(args, nparms + 2); stmt = builtin_decl_explicit(BUILT_IN_GOACC_WAIT); if (omp_find_clause(clauses, OMP_CLAUSE_ASYNC)) t = OMP_CLAUSE_ASYNC_EXPR(clauses); else t = build_int_cst(integer_type_node, GOMP_ASYNC_SYNC); args->quick_push(t); args->quick_push(build_int_cst(integer_type_node, nparms)); for (t = parms; t; t = TREE_CHAIN(t)) { if (TREE_CODE(OMP_CLAUSE_WAIT_EXPR(t)) == INTEGER_CST) args->quick_push(build_int_cst(integer_type_node, TREE_INT_CST_LOW(OMP_CLAUSE_WAIT_EXPR(t)))); else args->quick_push(OMP_CLAUSE_WAIT_EXPR(t)); } stmt = build_call_expr_loc_vec(loc, stmt, args); vec_free(args); return stmt; } /* * Complete a #pragma omp master construct. STMT is the structured-block * that follows the pragma. LOC is the l */ tree c_finish_omp_master(location_t loc, tree stmt) { tree t = add_stmt(build1(OMP_MASTER, void_type_node, stmt)); SET_EXPR_LOCATION(t, loc); return t; } /* * Complete a #pragma omp taskgroup construct. STMT is the structured-block * that follows the pragma. LOC is the l */ tree c_finish_omp_taskgroup(location_t loc, tree stmt) { tree t = add_stmt(build1(OMP_TASKGROUP, void_type_node, stmt)); SET_EXPR_LOCATION(t, loc); return t; } /* * Complete a #pragma omp critical construct. STMT is the structured-block * that follows the pragma, NAME is the identifier in the pragma, or null if * it was omitted. LOC is the location of the #pragma. */ tree c_finish_omp_critical(location_t loc, tree body, tree name, tree clauses) { tree stmt = make_node(OMP_CRITICAL); TREE_TYPE(stmt) = void_type_node; OMP_CRITICAL_BODY(stmt) = body; OMP_CRITICAL_NAME(stmt) = name; OMP_CRITICAL_CLAUSES(stmt) = clauses; SET_EXPR_LOCATION(stmt, loc); return add_stmt(stmt); } /* * Complete a #pragma omp ordered construct. STMT is the structured-block * that follows the pragma. LOC is the location of the #pragma. */ tree c_finish_omp_ordered(location_t loc, tree clauses, tree stmt) { tree t = make_node(OMP_ORDERED); TREE_TYPE(t) = void_type_node; OMP_ORDERED_BODY(t) = stmt; OMP_ORDERED_CLAUSES(t) = clauses; SET_EXPR_LOCATION(t, loc); return add_stmt(t); } /* * Complete a #pragma omp barrier construct. LOC is the location of the * #pragma. */ void c_finish_omp_barrier(location_t loc) { tree x; x = builtin_decl_explicit(BUILT_IN_GOMP_BARRIER); x = build_call_expr_loc(loc, x, 0); add_stmt(x); } /* * Complete a #pragma omp taskwait construct. LOC is the location of the * pragma. */ void c_finish_omp_taskwait(location_t loc) { tree x; x = builtin_decl_explicit(BUILT_IN_GOMP_TASKWAIT); x = build_call_expr_loc(loc, x, 0); add_stmt(x); } /* * Complete a #pragma omp taskyield construct. LOC is the location of the * pragma. */ void c_finish_omp_taskyield(location_t loc) { tree x; x = builtin_decl_explicit(BUILT_IN_GOMP_TASKYIELD); x = build_call_expr_loc(loc, x, 0); add_stmt(x); } /* * Complete a #pragma omp atomic construct. For CODE OMP_ATOMIC the * expression to be implemented atomically is LHS opcode= RHS. For * OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS opcode= RHS * with the new or old content of LHS returned. 
LOC is the location of the * atomic statement. The value returned is either error_mark_node (if the * construct was erroneous) or an OMP_ATOMIC* node which should be added to * the current statement tree with add_stmt. If TEST is set, avoid calling * save_expr or create_tmp_var*. */ tree c_finish_omp_atomic(location_t loc, enum tree_code code, enum tree_code opcode, tree lhs, tree rhs, tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst, bool test) { tree x, type, addr, pre = NULL_TREE; if (lhs == error_mark_node || rhs == error_mark_node || v == error_mark_node || lhs1 == error_mark_node || rhs1 == error_mark_node) return error_mark_node; /* * ??? According to one reading of the OpenMP spec, complex type are * supported, but there are no atomic stores for any architecture. But at * least icc 9.0 doesn't support complex types here either. And lets not * even talk about vector types... */ type = TREE_TYPE(lhs); if (!INTEGRAL_TYPE_P(type) && !POINTER_TYPE_P(type) && !SCALAR_FLOAT_TYPE_P(type)) { error_at(loc, "invalid expression type for %<#pragma omp atomic%>"); return error_mark_node; } if (TYPE_ATOMIC(type)) { error_at(loc, "%<_Atomic%> expression in %<#pragma omp atomic%>"); return error_mark_node; } if (opcode == RDIV_EXPR) opcode = TRUNC_DIV_EXPR; /* ??? Validate that rhs does not overlap lhs. */ /* * Take and save the address of the lhs. From then on we'll reference it * via indirection. */ addr = build_unary_op(loc, ADDR_EXPR, lhs, false); if (addr == error_mark_node) return error_mark_node; if (!test) addr = save_expr(addr); if (!test && TREE_CODE(addr) != SAVE_EXPR && (TREE_CODE(addr) != ADDR_EXPR || !VAR_P(TREE_OPERAND(addr, 0)))) { /* * Make sure LHS is simple enough so that goa_lhs_expr_p can * recognize it even after unsharing function body. */ tree var = create_tmp_var_raw(TREE_TYPE(addr)); DECL_CONTEXT(var) = current_function_decl; addr = build4(TARGET_EXPR, TREE_TYPE(addr), var, addr, NULL, NULL); } lhs = build_indirect_ref(loc, addr, RO_NULL); if (code == OMP_ATOMIC_READ) { x = build1(OMP_ATOMIC_READ, type, addr); SET_EXPR_LOCATION(x, loc); OMP_ATOMIC_SEQ_CST(x) = seq_cst; return build_modify_expr(loc, v, NULL_TREE, NOP_EXPR, loc, x, NULL_TREE); } /* * There are lots of warnings, errors, and conversions that need to * happen in the course of interpreting a statement. Use the normal * mechanisms to do this, and then take it apart again. */ if (swapped) { rhs = build_binary_op(loc, opcode, rhs, lhs, 1); opcode = NOP_EXPR; } bool save = in_late_binary_op; in_late_binary_op = true; x = build_modify_expr(loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE); in_late_binary_op = save; if (x == error_mark_node) return error_mark_node; if (TREE_CODE(x) == COMPOUND_EXPR) { pre = TREE_OPERAND(x, 0); gcc_assert(TREE_CODE(pre) == SAVE_EXPR); x = TREE_OPERAND(x, 1); } gcc_assert(TREE_CODE(x) == MODIFY_EXPR); rhs = TREE_OPERAND(x, 1); /* Punt the actual generation of atomic operations to common code. */ if (code == OMP_ATOMIC) type = void_type_node; x = build2(code, type, addr, rhs); SET_EXPR_LOCATION(x, loc); OMP_ATOMIC_SEQ_CST(x) = seq_cst; /* * Generally it is hard to prove lhs1 and lhs are the same memory * location, just diagnose different variables. 
*/ if (rhs1 && VAR_P(rhs1) && VAR_P(lhs) && rhs1 != lhs && !test) { if (code == OMP_ATOMIC) error_at(loc, "%<#pragma omp atomic update%> uses two different " "variables for memory"); else error_at(loc, "%<#pragma omp atomic capture%> uses two different " "variables for memory"); return error_mark_node; } if (code != OMP_ATOMIC) { /* * Generally it is hard to prove lhs1 and lhs are the same memory * location, just diagnose different variables. */ if (lhs1 && VAR_P(lhs1) && VAR_P(lhs)) { if (lhs1 != lhs && !test) { error_at(loc, "%<#pragma omp atomic capture%> uses two " "different variables for memory"); return error_mark_node; } } x = build_modify_expr(loc, v, NULL_TREE, NOP_EXPR, loc, x, NULL_TREE); if (rhs1 && rhs1 != lhs) { tree rhs1addr = build_unary_op(loc, ADDR_EXPR, rhs1, false); if (rhs1addr == error_mark_node) return error_mark_node; x = omit_one_operand_loc(loc, type, x, rhs1addr); } if (lhs1 && lhs1 != lhs) { tree lhs1addr = build_unary_op(loc, ADDR_EXPR, lhs1, false); if (lhs1addr == error_mark_node) return error_mark_node; if (code == OMP_ATOMIC_CAPTURE_OLD) x = omit_one_operand_loc(loc, type, x, lhs1addr); else { if (!test) x = save_expr(x); x = omit_two_operands_loc(loc, type, x, x, lhs1addr); } } } else if (rhs1 && rhs1 != lhs) { tree rhs1addr = build_unary_op(loc, ADDR_EXPR, rhs1, false); if (rhs1addr == error_mark_node) return error_mark_node; x = omit_one_operand_loc(loc, type, x, rhs1addr); } if (pre) x = omit_one_operand_loc(loc, type, x, pre); return x; } /* * Complete a #pragma omp flush construct. We don't do anything with the * variable list that the syntax allows. LOC is the location of the #pragma. */ void c_finish_omp_flush(location_t loc) { tree x; x = builtin_decl_explicit(BUILT_IN_SYNC_SYNCHRONIZE); x = build_call_expr_loc(loc, x, 0); add_stmt(x); } /* * Check and canonicalize OMP_FOR increment expression. Helper function for * c_finish_omp_for. */ static tree check_omp_for_incr_expr(location_t loc, tree exp, tree decl) { tree t; if (!INTEGRAL_TYPE_P(TREE_TYPE(exp)) || TYPE_PRECISION(TREE_TYPE(exp)) < TYPE_PRECISION(TREE_TYPE(decl))) return error_mark_node; if (exp == decl) return build_int_cst(TREE_TYPE(exp), 0); switch (TREE_CODE(exp)) { CASE_CONVERT: t = check_omp_for_incr_expr(loc, TREE_OPERAND(exp, 0), decl); if (t != error_mark_node) return fold_convert_loc(loc, TREE_TYPE(exp), t); break; case MINUS_EXPR: t = check_omp_for_incr_expr(loc, TREE_OPERAND(exp, 0), decl); if (t != error_mark_node) return fold_build2_loc(loc, MINUS_EXPR, TREE_TYPE(exp), t, TREE_OPERAND(exp, 1)); break; case PLUS_EXPR: t = check_omp_for_incr_expr(loc, TREE_OPERAND(exp, 0), decl); if (t != error_mark_node) return fold_build2_loc(loc, PLUS_EXPR, TREE_TYPE(exp), t, TREE_OPERAND(exp, 1)); t = check_omp_for_incr_expr(loc, TREE_OPERAND(exp, 1), decl); if (t != error_mark_node) return fold_build2_loc(loc, PLUS_EXPR, TREE_TYPE(exp), TREE_OPERAND(exp, 0), t); break; case COMPOUND_EXPR: { /* * cp_build_modify_expr forces preevaluation of the RHS to make * sure that it is evaluated before the lvalue-rvalue conversion * is applied to the LHS. Reconstruct the original expression. 
*/ tree op0 = TREE_OPERAND(exp, 0); if (TREE_CODE(op0) == TARGET_EXPR && !VOID_TYPE_P(TREE_TYPE(op0))) { tree op1 = TREE_OPERAND(exp, 1); tree temp = TARGET_EXPR_SLOT(op0); if (BINARY_CLASS_P(op1) && TREE_OPERAND(op1, 1) == temp) { op1 = copy_node(op1); TREE_OPERAND(op1, 1) = TARGET_EXPR_INITIAL(op0); return check_omp_for_incr_expr(loc, op1, decl); } } break; } default: break; } return error_mark_node; } /* * If the OMP_FOR increment expression in INCR is of pointer type, * canonicalize it into an expression handled by gimplify_omp_for() and * return it. DECL is the iteration variable. */ static tree c_omp_for_incr_canonicalize_ptr(location_t loc, tree decl, tree incr) { if (POINTER_TYPE_P(TREE_TYPE(decl)) && TREE_OPERAND(incr, 1)) { tree t = fold_convert_loc(loc, sizetype, TREE_OPERAND(incr, 1)); if (TREE_CODE(incr) == POSTDECREMENT_EXPR || TREE_CODE(incr) == PREDECREMENT_EXPR) t = fold_build1_loc(loc, NEGATE_EXPR, sizetype, t); t = fold_build_pointer_plus(decl, t); incr = build2(MODIFY_EXPR, void_type_node, decl, t); } return incr; } /* * Validate and generate OMP_FOR. DECLV is a vector of iteration variables, * for each collapsed loop. * * ORIG_DECLV, if non-NULL, is a vector with the original iteration variables * (prior to any transformations, by say, C++ iterators). * * INITV, CONDV and INCRV are vectors containing initialization expressions, * controlling predicates and increment expressions. BODY is the body of the * loop and PRE_BODY statements that go before the loop. */ tree c_finish_omp_for(location_t locus, enum tree_code code, tree declv, tree orig_declv, tree initv, tree condv, tree incrv, tree body, tree pre_body) { location_t elocus; bool fail = false; int i; if ((code == CILK_SIMD || code == CILK_FOR) && !c_check_cilk_loop(locus, TREE_VEC_ELT(declv, 0))) fail = true; gcc_assert(TREE_VEC_LENGTH(declv) == TREE_VEC_LENGTH(initv)); gcc_assert(TREE_VEC_LENGTH(declv) == TREE_VEC_LENGTH(condv)); gcc_assert(TREE_VEC_LENGTH(declv) == TREE_VEC_LENGTH(incrv)); for (i = 0; i < TREE_VEC_LENGTH(declv); i++) { tree decl = TREE_VEC_ELT(declv, i); tree init = TREE_VEC_ELT(initv, i); tree cond = TREE_VEC_ELT(condv, i); tree incr = TREE_VEC_ELT(incrv, i); elocus = locus; if (EXPR_HAS_LOCATION(init)) elocus = EXPR_LOCATION(init); /* Validate the iteration variable. */ if (!INTEGRAL_TYPE_P(TREE_TYPE(decl)) && TREE_CODE(TREE_TYPE(decl)) != POINTER_TYPE) { error_at(elocus, "invalid type for iteration variable %qE", decl); fail = true; } else if (TYPE_ATOMIC(TREE_TYPE(decl))) { error_at(elocus, "%<_Atomic%> iteration variable %qE", decl); fail = true; /* * _Atomic iterator confuses stuff too much, so we risk ICE * trying to diagnose it further. */ continue; } /* * In the case of "for (int i = 0...)", init will be a decl. It * should have a DECL_INITIAL that we can turn into an assignment. */ if (init == decl) { elocus = DECL_SOURCE_LOCATION(decl); init = DECL_INITIAL(decl); if (init == NULL) { error_at(elocus, "%qE is not initialized", decl); init = integer_zero_node; fail = true; } DECL_INITIAL(decl) = NULL_TREE; init = build_modify_expr(elocus, decl, NULL_TREE, NOP_EXPR, /* * FIXME diagnostics: This should be the location of the INIT. */ elocus, init, NULL_TREE); } if (init != error_mark_node) { gcc_assert(TREE_CODE(init) == MODIFY_EXPR); gcc_assert(TREE_OPERAND(init, 0) == decl); } if (cond == NULL_TREE) { error_at(elocus, "missing controlling predicate"); fail = true; } else { bool cond_ok = false; /* * E.g. C sizeof (vla) could add COMPOUND_EXPRs with evaluation * of the vla VAR_DECL. 
We need to readd them to the non-decl * operand. See PR45784. */ while (TREE_CODE(cond) == COMPOUND_EXPR) cond = TREE_OPERAND(cond, 1); if (EXPR_HAS_LOCATION(cond)) elocus = EXPR_LOCATION(cond); if (TREE_CODE(cond) == LT_EXPR || TREE_CODE(cond) == LE_EXPR || TREE_CODE(cond) == GT_EXPR || TREE_CODE(cond) == GE_EXPR || TREE_CODE(cond) == NE_EXPR || TREE_CODE(cond) == EQ_EXPR) { tree op0 = TREE_OPERAND(cond, 0); tree op1 = TREE_OPERAND(cond, 1); /* * 2.5.1. The comparison in the condition is computed in the * type of DECL, otherwise the behavior is undefined. * * For example: long n; int i; i < n; * * according to ISO will be evaluated as: (long)i < n; * * We want to force: i < (int)n; */ if (TREE_CODE(op0) == NOP_EXPR && decl == TREE_OPERAND(op0, 0)) { TREE_OPERAND(cond, 0) = TREE_OPERAND(op0, 0); TREE_OPERAND(cond, 1) = fold_build1_loc(elocus, NOP_EXPR, TREE_TYPE(decl), TREE_OPERAND(cond, 1)); } else if (TREE_CODE(op1) == NOP_EXPR && decl == TREE_OPERAND(op1, 0)) { TREE_OPERAND(cond, 1) = TREE_OPERAND(op1, 0); TREE_OPERAND(cond, 0) = fold_build1_loc(elocus, NOP_EXPR, TREE_TYPE(decl), TREE_OPERAND(cond, 0)); } if (decl == TREE_OPERAND(cond, 0)) cond_ok = true; else if (decl == TREE_OPERAND(cond, 1)) { TREE_SET_CODE(cond, swap_tree_comparison(TREE_CODE(cond))); TREE_OPERAND(cond, 1) = TREE_OPERAND(cond, 0); TREE_OPERAND(cond, 0) = decl; cond_ok = true; } if (TREE_CODE(cond) == NE_EXPR || TREE_CODE(cond) == EQ_EXPR) { if (!INTEGRAL_TYPE_P(TREE_TYPE(decl))) { if (code != CILK_SIMD && code != CILK_FOR) cond_ok = false; } else if (operand_equal_p(TREE_OPERAND(cond, 1), TYPE_MIN_VALUE(TREE_TYPE(decl)), 0)) TREE_SET_CODE(cond, TREE_CODE(cond) == NE_EXPR ? GT_EXPR : LE_EXPR); else if (operand_equal_p(TREE_OPERAND(cond, 1), TYPE_MAX_VALUE(TREE_TYPE(decl)), 0)) TREE_SET_CODE(cond, TREE_CODE(cond) == NE_EXPR ? LT_EXPR : GE_EXPR); else if (code != CILK_SIMD && code != CILK_FOR) cond_ok = false; } if (cond_ok && TREE_VEC_ELT(condv, i) != cond) { tree ce = NULL_TREE, *pce = &ce; tree type = TREE_TYPE(TREE_OPERAND(cond, 1)); for (tree c = TREE_VEC_ELT(condv, i); c != cond; c = TREE_OPERAND(c, 1)) { *pce = build2(COMPOUND_EXPR, type, TREE_OPERAND(c, 0), TREE_OPERAND(cond, 1)); pce = &TREE_OPERAND(*pce, 1); } TREE_OPERAND(cond, 1) = ce; TREE_VEC_ELT(condv, i) = cond; } } if (!cond_ok) { error_at(elocus, "invalid controlling predicate"); fail = true; } } if (incr == NULL_TREE) { error_at(elocus, "missing increment expression"); fail = true; } else { bool incr_ok = false; if (EXPR_HAS_LOCATION(incr)) elocus = EXPR_LOCATION(incr); /* * Check all the valid increment expressions: v++, v--, ++v, --v, * v = v + incr, v = incr + v and v = v - incr. 
*/ switch (TREE_CODE(incr)) { case POSTINCREMENT_EXPR: case PREINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREDECREMENT_EXPR: if (TREE_OPERAND(incr, 0) != decl) break; incr_ok = true; incr = c_omp_for_incr_canonicalize_ptr(elocus, decl, incr); break; case COMPOUND_EXPR: if (TREE_CODE(TREE_OPERAND(incr, 0)) != SAVE_EXPR || TREE_CODE(TREE_OPERAND(incr, 1)) != MODIFY_EXPR) break; incr = TREE_OPERAND(incr, 1); /* FALLTHRU */ case MODIFY_EXPR: if (TREE_OPERAND(incr, 0) != decl) break; if (TREE_OPERAND(incr, 1) == decl) break; if (TREE_CODE(TREE_OPERAND(incr, 1)) == PLUS_EXPR && (TREE_OPERAND(TREE_OPERAND(incr, 1), 0) == decl || TREE_OPERAND(TREE_OPERAND(incr, 1), 1) == decl)) incr_ok = true; else if ((TREE_CODE(TREE_OPERAND(incr, 1)) == MINUS_EXPR || (TREE_CODE(TREE_OPERAND(incr, 1)) == POINTER_PLUS_EXPR)) && TREE_OPERAND(TREE_OPERAND(incr, 1), 0) == decl) incr_ok = true; else { tree t = check_omp_for_incr_expr(elocus, TREE_OPERAND(incr, 1), decl); if (t != error_mark_node) { incr_ok = true; t = build2(PLUS_EXPR, TREE_TYPE(decl), decl, t); incr = build2(MODIFY_EXPR, void_type_node, decl, t); } } break; default: break; } if (!incr_ok) { error_at(elocus, "invalid increment expression"); fail = true; } } TREE_VEC_ELT(initv, i) = init; TREE_VEC_ELT(incrv, i) = incr; } if (fail) return NULL; else { tree t = make_node(code); TREE_TYPE(t) = void_type_node; OMP_FOR_INIT(t) = initv; OMP_FOR_COND(t) = condv; OMP_FOR_INCR(t) = incrv; OMP_FOR_BODY(t) = body; OMP_FOR_PRE_BODY(t) = pre_body; OMP_FOR_ORIG_DECLS(t) = orig_declv; SET_EXPR_LOCATION(t, locus); return t; } } /* * Type for passing data in between c_omp_check_loop_iv and * c_omp_check_loop_iv_r. */ struct c_omp_check_loop_iv_data { tree declv; bool fail; location_t stmt_loc; location_t expr_loc; int kind; walk_tree_lh lh; hash_set < tree > *ppset; }; /* * Helper function called via walk_tree, to diagnose uses of associated loop * IVs inside of lb, b and incr expressions of OpenMP loops. */ static tree c_omp_check_loop_iv_r(tree * tp, int *walk_subtrees, void *data) { struct c_omp_check_loop_iv_data *d = (struct c_omp_check_loop_iv_data *)data; if (DECL_P(*tp)) { int i; for (i = 0; i < TREE_VEC_LENGTH(d->declv); i++) if (*tp == TREE_VEC_ELT(d->declv, i)) { location_t loc = d->expr_loc; if (loc == UNKNOWN_LOCATION) loc = d->stmt_loc; switch (d->kind) { case 0: error_at(loc, "initializer expression refers to " "iteration variable %qD", *tp); break; case 1: error_at(loc, "condition expression refers to " "iteration variable %qD", *tp); break; case 2: error_at(loc, "increment expression refers to " "iteration variable %qD", *tp); break; } d->fail = true; } } /* Don't walk dtors added by C++ wrap_cleanups_r. */ else if (TREE_CODE(*tp) == TRY_CATCH_EXPR && TRY_CATCH_IS_CLEANUP(*tp)) { *walk_subtrees = 0; return walk_tree_1(&TREE_OPERAND(*tp, 0), c_omp_check_loop_iv_r, data, d->ppset, d->lh); } return NULL_TREE; } /* * Diagnose invalid references to loop iterators in lb, b and incr * expressions. 
*/ bool c_omp_check_loop_iv(tree stmt, tree declv, walk_tree_lh lh) { hash_set < tree > pset; struct c_omp_check_loop_iv_data data; int i; data.declv = declv; data.fail = false; data.stmt_loc = EXPR_LOCATION(stmt); data.lh = lh; data.ppset = &pset; for (i = 0; i < TREE_VEC_LENGTH(OMP_FOR_INIT(stmt)); i++) { tree init = TREE_VEC_ELT(OMP_FOR_INIT(stmt), i); gcc_assert(TREE_CODE(init) == MODIFY_EXPR); tree decl = TREE_OPERAND(init, 0); tree cond = TREE_VEC_ELT(OMP_FOR_COND(stmt), i); gcc_assert(COMPARISON_CLASS_P(cond)); gcc_assert(TREE_OPERAND(cond, 0) == decl); tree incr = TREE_VEC_ELT(OMP_FOR_INCR(stmt), i); data.expr_loc = EXPR_LOCATION(TREE_OPERAND(init, 1)); data.kind = 0; walk_tree_1(&TREE_OPERAND(init, 1), c_omp_check_loop_iv_r, &data, &pset, lh); /* * Don't warn for C++ random access iterators here, the expression * then involves the subtraction and always refers to the original * value. The C++ FE needs to warn on those earlier. */ if (decl == TREE_VEC_ELT(declv, i)) { data.expr_loc = EXPR_LOCATION(cond); data.kind = 1; walk_tree_1(&TREE_OPERAND(cond, 1), c_omp_check_loop_iv_r, &data, &pset, lh); } if (TREE_CODE(incr) == MODIFY_EXPR) { gcc_assert(TREE_OPERAND(incr, 0) == decl); incr = TREE_OPERAND(incr, 1); data.kind = 2; if (TREE_CODE(incr) == PLUS_EXPR && TREE_OPERAND(incr, 1) == decl) { data.expr_loc = EXPR_LOCATION(TREE_OPERAND(incr, 0)); walk_tree_1(&TREE_OPERAND(incr, 0), c_omp_check_loop_iv_r, &data, &pset, lh); } else { data.expr_loc = EXPR_LOCATION(TREE_OPERAND(incr, 1)); walk_tree_1(&TREE_OPERAND(incr, 1), c_omp_check_loop_iv_r, &data, &pset, lh); } } } return !data.fail; } /* Similar, but allows to check the init or cond expressions individually. */ bool c_omp_check_loop_iv_exprs(location_t stmt_loc, tree declv, tree decl, tree init, tree cond, walk_tree_lh lh) { hash_set < tree > pset; struct c_omp_check_loop_iv_data data; data.declv = declv; data.fail = false; data.stmt_loc = stmt_loc; data.lh = lh; data.ppset = &pset; if (init) { data.expr_loc = EXPR_LOCATION(init); data.kind = 0; walk_tree_1(&init, c_omp_check_loop_iv_r, &data, &pset, lh); } if (cond) { gcc_assert(COMPARISON_CLASS_P(cond)); data.expr_loc = EXPR_LOCATION(init); data.kind = 1; if (TREE_OPERAND(cond, 0) == decl) walk_tree_1(&TREE_OPERAND(cond, 1), c_omp_check_loop_iv_r, &data, &pset, lh); else walk_tree_1(&TREE_OPERAND(cond, 0), c_omp_check_loop_iv_r, &data, &pset, lh); } return !data.fail; } /* * This function splits clauses for OpenACC combined loop constructs. * OpenACC combined loop constructs are: #pragma acc kernels loop #pragma acc * parallel loop */ tree c_oacc_split_loop_clauses(tree clauses, tree * not_loop_clauses, bool is_parallel) { tree next, loop_clauses, nc; loop_clauses = *not_loop_clauses = NULL_TREE; for (; clauses; clauses = next) { next = OMP_CLAUSE_CHAIN(clauses); switch (OMP_CLAUSE_CODE(clauses)) { /* Loop clauses. */ case OMP_CLAUSE_COLLAPSE: case OMP_CLAUSE_TILE: case OMP_CLAUSE_GANG: case OMP_CLAUSE_WORKER: case OMP_CLAUSE_VECTOR: case OMP_CLAUSE_AUTO: case OMP_CLAUSE_SEQ: case OMP_CLAUSE_INDEPENDENT: case OMP_CLAUSE_PRIVATE: OMP_CLAUSE_CHAIN(clauses) = loop_clauses; loop_clauses = clauses; break; /* Reductions must be duplicated on both constructs. 
*/ case OMP_CLAUSE_REDUCTION: if (is_parallel) { nc = build_omp_clause(OMP_CLAUSE_LOCATION(clauses), OMP_CLAUSE_REDUCTION); OMP_CLAUSE_DECL(nc) = OMP_CLAUSE_DECL(clauses); OMP_CLAUSE_REDUCTION_CODE(nc) = OMP_CLAUSE_REDUCTION_CODE(clauses); OMP_CLAUSE_CHAIN(nc) = *not_loop_clauses; *not_loop_clauses = nc; } OMP_CLAUSE_CHAIN(clauses) = loop_clauses; loop_clauses = clauses; break; /* Parallel/kernels clauses. */ default: OMP_CLAUSE_CHAIN(clauses) = *not_loop_clauses; *not_loop_clauses = clauses; break; } } return loop_clauses; } /* * This function attempts to split or duplicate clauses for OpenMP * combined/composite constructs. Right now there are 21 different * constructs. CODE is the innermost construct in the combined construct, * and MASK allows to determine which constructs are combined together, as * every construct has at least one clause that no other construct has * (except for OMP_SECTIONS, but that can be only combined with parallel). * OpenMP combined/composite constructs are: #pragma omp distribute parallel * for #pragma omp distribute parallel for simd #pragma omp distribute simd * #pragma omp for simd #pragma omp parallel for #pragma omp parallel for * simd #pragma omp parallel sections #pragma omp target parallel #pragma omp * target parallel for #pragma omp target parallel for simd #pragma omp * target teams #pragma omp target teams distribute #pragma omp target teams * distribute parallel for #pragma omp target teams distribute parallel for * simd #pragma omp target teams distribute simd #pragma omp target simd * #pragma omp taskloop simd #pragma omp teams distribute #pragma omp teams * distribute parallel for #pragma omp teams distribute parallel for simd * #pragma omp teams distribute simd */ void c_omp_split_clauses(location_t loc, enum tree_code code, omp_clause_mask mask, tree clauses, tree * cclauses) { tree next, c; enum c_omp_clause_split s; int i; for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++) cclauses[i] = NULL; /* * Add implicit nowait clause on #pragma omp parallel {for,for * simd,sections}. */ if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) switch (code) { case OMP_FOR: case OMP_SIMD: cclauses[C_OMP_CLAUSE_SPLIT_FOR] = build_omp_clause(loc, OMP_CLAUSE_NOWAIT); break; case OMP_SECTIONS: cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS] = build_omp_clause(loc, OMP_CLAUSE_NOWAIT); break; default: break; } for (; clauses; clauses = next) { next = OMP_CLAUSE_CHAIN(clauses); switch (OMP_CLAUSE_CODE(clauses)) { /* First the clauses that are unique to some constructs. 
*/ case OMP_CLAUSE_DEVICE: case OMP_CLAUSE_MAP: case OMP_CLAUSE_IS_DEVICE_PTR: case OMP_CLAUSE_DEFAULTMAP: case OMP_CLAUSE_DEPEND: s = C_OMP_CLAUSE_SPLIT_TARGET; break; case OMP_CLAUSE_NUM_TEAMS: case OMP_CLAUSE_THREAD_LIMIT: s = C_OMP_CLAUSE_SPLIT_TEAMS; break; case OMP_CLAUSE_DIST_SCHEDULE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_PROC_BIND: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; case OMP_CLAUSE_ORDERED: s = C_OMP_CLAUSE_SPLIT_FOR; break; case OMP_CLAUSE_SCHEDULE: s = C_OMP_CLAUSE_SPLIT_FOR; if (code != OMP_SIMD) OMP_CLAUSE_SCHEDULE_SIMD(clauses) = 0; break; case OMP_CLAUSE_SAFELEN: case OMP_CLAUSE_SIMDLEN: case OMP_CLAUSE_ALIGNED: s = C_OMP_CLAUSE_SPLIT_SIMD; break; case OMP_CLAUSE_GRAINSIZE: case OMP_CLAUSE_NUM_TASKS: case OMP_CLAUSE_FINAL: case OMP_CLAUSE_UNTIED: case OMP_CLAUSE_MERGEABLE: case OMP_CLAUSE_NOGROUP: case OMP_CLAUSE_PRIORITY: s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break; /* Duplicate this to all of taskloop, distribute, for and simd. */ case OMP_CLAUSE_COLLAPSE: if (code == OMP_SIMD) { if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0) { c = build_omp_clause(OMP_CLAUSE_LOCATION(clauses), OMP_CLAUSE_COLLAPSE); OMP_CLAUSE_COLLAPSE_EXPR(c) = OMP_CLAUSE_COLLAPSE_EXPR(clauses); OMP_CLAUSE_CHAIN(c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; } else { /* This must be #pragma omp target simd */ s = C_OMP_CLAUSE_SPLIT_SIMD; break; } } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) { c = build_omp_clause(OMP_CLAUSE_LOCATION(clauses), OMP_CLAUSE_COLLAPSE); OMP_CLAUSE_COLLAPSE_EXPR(c) = OMP_CLAUSE_COLLAPSE_EXPR(clauses); OMP_CLAUSE_CHAIN(c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR]; cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c; s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; } else s = C_OMP_CLAUSE_SPLIT_FOR; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) s = C_OMP_CLAUSE_SPLIT_TASKLOOP; else s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; /* * Private clause is supported on all constructs, it is enough to * put it on the innermost one. For #pragma omp {for,sections} * put it on parallel though, as that's what we did for OpenMP * 3.1. */ case OMP_CLAUSE_PRIVATE: switch (code) { case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break; case OMP_FOR: case OMP_SECTIONS: case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break; default: gcc_unreachable(); } break; /* * Firstprivate clause is supported on all constructs but simd. * Put it on the outermost of those and duplicate on teams and * parallel. */ case OMP_CLAUSE_FIRSTPRIVATE: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0) { if (code == OMP_SIMD && (mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0) { /* This must be #pragma omp target simd. 
*/ s = C_OMP_CLAUSE_SPLIT_TARGET; break; } c = build_omp_clause(OMP_CLAUSE_LOCATION(clauses), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL(c) = OMP_CLAUSE_DECL(clauses); OMP_CLAUSE_CHAIN(c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET]; cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) { if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0) { c = build_omp_clause(OMP_CLAUSE_LOCATION(clauses), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL(c) = OMP_CLAUSE_DECL(clauses); OMP_CLAUSE_CHAIN(c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c; if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) s = C_OMP_CLAUSE_SPLIT_TEAMS; else s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; } else /* * This must be #pragma omp parallel{, for{, simd}, * sections} or #pragma omp target parallel. */ s = C_OMP_CLAUSE_SPLIT_PARALLEL; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) { /* * This must be one of #pragma omp {,target }teams distribute * #pragma omp target teams #pragma omp {,target }teams * distribute simd. */ gcc_assert(code == OMP_DISTRIBUTE || code == OMP_TEAMS || code == OMP_SIMD); s = C_OMP_CLAUSE_SPLIT_TEAMS; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) { /* This must be #pragma omp distribute simd. */ gcc_assert(code == OMP_SIMD); s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) { /* This must be #pragma omp taskloop simd. */ gcc_assert(code == OMP_SIMD); s = C_OMP_CLAUSE_SPLIT_TASKLOOP; } else { /* This must be #pragma omp for simd. */ gcc_assert(code == OMP_SIMD); s = C_OMP_CLAUSE_SPLIT_FOR; } break; /* * Lastprivate is allowed on distribute, for, sections and simd. * In parallel {for{, simd},sections} we actually want to put it * on parallel rather than for or sections. */ case OMP_CLAUSE_LASTPRIVATE: if (code == OMP_DISTRIBUTE) { s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0) { c = build_omp_clause(OMP_CLAUSE_LOCATION(clauses), OMP_CLAUSE_LASTPRIVATE); OMP_CLAUSE_DECL(c) = OMP_CLAUSE_DECL(clauses); OMP_CLAUSE_CHAIN(c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE]; cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c; } if (code == OMP_FOR || code == OMP_SECTIONS) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else s = C_OMP_CLAUSE_SPLIT_FOR; break; } gcc_assert(code == OMP_SIMD); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) { c = build_omp_clause(OMP_CLAUSE_LOCATION(clauses), OMP_CLAUSE_LASTPRIVATE); OMP_CLAUSE_DECL(c) = OMP_CLAUSE_DECL(clauses); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else s = C_OMP_CLAUSE_SPLIT_FOR; OMP_CLAUSE_CHAIN(c) = cclauses[s]; cclauses[s] = c; } s = C_OMP_CLAUSE_SPLIT_SIMD; break; /* * Shared and default clauses are allowed on parallel, teams and * taskloop. 
*/ case OMP_CLAUSE_SHARED: case OMP_CLAUSE_DEFAULT: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) { s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0) { s = C_OMP_CLAUSE_SPLIT_TEAMS; break; } c = build_omp_clause(OMP_CLAUSE_LOCATION(clauses), OMP_CLAUSE_CODE(clauses)); if (OMP_CLAUSE_CODE(clauses) == OMP_CLAUSE_SHARED) OMP_CLAUSE_DECL(c) = OMP_CLAUSE_DECL(clauses); else OMP_CLAUSE_DEFAULT_KIND(c) = OMP_CLAUSE_DEFAULT_KIND(clauses); OMP_CLAUSE_CHAIN(c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS]; cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c; } s = C_OMP_CLAUSE_SPLIT_PARALLEL; break; /* * Reduction is allowed on simd, for, parallel, sections and * teams. Duplicate it on all of them, but omit on for or * sections if parallel is present. */ case OMP_CLAUSE_REDUCTION: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0) { if (code == OMP_SIMD) { c = build_omp_clause(OMP_CLAUSE_LOCATION(clauses), OMP_CLAUSE_REDUCTION); OMP_CLAUSE_DECL(c) = OMP_CLAUSE_DECL(clauses); OMP_CLAUSE_REDUCTION_CODE(c) = OMP_CLAUSE_REDUCTION_CODE(clauses); OMP_CLAUSE_REDUCTION_PLACEHOLDER(c) = OMP_CLAUSE_REDUCTION_PLACEHOLDER(clauses); OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER(c) = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER(clauses); OMP_CLAUSE_CHAIN(c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c; } if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0) { c = build_omp_clause(OMP_CLAUSE_LOCATION(clauses), OMP_CLAUSE_REDUCTION); OMP_CLAUSE_DECL(c) = OMP_CLAUSE_DECL(clauses); OMP_CLAUSE_REDUCTION_CODE(c) = OMP_CLAUSE_REDUCTION_CODE(clauses); OMP_CLAUSE_REDUCTION_PLACEHOLDER(c) = OMP_CLAUSE_REDUCTION_PLACEHOLDER(clauses); OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER(c) = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER(clauses); OMP_CLAUSE_CHAIN(c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c; s = C_OMP_CLAUSE_SPLIT_TEAMS; } else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else s = C_OMP_CLAUSE_SPLIT_FOR; } else if (code == OMP_SECTIONS || code == OMP_PARALLEL) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else if (code == OMP_SIMD) s = C_OMP_CLAUSE_SPLIT_SIMD; else s = C_OMP_CLAUSE_SPLIT_TEAMS; break; case OMP_CLAUSE_IF: if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0) s = C_OMP_CLAUSE_SPLIT_TASKLOOP; else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0) { if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0) { if (OMP_CLAUSE_IF_MODIFIER(clauses) == OMP_PARALLEL) s = C_OMP_CLAUSE_SPLIT_PARALLEL; else if (OMP_CLAUSE_IF_MODIFIER(clauses) == OMP_TARGET) s = C_OMP_CLAUSE_SPLIT_TARGET; else if (OMP_CLAUSE_IF_MODIFIER(clauses) == ERROR_MARK) { c = build_omp_clause(OMP_CLAUSE_LOCATION(clauses), OMP_CLAUSE_IF); OMP_CLAUSE_IF_MODIFIER(c) = OMP_CLAUSE_IF_MODIFIER(clauses); OMP_CLAUSE_IF_EXPR(c) = OMP_CLAUSE_IF_EXPR(clauses); OMP_CLAUSE_CHAIN(c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET]; cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c; s = C_OMP_CLAUSE_SPLIT_PARALLEL; } else { error_at(OMP_CLAUSE_LOCATION(clauses), "expected %<parallel%> or %<target%> %<if%> " "clause modifier"); continue; } } else s = C_OMP_CLAUSE_SPLIT_PARALLEL; } else s = C_OMP_CLAUSE_SPLIT_TARGET; break; case OMP_CLAUSE_LINEAR: /* * Linear clause is allowed on simd and for. Put it on the * innermost construct. 
*/ if (code == OMP_SIMD) s = C_OMP_CLAUSE_SPLIT_SIMD; else s = C_OMP_CLAUSE_SPLIT_FOR; break; case OMP_CLAUSE_NOWAIT: /* * Nowait clause is allowed on target, for and sections, but is * not allowed on parallel for or parallel sections. Therefore, * put it on target construct if present, because that can only * be combined with parallel for{, simd} and not with for{, * simd}, otherwise to the worksharing construct. */ if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0) s = C_OMP_CLAUSE_SPLIT_TARGET; else s = C_OMP_CLAUSE_SPLIT_FOR; break; default: gcc_unreachable(); } OMP_CLAUSE_CHAIN(clauses) = cclauses[s]; cclauses[s] = clauses; } if (!flag_checking) return; if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0) gcc_assert(cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0) gcc_assert(cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0) gcc_assert(cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE); if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0) gcc_assert(cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE); if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0 && code != OMP_SECTIONS) gcc_assert(cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE); if (code != OMP_SIMD) gcc_assert(cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE); } /* qsort callback to compare #pragma omp declare simd clauses. */ static int c_omp_declare_simd_clause_cmp(const void *p, const void *q) { tree a = *(const tree *)p; tree b = *(const tree *)q; if (OMP_CLAUSE_CODE(a) != OMP_CLAUSE_CODE(b)) { if (OMP_CLAUSE_CODE(a) > OMP_CLAUSE_CODE(b)) return -1; return 1; } if (OMP_CLAUSE_CODE(a) != OMP_CLAUSE_SIMDLEN && OMP_CLAUSE_CODE(a) != OMP_CLAUSE_INBRANCH && OMP_CLAUSE_CODE(a) != OMP_CLAUSE_NOTINBRANCH) { int c = tree_to_shwi(OMP_CLAUSE_DECL(a)); int d = tree_to_shwi(OMP_CLAUSE_DECL(b)); if (c < d) return 1; if (c > d) return -1; } return 0; } /* * Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd CLAUSES * on FNDECL into argument indexes and sort them. */ tree c_omp_declare_simd_clauses_to_numbers(tree parms, tree clauses) { tree c; vec < tree > clvec = vNULL; for (c = clauses; c; c = OMP_CLAUSE_CHAIN(c)) { if (OMP_CLAUSE_CODE(c) != OMP_CLAUSE_SIMDLEN && OMP_CLAUSE_CODE(c) != OMP_CLAUSE_INBRANCH && OMP_CLAUSE_CODE(c) != OMP_CLAUSE_NOTINBRANCH) { tree decl = OMP_CLAUSE_DECL(c); tree arg; int idx; for (arg = parms, idx = 0; arg; arg = TREE_CHAIN(arg), idx++) if (arg == decl) break; if (arg == NULL_TREE) { error_at(OMP_CLAUSE_LOCATION(c), "%qD is not a function argument", decl); continue; } OMP_CLAUSE_DECL(c) = build_int_cst(integer_type_node, idx); if (OMP_CLAUSE_CODE(c) == OMP_CLAUSE_LINEAR && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE(c)) { decl = OMP_CLAUSE_LINEAR_STEP(c); for (arg = parms, idx = 0; arg; arg = TREE_CHAIN(arg), idx++) if (arg == decl) break; if (arg == NULL_TREE) { error_at(OMP_CLAUSE_LOCATION(c), "%qD is not a function argument", decl); continue; } OMP_CLAUSE_LINEAR_STEP(c) = build_int_cst(integer_type_node, idx); } } clvec.safe_push(c); } if (!clvec.is_empty()) { unsigned int len = clvec.length(), i; clvec.qsort(c_omp_declare_simd_clause_cmp); clauses = clvec[0]; for (i = 0; i < len; i++) OMP_CLAUSE_CHAIN(clvec[i]) = (i < len - 1) ?
clvec[i + 1] : NULL_TREE; } else clauses = NULL_TREE; clvec.release(); return clauses; } /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */ void c_omp_declare_simd_clauses_to_decls(tree fndecl, tree clauses) { tree c; for (c = clauses; c; c = OMP_CLAUSE_CHAIN(c)) if (OMP_CLAUSE_CODE(c) != OMP_CLAUSE_SIMDLEN && OMP_CLAUSE_CODE(c) != OMP_CLAUSE_INBRANCH && OMP_CLAUSE_CODE(c) != OMP_CLAUSE_NOTINBRANCH) { int idx = tree_to_shwi(OMP_CLAUSE_DECL(c)), i; tree arg; for (arg = DECL_ARGUMENTS(fndecl), i = 0; arg; arg = TREE_CHAIN(arg), i++) if (i == idx) break; gcc_assert(arg); OMP_CLAUSE_DECL(c) = arg; if (OMP_CLAUSE_CODE(c) == OMP_CLAUSE_LINEAR && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE(c)) { idx = tree_to_shwi(OMP_CLAUSE_LINEAR_STEP(c)); for (arg = DECL_ARGUMENTS(fndecl), i = 0; arg; arg = TREE_CHAIN(arg), i++) if (i == idx) break; gcc_assert(arg); OMP_CLAUSE_LINEAR_STEP(c) = arg; } } } /* True if OpenMP sharing attribute of DECL is predetermined. */ enum omp_clause_default_kind c_omp_predetermined_sharing(tree decl) { /* * Variables with const-qualified type having no mutable member are * predetermined shared. */ if (TREE_READONLY(decl)) return OMP_CLAUSE_DEFAULT_SHARED; return OMP_CLAUSE_DEFAULT_UNSPECIFIED; }
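As a companion to c_finish_omp_atomic above, the following user-level fragment (an illustrative example, not part of the sources) exercises the tree codes the function distinguishes: OMP_ATOMIC for an update, OMP_ATOMIC_READ for a read, and OMP_ATOMIC_CAPTURE_NEW for a capture that returns the new value of the location.

#include <stdio.h>

int main (void)
{
  int x = 0, v = 0;

#pragma omp atomic update
  x += 2;        /* OMP_ATOMIC: LHS opcode= RHS */

#pragma omp atomic read
  v = x;         /* OMP_ATOMIC_READ: V = LHS */

#pragma omp atomic capture
  v = x += 2;    /* OMP_ATOMIC_CAPTURE_NEW: V gets the new content of LHS */

  printf ("x = %d, v = %d\n", x, v);
  return 0;
}

In each case the front end takes the address of x once (the save_expr/TARGET_EXPR handling above), rewrites the statement in terms of that address, and leaves the actual generation of atomic instructions to the middle end.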
trmv_x_dia_n_hi_trans.c
#include "alphasparse/kernel.h" #include "alphasparse/opt.h" #include "alphasparse/util.h" #include <string.h> #ifdef _OPENMP #include <omp.h> #endif static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y) { const ALPHA_INT m = A->rows; const ALPHA_INT n = A->cols; if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE; const ALPHA_INT thread_num = alpha_get_thread_num(); ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num); for(int i = 0; i < thread_num; ++i) { tmp[i] = malloc(sizeof(ALPHA_Number) * m); memset(tmp[i], 0, sizeof(ALPHA_Number) * m); } const ALPHA_INT diags = A->ndiag; #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < diags; ++i) { const ALPHA_INT threadId = alpha_get_thread_id(); const ALPHA_INT dis = A->distance[i]; if(dis == 0) { const ALPHA_INT start = i * A->lval; for(ALPHA_INT j = 0; j < m; ++j) { ALPHA_Number v; alpha_mul(v, alpha, A->values[start + j]); alpha_madde(tmp[threadId][j], v, x[j]); } } else if(dis > 0) { const ALPHA_INT row_start = 0; const ALPHA_INT col_start = dis; const ALPHA_INT nnz = m - dis; const ALPHA_INT start = i * A->lval; for(ALPHA_INT j = 0; j < nnz; ++j) { ALPHA_Number v; alpha_mul(v, alpha, A->values[start + j]); alpha_madde(tmp[threadId][col_start + j], v, x[row_start + j]); } } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for(ALPHA_INT i = 0; i < m; ++i) { alpha_mul(y[i], beta, y[i]); for(ALPHA_INT j = 0; j < thread_num; ++j) { alpha_add(y[i], y[i], tmp[j][i]); } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < thread_num; ++i) { alpha_free(tmp[i]); } alpha_free(tmp); return ALPHA_SPARSE_STATUS_SUCCESS; } alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y) { return ONAME_omp(alpha, A, x, beta, y); }
#include "alphasparse/kernel.h" #include "alphasparse/opt.h" #include "alphasparse/util.h" #include <string.h> static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA * A, const ALPHA_Number * x, const ALPHA_Number beta, ALPHA_Number * y) { const ALPHA_INT m = A->rows; const ALPHA_INT n = A->cols; if (m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE; const ALPHA_INT thread_num = alpha_get_thread_num(); ALPHA_Number **tmp = (ALPHA_Number **) malloc(sizeof(ALPHA_Number *) * thread_num); for (int i = 0; i < thread_num; ++i) { tmp[i] = malloc(sizeof(ALPHA_Number) * m); memset(tmp[i], 0, sizeof(ALPHA_Number) * m); } const ALPHA_INT diags = A->ndiag; for (ALPHA_INT i = 0; i < diags; ++i) { const ALPHA_INT threadId = alpha_get_thread_id(); const ALPHA_INT dis = A->distance[i]; if (dis == 0) { const ALPHA_INT start = i * A->lval; for (ALPHA_INT j = 0; j < m; ++j) { ALPHA_Number v; alpha_mul(v, alpha, A->values[start + j]); alpha_madde(tmp[threadId][j], v, x[j]); } } else if (dis > 0) { const ALPHA_INT row_start = 0; const ALPHA_INT col_start = dis; const ALPHA_INT nnz = m - dis; const ALPHA_INT start = i * A->lval; for (ALPHA_INT j = 0; j < nnz; ++j) { ALPHA_Number v; alpha_mul(v, alpha, A->values[start + j]); alpha_madde(tmp[threadId][col_start + j], v, x[row_start + j]); } } } for (ALPHA_INT i = 0; i < m; ++i) { alpha_mul(y[i], beta, y[i]); for (ALPHA_INT j = 0; j < thread_num; ++j) { alpha_add(y[i], y[i], tmp[j][i]); } } for (ALPHA_INT i = 0; i < thread_num; ++i) { alpha_free(tmp[i]); } alpha_free(tmp); return ALPHA_SPARSE_STATUS_SUCCESS; } alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA * A, const ALPHA_Number * x, const ALPHA_Number beta, ALPHA_Number * y) { return ONAME_omp(alpha, A, x, beta, y); }
#include "alphasparse/kernel.h" #include "alphasparse/opt.h" #include "alphasparse/util.h" #include <string.h> #ifdef _OPENMP #include <omp.h> #endif static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA * A, const ALPHA_Number * x, const ALPHA_Number beta, ALPHA_Number * y) { const ALPHA_INT m = A->rows; const ALPHA_INT n = A->cols; if (m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE; const ALPHA_INT thread_num = alpha_get_thread_num(); ALPHA_Number **tmp = (ALPHA_Number **) malloc(sizeof(ALPHA_Number *) * thread_num); for (int i = 0; i < thread_num; ++i) { tmp[i] = malloc(sizeof(ALPHA_Number) * m); memset(tmp[i], 0, sizeof(ALPHA_Number) * m); } const ALPHA_INT diags = A->ndiag; #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < diags; ++i) { const ALPHA_INT threadId = alpha_get_thread_id(); const ALPHA_INT dis = A->distance[i]; if (dis == 0) { const ALPHA_INT start = i * A->lval; for (ALPHA_INT j = 0; j < m; ++j) { ALPHA_Number v; alpha_mul(v, alpha, A->values[start + j]); alpha_madde(tmp[threadId][j], v, x[j]); } } else if (dis > 0) { const ALPHA_INT row_start = 0; const ALPHA_INT col_start = dis; const ALPHA_INT nnz = m - dis; const ALPHA_INT start = i * A->lval; for (ALPHA_INT j = 0; j < nnz; ++j) { ALPHA_Number v; alpha_mul(v, alpha, A->values[start + j]); alpha_madde(tmp[threadId][col_start + j], v, x[row_start + j]); } } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < m; ++i) { alpha_mul(y[i], beta, y[i]); for (ALPHA_INT j = 0; j < thread_num; ++j) { alpha_add(y[i], y[i], tmp[j][i]); } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < thread_num; ++i) { alpha_free(tmp[i]); } alpha_free(tmp); return ALPHA_SPARSE_STATUS_SUCCESS; } alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA * A, const ALPHA_Number * x, const ALPHA_Number beta, ALPHA_Number * y) { return ONAME_omp(alpha, A, x, beta, y); }
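Each of the three variants above relies on the same pattern: the loop over diagonals cannot update y directly, because several diagonals touch overlapping entries of y, so each thread accumulates into a private scratch row and a second parallel pass folds the per-thread rows into y. Below is a minimal self-contained sketch of that pattern for y = alpha*A^T*x + beta*y with an upper-triangular matrix stored by diagonals (plain C with OpenMP; the sizes and data are invented, and the alpha_mul/alpha_madde macros are replaced by ordinary double arithmetic).

/* dia_trmv_sketch.c: per-thread accumulators, illustrative only. */
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  enum { N = 8, NDIAG = 3 };
  const int distance[NDIAG] = { 0, 1, 3 }; /* upper diagonals, as in "hi" */
  double values[NDIAG][N];                 /* lval == N in this sketch */
  double x[N], y[N];
  for (int d = 0; d < NDIAG; d++)
    for (int j = 0; j < N; j++)
      values[d][j] = 1.0;
  for (int i = 0; i < N; i++) { x[i] = (double) i; y[i] = 1.0; }
  const double alpha = 2.0, beta = 0.5;

  const int nthreads = omp_get_max_threads();
  double *tmp = calloc((size_t) nthreads * N, sizeof *tmp);

  /* Pass 1: each thread owns a zeroed slice of tmp, so threads handling
     different diagonals never race on the same output entry. */
  #pragma omp parallel for
  for (int d = 0; d < NDIAG; d++) {
    double *acc = tmp + (size_t) omp_get_thread_num() * N;
    const int dis = distance[d], nnz = N - dis;
    for (int j = 0; j < nnz; j++)      /* transposed: y[col] += v * x[row] */
      acc[j + dis] += alpha * values[d][j] * x[j];
  }

  /* Pass 2: scale y by beta, then fold in every thread's partial sums. */
  #pragma omp parallel for
  for (int i = 0; i < N; i++) {
    y[i] *= beta;
    for (int t = 0; t < nthreads; t++)
      y[i] += tmp[(size_t) t * N + i];
  }

  free(tmp);
  for (int i = 0; i < N; i++)
    printf("%g ", y[i]);
  putchar('\n');
  return 0;
}

Splitting the reduction this way trades O(thread_num * m) extra memory for lock-free updates; the kernels above make the same trade, which is why they allocate thread_num temporary rows up front instead of using atomics on y.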
_polyprism.c
/* Generated by Cython 0.20.1 on Thu Jul 3 12:41:06 2014 */ #define PY_SSIZE_T_CLEAN #ifndef CYTHON_USE_PYLONG_INTERNALS #ifdef PYLONG_BITS_IN_DIGIT #define CYTHON_USE_PYLONG_INTERNALS 0 #else #include "pyconfig.h" #ifdef PYLONG_BITS_IN_DIGIT #define CYTHON_USE_PYLONG_INTERNALS 1 #else #define CYTHON_USE_PYLONG_INTERNALS 0 #endif #endif #endif #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #define CYTHON_ABI "0_20_1" #include <stddef.h> /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if CYTHON_COMPILING_IN_PYPY #define Py_OptimizeFlag 0 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_As_int(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, 
lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #if PY_VERSION_HEX < 0x02060000 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX < 0x02060000 #define Py_TPFLAGS_HAVE_VERSION_TAG 0 #endif #if PY_VERSION_HEX < 0x02060000 && !defined(Py_TPFLAGS_IS_ABSTRACT) #define Py_TPFLAGS_IS_ABSTRACT 0 #endif #if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE) #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj) || \ PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? 
(PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is a quiet NaN. 
*/ float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #define __PYX_HAVE__fatiando__gravmag___polyprism #define __PYX_HAVE_API__fatiando__gravmag___polyprism #include "math.h" #include "string.h" #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "omp.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \ (sizeof(type) < sizeof(Py_ssize_t)) || \ (sizeof(type) > sizeof(Py_ssize_t) && \ likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX) && \ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN || \ v == (type)PY_SSIZE_T_MIN))) || \ (sizeof(type) == sizeof(Py_ssize_t) && \ (is_signed || likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromUString(s) __Pyx_PyObject_FromString((char*)s) #define __Pyx_PyBytes_FromUString(s) __Pyx_PyBytes_FromString((char*)s) #define __Pyx_PyByteArray_FromUString(s) __Pyx_PyByteArray_FromString((char*)s) #define __Pyx_PyStr_FromUString(s) __Pyx_PyStr_FromString((char*)s) #define 
__Pyx_PyUnicode_FromUString(s) __Pyx_PyUnicode_FromString((char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return u_end - u - 1; } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys = NULL; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; sys = PyImport_ImportModule("sys"); if (sys == NULL) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); if (default_encoding == NULL) goto bad; if (strcmp(PyBytes_AsString(default_encoding), "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { const char* default_encoding_c = PyBytes_AS_STRING(default_encoding); char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (ascii_chars_u == NULL) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (ascii_chars_b == NULL || strncmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } } Py_XDECREF(sys); Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return 0; bad: Py_XDECREF(sys); Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys = NULL; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (sys == NULL) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); if (default_encoding == NULL) goto bad; default_encoding_c = PyBytes_AS_STRING(default_encoding); __PYX_DEFAULT_STRING_ENCODING = (char*) 
malloc(strlen(default_encoding_c)); strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(sys); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(sys); Py_XDECREF(default_encoding); return -1; } #endif #endif #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "_polyprism.pyx", "__init__.pxd", "type.pxd", }; #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; /* for error messages only */ struct __Pyx_StructField_* fields; size_t size; /* sizeof(type) */ size_t arraysize[8]; /* length of array in each dimension */ int ndim; char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */ char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "fatiando/gravmag/_polyprism.pyx":16 * * DTYPE = numpy.float * ctypedef numpy.float_t DTYPE_T # <<<<<<<<<<<<<< * * cdef inline double kernelz(double X1, double Y1, double X2, double Y2, */ typedef __pyx_t_5numpy_float_t __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_XDECREF_SET(r, v) do { \ PyObject *tmp = (PyObject *) r; \ r = v; __Pyx_XDECREF(tmp); \ } while (0) #define __Pyx_DECREF_SET(r, v) do { \ PyObject *tmp = (PyObject *) r; \ r = v; __Pyx_DECREF(tmp); \ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} 
while(0) #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif static PyObject *__Pyx_GetBuiltinName(PyObject *name); /*proto*/ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) static CYTHON_INLINE long __Pyx_mod_long(long, long); /* proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); /*proto*/ #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /*proto*/ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /*proto*/ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value); static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *); #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) 
#define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if (defined(_WIN32) || defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex 
__Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'libc.math' */ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'cython' */ /* Module declarations from 'openmp' */ /* Module declarations from 'fatiando.gravmag._polyprism' */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelz(double, double, double, double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(double, double, double, double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(double, double, double, double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(double, double, double, double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(double, double, double, double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(double, double, double, double, double, double, double, 
double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(double, double, double, double, double, double, double, double); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T = { "DTYPE_T", NULL, sizeof(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "fatiando.gravmag._polyprism" int __pyx_module_is_main_fatiando__gravmag___polyprism = 0; /* Implementation of 'fatiando.gravmag._polyprism' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_gz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_2gxx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_4gxy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_6gxz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_8gyy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_10gyz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_12gzz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_14tf(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, double __pyx_v_fx, double __pyx_v_fy, double __pyx_v_fz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_16bx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, 
PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_18by(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_20bz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_B[] = "B"; static char __pyx_k_H[] = "H"; static char __pyx_k_I[] = "I"; static char __pyx_k_L[] = "L"; static char __pyx_k_O[] = "O"; static char __pyx_k_Q[] = "Q"; static char __pyx_k_b[] = "b"; static char __pyx_k_d[] = "d"; static char __pyx_k_f[] = "f"; static char __pyx_k_g[] = "g"; static char __pyx_k_h[] = "h"; static char __pyx_k_i[] = "i"; static char __pyx_k_k[] = "k"; static char __pyx_k_l[] = "l"; static char __pyx_k_q[] = "q"; static char __pyx_k_x[] = "x"; static char __pyx_k_y[] = "y"; static char __pyx_k_X1[] = "X1"; static char __pyx_k_X2[] = "X2"; static char __pyx_k_Y1[] = "Y1"; static char __pyx_k_Y2[] = "Y2"; static char __pyx_k_Z1[] = "Z1"; static char __pyx_k_Z2[] = "Z2"; static char __pyx_k_Zd[] = "Zd"; static char __pyx_k_Zf[] = "Zf"; static char __pyx_k_Zg[] = "Zg"; static char __pyx_k_bx[] = "bx"; static char __pyx_k_by[] = "by"; static char __pyx_k_bz[] = "bz"; static char __pyx_k_fx[] = "fx"; static char __pyx_k_fy[] = "fy"; static char __pyx_k_fz[] = "fz"; static char __pyx_k_gz[] = "gz"; static char __pyx_k_mx[] = "mx"; static char __pyx_k_my[] = "my"; static char __pyx_k_mz[] = "mz"; static char __pyx_k_tf[] = "tf"; static char __pyx_k_v1[] = "v1"; static char __pyx_k_v2[] = "v2"; static char __pyx_k_v3[] = "v3"; static char __pyx_k_v4[] = "v4"; static char __pyx_k_v5[] = "v5"; static char __pyx_k_v6[] = "v6"; static char __pyx_k_xp[] = "xp"; static char __pyx_k_yp[] = "yp"; static char __pyx_k_z1[] = "z1"; static char __pyx_k_z2[] = "z2"; static char __pyx_k_zp[] = "zp"; static char __pyx_k_gxx[] = "gxx"; static char __pyx_k_gxy[] = "gxy"; static char __pyx_k_gxz[] = "gxz"; static char __pyx_k_gyy[] = "gyy"; static char __pyx_k_gyz[] = "gyz"; static char __pyx_k_gzz[] = "gzz"; static char __pyx_k_kp1[] = "kp1"; static char __pyx_k_res[] = "res"; static char __pyx_k_main[] = "__main__"; static char __pyx_k_size[] = "size"; static char __pyx_k_test[] = "__test__"; static char __pyx_k_DTYPE[] = "DTYPE"; static char __pyx_k_float[] = "float"; static char __pyx_k_numpy[] = "numpy"; static char __pyx_k_range[] = "range"; static char __pyx_k_Z1_sqr[] = "Z1_sqr"; static char __pyx_k_Z2_sqr[] = "Z2_sqr"; static char __pyx_k_import[] = "__import__"; static char __pyx_k_kernel[] = "kernel"; static char __pyx_k_nverts[] = "nverts"; 
static char __pyx_k_density[] = "density"; static char __pyx_k_ValueError[] = "ValueError"; static char __pyx_k_RuntimeError[] = "RuntimeError"; static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static char __pyx_k_pyx_releasebuffer[] = "__pyx_releasebuffer"; static char __pyx_k_fatiando_gravmag__polyprism[] = "fatiando.gravmag._polyprism"; static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static char __pyx_k_home_leo_src_fatiando_fatiando[] = "/home/leo/src/fatiando/fatiando/gravmag/_polyprism.pyx"; static char __pyx_k_This_is_a_Cython_implementation[] = "\nThis is a Cython implementation of the potential fields of a polygonal prism.\nA pure python implementation is in _polyprism_numpy.py\n"; static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_n_s_DTYPE; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_X1; static PyObject *__pyx_n_s_X2; static PyObject *__pyx_n_s_Y1; static PyObject *__pyx_n_s_Y2; static PyObject *__pyx_n_s_Z1; static PyObject *__pyx_n_s_Z1_sqr; static PyObject *__pyx_n_s_Z2; static PyObject *__pyx_n_s_Z2_sqr; static PyObject *__pyx_n_s_bx; static PyObject *__pyx_n_s_by; static PyObject *__pyx_n_s_bz; static PyObject *__pyx_n_s_density; static PyObject *__pyx_n_s_fatiando_gravmag__polyprism; static PyObject *__pyx_n_s_float; static PyObject *__pyx_n_s_fx; static PyObject *__pyx_n_s_fy; static PyObject *__pyx_n_s_fz; static PyObject *__pyx_n_s_gxx; static PyObject *__pyx_n_s_gxy; static PyObject *__pyx_n_s_gxz; static PyObject *__pyx_n_s_gyy; static PyObject *__pyx_n_s_gyz; static PyObject *__pyx_n_s_gz; static PyObject *__pyx_n_s_gzz; static PyObject *__pyx_kp_s_home_leo_src_fatiando_fatiando; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_k; static PyObject *__pyx_n_s_kernel; static PyObject *__pyx_n_s_kp1; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_mx; static PyObject *__pyx_n_s_my; static PyObject *__pyx_n_s_mz; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_nverts; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_releasebuffer; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_res; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_tf; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_v1; static PyObject *__pyx_n_s_v2; static PyObject *__pyx_n_s_v3; static PyObject *__pyx_n_s_v4; static PyObject *__pyx_n_s_v5; static PyObject *__pyx_n_s_v6; static PyObject *__pyx_n_s_x; static PyObject *__pyx_n_s_xp; static PyObject *__pyx_n_s_y; static PyObject *__pyx_n_s_yp; static PyObject *__pyx_n_s_z1; static PyObject 
*__pyx_n_s_z2; static PyObject *__pyx_n_s_zp; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__27; static PyObject *__pyx_codeobj__8; static PyObject *__pyx_codeobj__10; static PyObject *__pyx_codeobj__12; static PyObject *__pyx_codeobj__14; static PyObject *__pyx_codeobj__16; static PyObject *__pyx_codeobj__18; static PyObject *__pyx_codeobj__20; static PyObject *__pyx_codeobj__22; static PyObject *__pyx_codeobj__24; static PyObject *__pyx_codeobj__26; static PyObject *__pyx_codeobj__28; /* "fatiando/gravmag/_polyprism.pyx":18 * ctypedef numpy.float_t DTYPE_T * * cdef inline double kernelz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelz(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, double __pyx_v_Z1_sqr, double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Qk1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Qk2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Ak1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Ak2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R1k1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R1k2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R2k1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R2k2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Bk1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Bk2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_E1k1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_E1k2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_E2k1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_E2k2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Ck1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Ck2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":24 * Qk1, Qk2, Ak1, Ak2, R1k1, R1k2, R2k1, R2k2, Bk1, Bk2, E1k1, \ * E1k2, E2k1, E2k2, Ck1, Ck2 * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * p = X1*Y2 - X2*Y1 */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":25 * E1k2, E2k1, E2k2, Ck1, Ck2 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * p = X1*Y2 - X2*Y1 * p_sqr = 
p**2 */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":26 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * p = X1*Y2 - X2*Y1 # <<<<<<<<<<<<<< * p_sqr = p**2 * Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 */ __pyx_v_p = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":27 * kernel = 0 * p = X1*Y2 - X2*Y1 * p_sqr = p**2 # <<<<<<<<<<<<<< * Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 * Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 */ __pyx_v_p_sqr = pow(__pyx_v_p, 2.0); /* "fatiando/gravmag/_polyprism.pyx":28 * p = X1*Y2 - X2*Y1 * p_sqr = p**2 * Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 # <<<<<<<<<<<<<< * Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 * Ak1 = X1**2 + Y1**2 */ __pyx_v_Qk1 = (((__pyx_v_Y2 - __pyx_v_Y1) * __pyx_v_Y1) + ((__pyx_v_X2 - __pyx_v_X1) * __pyx_v_X1)); /* "fatiando/gravmag/_polyprism.pyx":29 * p_sqr = p**2 * Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 * Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 # <<<<<<<<<<<<<< * Ak1 = X1**2 + Y1**2 * Ak2 = X2**2 + Y2**2 */ __pyx_v_Qk2 = (((__pyx_v_Y2 - __pyx_v_Y1) * __pyx_v_Y2) + ((__pyx_v_X2 - __pyx_v_X1) * __pyx_v_X2)); /* "fatiando/gravmag/_polyprism.pyx":30 * Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 * Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 * Ak1 = X1**2 + Y1**2 # <<<<<<<<<<<<<< * Ak2 = X2**2 + Y2**2 * R1k1 = sqrt(Ak1 + Z1_sqr) */ __pyx_v_Ak1 = (pow(__pyx_v_X1, 2.0) + pow(__pyx_v_Y1, 2.0)); /* "fatiando/gravmag/_polyprism.pyx":31 * Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 * Ak1 = X1**2 + Y1**2 * Ak2 = X2**2 + Y2**2 # <<<<<<<<<<<<<< * R1k1 = sqrt(Ak1 + Z1_sqr) * R1k2 = sqrt(Ak2 + Z1_sqr) */ __pyx_v_Ak2 = (pow(__pyx_v_X2, 2.0) + pow(__pyx_v_Y2, 2.0)); /* "fatiando/gravmag/_polyprism.pyx":32 * Ak1 = X1**2 + Y1**2 * Ak2 = X2**2 + Y2**2 * R1k1 = sqrt(Ak1 + Z1_sqr) # <<<<<<<<<<<<<< * R1k2 = sqrt(Ak2 + Z1_sqr) * R2k1 = sqrt(Ak1 + Z2_sqr) */ __pyx_v_R1k1 = sqrt((__pyx_v_Ak1 + __pyx_v_Z1_sqr)); /* "fatiando/gravmag/_polyprism.pyx":33 * Ak2 = X2**2 + Y2**2 * R1k1 = sqrt(Ak1 + Z1_sqr) * R1k2 = sqrt(Ak2 + Z1_sqr) # <<<<<<<<<<<<<< * R2k1 = sqrt(Ak1 + Z2_sqr) * R2k2 = sqrt(Ak2 + Z2_sqr) */ __pyx_v_R1k2 = sqrt((__pyx_v_Ak2 + __pyx_v_Z1_sqr)); /* "fatiando/gravmag/_polyprism.pyx":34 * R1k1 = sqrt(Ak1 + Z1_sqr) * R1k2 = sqrt(Ak2 + Z1_sqr) * R2k1 = sqrt(Ak1 + Z2_sqr) # <<<<<<<<<<<<<< * R2k2 = sqrt(Ak2 + Z2_sqr) * Ak1 = sqrt(Ak1) */ __pyx_v_R2k1 = sqrt((__pyx_v_Ak1 + __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":35 * R1k2 = sqrt(Ak2 + Z1_sqr) * R2k1 = sqrt(Ak1 + Z2_sqr) * R2k2 = sqrt(Ak2 + Z2_sqr) # <<<<<<<<<<<<<< * Ak1 = sqrt(Ak1) * Ak2 = sqrt(Ak2) */ __pyx_v_R2k2 = sqrt((__pyx_v_Ak2 + __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":36 * R2k1 = sqrt(Ak1 + Z2_sqr) * R2k2 = sqrt(Ak2 + Z2_sqr) * Ak1 = sqrt(Ak1) # <<<<<<<<<<<<<< * Ak2 = sqrt(Ak2) * Bk1 = sqrt(Qk1**2 + p_sqr) */ __pyx_v_Ak1 = sqrt(__pyx_v_Ak1); /* "fatiando/gravmag/_polyprism.pyx":37 * R2k2 = sqrt(Ak2 + Z2_sqr) * Ak1 = sqrt(Ak1) * Ak2 = sqrt(Ak2) # <<<<<<<<<<<<<< * Bk1 = sqrt(Qk1**2 + p_sqr) * Bk2 = sqrt(Qk2**2 + p_sqr) */ __pyx_v_Ak2 = sqrt(__pyx_v_Ak2); /* "fatiando/gravmag/_polyprism.pyx":38 * Ak1 = sqrt(Ak1) * Ak2 = sqrt(Ak2) * Bk1 = sqrt(Qk1**2 + p_sqr) # <<<<<<<<<<<<<< * Bk2 = sqrt(Qk2**2 + p_sqr) * E1k1 = R1k1*Bk1 */ __pyx_v_Bk1 = sqrt((pow(__pyx_v_Qk1, 2.0) + __pyx_v_p_sqr)); /* "fatiando/gravmag/_polyprism.pyx":39 * Ak2 = sqrt(Ak2) * Bk1 = sqrt(Qk1**2 + p_sqr) * Bk2 = sqrt(Qk2**2 + p_sqr) # <<<<<<<<<<<<<< * E1k1 = R1k1*Bk1 * E1k2 = R1k2*Bk2 */ __pyx_v_Bk2 = sqrt((pow(__pyx_v_Qk2, 2.0) + __pyx_v_p_sqr)); /* "fatiando/gravmag/_polyprism.pyx":40 * Bk1 = sqrt(Qk1**2 + p_sqr) * Bk2 = 
sqrt(Qk2**2 + p_sqr) * E1k1 = R1k1*Bk1 # <<<<<<<<<<<<<< * E1k2 = R1k2*Bk2 * E2k1 = R2k1*Bk1 */ __pyx_v_E1k1 = (__pyx_v_R1k1 * __pyx_v_Bk1); /* "fatiando/gravmag/_polyprism.pyx":41 * Bk2 = sqrt(Qk2**2 + p_sqr) * E1k1 = R1k1*Bk1 * E1k2 = R1k2*Bk2 # <<<<<<<<<<<<<< * E2k1 = R2k1*Bk1 * E2k2 = R2k2*Bk2 */ __pyx_v_E1k2 = (__pyx_v_R1k2 * __pyx_v_Bk2); /* "fatiando/gravmag/_polyprism.pyx":42 * E1k1 = R1k1*Bk1 * E1k2 = R1k2*Bk2 * E2k1 = R2k1*Bk1 # <<<<<<<<<<<<<< * E2k2 = R2k2*Bk2 * kernel += (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) */ __pyx_v_E2k1 = (__pyx_v_R2k1 * __pyx_v_Bk1); /* "fatiando/gravmag/_polyprism.pyx":43 * E1k2 = R1k2*Bk2 * E2k1 = R2k1*Bk1 * E2k2 = R2k2*Bk2 # <<<<<<<<<<<<<< * kernel += (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) * kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, R2k2*p)) */ __pyx_v_E2k2 = (__pyx_v_R2k2 * __pyx_v_Bk2); /* "fatiando/gravmag/_polyprism.pyx":44 * E2k1 = R2k1*Bk1 * E2k2 = R2k2*Bk2 * kernel += (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) # <<<<<<<<<<<<<< * kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, R2k2*p)) * kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) */ __pyx_v_kernel = (__pyx_v_kernel + ((__pyx_v_Z2 - __pyx_v_Z1) * (atan2(__pyx_v_Qk2, __pyx_v_p) - atan2(__pyx_v_Qk1, __pyx_v_p)))); /* "fatiando/gravmag/_polyprism.pyx":45 * E2k2 = R2k2*Bk2 * kernel += (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) * kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, R2k2*p)) # <<<<<<<<<<<<<< * kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) * Ck1 = Qk1*Ak1 */ __pyx_v_kernel = (__pyx_v_kernel + (__pyx_v_Z2 * (atan2((__pyx_v_Z2 * __pyx_v_Qk1), (__pyx_v_R2k1 * __pyx_v_p)) - atan2((__pyx_v_Z2 * __pyx_v_Qk2), (__pyx_v_R2k2 * __pyx_v_p))))); /* "fatiando/gravmag/_polyprism.pyx":46 * kernel += (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) * kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, R2k2*p)) * kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) # <<<<<<<<<<<<<< * Ck1 = Qk1*Ak1 * Ck2 = Qk2*Ak2 */ __pyx_v_kernel = (__pyx_v_kernel + (__pyx_v_Z1 * (atan2((__pyx_v_Z1 * __pyx_v_Qk2), (__pyx_v_R1k2 * __pyx_v_p)) - atan2((__pyx_v_Z1 * __pyx_v_Qk1), (__pyx_v_R1k1 * __pyx_v_p))))); /* "fatiando/gravmag/_polyprism.pyx":47 * kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, R2k2*p)) * kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) * Ck1 = Qk1*Ak1 # <<<<<<<<<<<<<< * Ck2 = Qk2*Ak2 * # dummy helps prevent zero division errors */ __pyx_v_Ck1 = (__pyx_v_Qk1 * __pyx_v_Ak1); /* "fatiando/gravmag/_polyprism.pyx":48 * kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) * Ck1 = Qk1*Ak1 * Ck2 = Qk2*Ak2 # <<<<<<<<<<<<<< * # dummy helps prevent zero division errors * kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( */ __pyx_v_Ck2 = (__pyx_v_Qk2 * __pyx_v_Ak2); /* "fatiando/gravmag/_polyprism.pyx":50 * Ck2 = Qk2*Ak2 * # dummy helps prevent zero division errors * kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( # <<<<<<<<<<<<<< * log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) */ __pyx_t_1 = (__pyx_v_Bk1 + __pyx_v_dummy); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":51 * # dummy helps prevent zero division errors * kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( * log((E1k1 - Ck1)/(E1k1 
+ Ck1 + dummy) + dummy) - # <<<<<<<<<<<<<< * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( */ __pyx_t_2 = (__pyx_v_E1k1 - __pyx_v_Ck1); __pyx_t_3 = ((__pyx_v_E1k1 + __pyx_v_Ck1) + __pyx_v_dummy); if (unlikely(__pyx_t_3 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":52 * kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( * log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) # <<<<<<<<<<<<<< * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - */ __pyx_t_4 = (__pyx_v_E2k1 - __pyx_v_Ck1); __pyx_t_5 = ((__pyx_v_E2k1 + __pyx_v_Ck1) + __pyx_v_dummy); if (unlikely(__pyx_t_5 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":50 * Ck2 = Qk2*Ak2 * # dummy helps prevent zero division errors * kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( # <<<<<<<<<<<<<< * log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) */ __pyx_v_kernel = (__pyx_v_kernel + (((0.5 * __pyx_v_p) * (__pyx_v_Ak1 / __pyx_t_1)) * (log(((__pyx_t_2 / __pyx_t_3) + __pyx_v_dummy)) - log(((__pyx_t_4 / __pyx_t_5) + __pyx_v_dummy))))); /* "fatiando/gravmag/_polyprism.pyx":53 * log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( # <<<<<<<<<<<<<< * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - * log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) */ __pyx_t_5 = (__pyx_v_Bk2 + __pyx_v_dummy); if (unlikely(__pyx_t_5 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":54 * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - # <<<<<<<<<<<<<< * log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) * return kernel */ __pyx_t_4 = (__pyx_v_E2k2 - __pyx_v_Ck2); __pyx_t_3 = ((__pyx_v_E2k2 + __pyx_v_Ck2) + __pyx_v_dummy); if (unlikely(__pyx_t_3 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":55 * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - * log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) # <<<<<<<<<<<<<< * return kernel * */ __pyx_t_2 = (__pyx_v_E1k2 - __pyx_v_Ck2); __pyx_t_1 = ((__pyx_v_E1k2 + __pyx_v_Ck2) + __pyx_v_dummy); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD 
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":53 * log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( # <<<<<<<<<<<<<< * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - * log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) */ __pyx_v_kernel = (__pyx_v_kernel + (((0.5 * __pyx_v_p) * (__pyx_v_Ak2 / __pyx_t_5)) * (log(((__pyx_t_4 / __pyx_t_3) + __pyx_v_dummy)) - log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy))))); /* "fatiando/gravmag/_polyprism.pyx":56 * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - * log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelxx(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":18 * ctypedef numpy.float_t DTYPE_T * * cdef inline double kernelz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":58 * return kernel * * cdef inline double kernelxx(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_n; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_g; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; 
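/* Consolidated restatement (a reference sketch assembled from the .pyx
   lines quoted above, not part of the generated output): writing
   p = X1*Y2 - X2*Y1, Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1,
   Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2, Ak1 = sqrt(X1*X1 + Y1*Y1),
   Ak2 = sqrt(X2*X2 + Y2*Y2), R1k1 = sqrt(X1*X1 + Y1*Y1 + Z1_sqr),
   R1k2 = sqrt(X2*X2 + Y2*Y2 + Z1_sqr), R2k1 and R2k2 likewise with
   Z2_sqr, Bk1 = sqrt(Qk1*Qk1 + p*p), Bk2 = sqrt(Qk2*Qk2 + p*p),
   E1k1 = R1k1*Bk1, E1k2 = R1k2*Bk2, E2k1 = R2k1*Bk1, E2k2 = R2k2*Bk2,
   Ck1 = Qk1*Ak1 and Ck2 = Qk2*Ak2, the kernelz routine completed above
   evaluates, per polygon edge,

     kernel = (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p))
            + Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, R2k2*p))
            + Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p))
            + 0.5*p*(Ak1/Bk1)*(log((E1k1 - Ck1)/(E1k1 + Ck1))
                             - log((E2k1 - Ck1)/(E2k1 + Ck1)))
            + 0.5*p*(Ak2/Bk2)*(log((E2k2 - Ck2)/(E2k2 + Ck2))
                             - log((E1k2 - Ck2)/(E1k2 + Ck2))),

   with dummy = 1e-10 added to every divisor and log argument so the
   emitted ZeroDivisionError guards are effectively unreachable on
   degenerate edges.  kernelxx, whose locals are still being declared
   here, applies the same per-edge pattern to the gxx tensor component. */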
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":65 * aux14, aux15, aux16, n, g, p, d1, d2, \ * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * aux0 = X2 - X1 + dummy */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":66 * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":67 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * aux0 = X2 - X1 + dummy # <<<<<<<<<<<<<< * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":68 * kernel = 0 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< * n = (aux0/aux1) * g = X1 - (Y1*n) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":69 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) # <<<<<<<<<<<<<< * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_n = (__pyx_v_aux0 / __pyx_v_aux1); /* "fatiando/gravmag/_polyprism.pyx":70 * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) * g = X1 - (Y1*n) # <<<<<<<<<<<<<< * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) */ __pyx_v_g = (__pyx_v_X1 - (__pyx_v_Y1 * __pyx_v_n)); /* "fatiando/gravmag/_polyprism.pyx":71 * n = (aux0/aux1) * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* "fatiando/gravmag/_polyprism.pyx":72 * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) # <<<<<<<<<<<<<< * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_aux3 = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":73 * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_p = ((__pyx_v_aux3 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":74 * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) # <<<<<<<<<<<<<< * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * 
__pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":75 * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) # <<<<<<<<<<<<<< * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":76 * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy # <<<<<<<<<<<<<< * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":77 * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy # <<<<<<<<<<<<<< * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":78 * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) # <<<<<<<<<<<<<< * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":79 * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) # <<<<<<<<<<<<<< * aux8 = Z1*Z1 * aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":80 * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 # <<<<<<<<<<<<<< * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* "fatiando/gravmag/_polyprism.pyx":81 * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 * aux9 = Z2*Z2 # <<<<<<<<<<<<<< * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* "fatiando/gravmag/_polyprism.pyx":82 * aux8 = Z1*Z1 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) # <<<<<<<<<<<<<< * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":83 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":84 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":85 * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":86 * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) # <<<<<<<<<<<<<< * 
aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R22)); /* "fatiando/gravmag/_polyprism.pyx":87 * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R21)); /* "fatiando/gravmag/_polyprism.pyx":88 * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":89 * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) # <<<<<<<<<<<<<< * aux14 = ((p*aux12)/d2) * res = (g*Y2*aux13) + (n*aux14) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":90 * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) # <<<<<<<<<<<<<< * res = (g*Y2*aux13) + (n*aux14) * aux10 = atan2((Z2*d1), (p*R12)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d2); /* "fatiando/gravmag/_polyprism.pyx":91 * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) * res = (g*Y2*aux13) + (n*aux14) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) */ __pyx_v_res = (((__pyx_v_g * __pyx_v_Y2) * __pyx_v_aux13) + (__pyx_v_n * __pyx_v_aux14)); /* "fatiando/gravmag/_polyprism.pyx":92 * aux14 = ((p*aux12)/d2) * res = (g*Y2*aux13) + (n*aux14) * aux10 = atan2((Z2*d1), (p*R12)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R12)); /* "fatiando/gravmag/_polyprism.pyx":93 * res = (g*Y2*aux13) + (n*aux14) * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R11)); /* "fatiando/gravmag/_polyprism.pyx":94 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":95 * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) # <<<<<<<<<<<<<< * aux14 = ((p*aux12)/d1) * res -= (g*Y1*aux13) + (n*aux14) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":96 * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) # <<<<<<<<<<<<<< * res -= (g*Y1*aux13) + (n*aux14) * aux10 = log(((Z2 + R22) + dummy)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d1); /* "fatiando/gravmag/_polyprism.pyx":97 * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) * res -= (g*Y1*aux13) + (n*aux14) # <<<<<<<<<<<<<< * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) */ __pyx_v_res = (__pyx_v_res - (((__pyx_v_g * __pyx_v_Y1) * __pyx_v_aux13) + (__pyx_v_n * __pyx_v_aux14))); /* "fatiando/gravmag/_polyprism.pyx":98 * aux14 = ((p*aux12)/d1) * res -= (g*Y1*aux13) + (n*aux14) * aux10 = log(((Z2 + R22) + dummy)) # <<<<<<<<<<<<<< * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) */ __pyx_v_aux10 = log(((__pyx_v_Z2 + __pyx_v_R22) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":99 * res -= (g*Y1*aux13) + (n*aux14) * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) # <<<<<<<<<<<<<< * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) */ __pyx_v_aux11 = log(((__pyx_v_Z1 + __pyx_v_R21) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":100 * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) # <<<<<<<<<<<<<< * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 */ __pyx_v_aux12 = log(((__pyx_v_Z2 + __pyx_v_R12) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":101 * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) # <<<<<<<<<<<<<< * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 */ __pyx_v_aux13 = log(((__pyx_v_Z1 + __pyx_v_R11) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":102 * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 # <<<<<<<<<<<<<< * aux15 = aux12 - aux13 * res += (n*(aux15 - aux14)) */ __pyx_v_aux14 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":103 * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 # <<<<<<<<<<<<<< * res += (n*(aux15 - aux14)) * aux0 = (1.0/(1.0 + (n*n))) */ __pyx_v_aux15 = (__pyx_v_aux12 - __pyx_v_aux13); /* "fatiando/gravmag/_polyprism.pyx":104 * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 * res += (n*(aux15 - aux14)) # <<<<<<<<<<<<<< * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 */ __pyx_v_res = (__pyx_v_res + (__pyx_v_n * (__pyx_v_aux15 - __pyx_v_aux14))); /* "fatiando/gravmag/_polyprism.pyx":105 * aux15 = aux12 - aux13 * res += (n*(aux15 - aux14)) * aux0 = (1.0/(1.0 + (n*n))) # <<<<<<<<<<<<<< * res *= -aux0 * kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_n * __pyx_v_n)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux0 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":106 * res += (n*(aux15 - aux14)) * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 # <<<<<<<<<<<<<< * kernel += res * return kernel */ __pyx_v_res = (__pyx_v_res * (-__pyx_v_aux0)); /* "fatiando/gravmag/_polyprism.pyx":107 * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 * kernel += res # <<<<<<<<<<<<<< * return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* "fatiando/gravmag/_polyprism.pyx":108 * res *= -aux0 * kernel += res * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelxy(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":58 * return kernel * * cdef inline double kernelxx(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelxx", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":110 * return kernel * * cdef inline double kernelxy(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_n; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_g; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":117 * aux14, aux15, 
aux16, n, g, p, d1, d2, \ * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * aux0 = X2 - X1 + dummy */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":118 * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":119 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * aux0 = X2 - X1 + dummy # <<<<<<<<<<<<<< * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":120 * kernel = 0 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< * n = (aux0/aux1) * g = X1 - (Y1*n) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":121 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) # <<<<<<<<<<<<<< * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_n = (__pyx_v_aux0 / __pyx_v_aux1); /* "fatiando/gravmag/_polyprism.pyx":122 * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) * g = X1 - (Y1*n) # <<<<<<<<<<<<<< * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) */ __pyx_v_g = (__pyx_v_X1 - (__pyx_v_Y1 * __pyx_v_n)); /* "fatiando/gravmag/_polyprism.pyx":123 * n = (aux0/aux1) * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* "fatiando/gravmag/_polyprism.pyx":124 * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) # <<<<<<<<<<<<<< * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_aux3 = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":125 * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_p = ((__pyx_v_aux3 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":126 * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) # <<<<<<<<<<<<<< * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":127 * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) # <<<<<<<<<<<<<< * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":128 * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) 
+ (aux1*Y2) * d1 = ((aux4/aux2)) + dummy # <<<<<<<<<<<<<< * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":129 * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy # <<<<<<<<<<<<<< * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":130 * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) # <<<<<<<<<<<<<< * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":131 * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) # <<<<<<<<<<<<<< * aux8 = Z1*Z1 * aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":132 * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 # <<<<<<<<<<<<<< * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* "fatiando/gravmag/_polyprism.pyx":133 * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 * aux9 = Z2*Z2 # <<<<<<<<<<<<<< * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* "fatiando/gravmag/_polyprism.pyx":134 * aux8 = Z1*Z1 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) # <<<<<<<<<<<<<< * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":135 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":136 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":137 * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":138 * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R22)); /* "fatiando/gravmag/_polyprism.pyx":139 * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * 
__pyx_v_d2), (__pyx_v_p * __pyx_v_R21)); /* "fatiando/gravmag/_polyprism.pyx":140 * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":141 * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) # <<<<<<<<<<<<<< * aux14 = ((p*aux12)/d2) * res = (((g*g) + (g*n*Y2))*aux13) - aux14 */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":142 * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) # <<<<<<<<<<<<<< * res = (((g*g) + (g*n*Y2))*aux13) - aux14 * aux10 = atan2((Z2*d1), (p*R12)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d2); /* "fatiando/gravmag/_polyprism.pyx":143 * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) * res = (((g*g) + (g*n*Y2))*aux13) - aux14 # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) */ __pyx_v_res = ((((__pyx_v_g * __pyx_v_g) + ((__pyx_v_g * __pyx_v_n) * __pyx_v_Y2)) * __pyx_v_aux13) - __pyx_v_aux14); /* "fatiando/gravmag/_polyprism.pyx":144 * aux14 = ((p*aux12)/d2) * res = (((g*g) + (g*n*Y2))*aux13) - aux14 * aux10 = atan2((Z2*d1), (p*R12)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R12)); /* "fatiando/gravmag/_polyprism.pyx":145 * res = (((g*g) + (g*n*Y2))*aux13) - aux14 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R11)); /* "fatiando/gravmag/_polyprism.pyx":146 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":147 * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) # <<<<<<<<<<<<<< * aux14 = ((p*aux12)/d1) * res -= (((g*g) + (g*n*Y1))*aux13) - aux14 */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":148 * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) # <<<<<<<<<<<<<< * res -= (((g*g) + 
(g*n*Y1))*aux13) - aux14 * aux10 = log(((Z2 + R22) + dummy)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d1); /* "fatiando/gravmag/_polyprism.pyx":149 * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) * res -= (((g*g) + (g*n*Y1))*aux13) - aux14 # <<<<<<<<<<<<<< * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) */ __pyx_v_res = (__pyx_v_res - ((((__pyx_v_g * __pyx_v_g) + ((__pyx_v_g * __pyx_v_n) * __pyx_v_Y1)) * __pyx_v_aux13) - __pyx_v_aux14)); /* "fatiando/gravmag/_polyprism.pyx":150 * aux14 = ((p*aux12)/d1) * res -= (((g*g) + (g*n*Y1))*aux13) - aux14 * aux10 = log(((Z2 + R22) + dummy)) # <<<<<<<<<<<<<< * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) */ __pyx_v_aux10 = log(((__pyx_v_Z2 + __pyx_v_R22) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":151 * res -= (((g*g) + (g*n*Y1))*aux13) - aux14 * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) # <<<<<<<<<<<<<< * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) */ __pyx_v_aux11 = log(((__pyx_v_Z1 + __pyx_v_R21) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":152 * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) # <<<<<<<<<<<<<< * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 */ __pyx_v_aux12 = log(((__pyx_v_Z2 + __pyx_v_R12) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":153 * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) # <<<<<<<<<<<<<< * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 */ __pyx_v_aux13 = log(((__pyx_v_Z1 + __pyx_v_R11) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":154 * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 # <<<<<<<<<<<<<< * aux15 = aux12 - aux13 * res += (aux14 - aux15) */ __pyx_v_aux14 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":155 * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 # <<<<<<<<<<<<<< * res += (aux14 - aux15) * aux0 = (1.0/(1.0 + (n*n))) */ __pyx_v_aux15 = (__pyx_v_aux12 - __pyx_v_aux13); /* "fatiando/gravmag/_polyprism.pyx":156 * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 * res += (aux14 - aux15) # <<<<<<<<<<<<<< * aux0 = (1.0/(1.0 + (n*n))) * res *= aux0 */ __pyx_v_res = (__pyx_v_res + (__pyx_v_aux14 - __pyx_v_aux15)); /* "fatiando/gravmag/_polyprism.pyx":157 * aux15 = aux12 - aux13 * res += (aux14 - aux15) * aux0 = (1.0/(1.0 + (n*n))) # <<<<<<<<<<<<<< * res *= aux0 * kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_n * __pyx_v_n)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux0 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":158 * res += (aux14 - aux15) * aux0 = (1.0/(1.0 + (n*n))) * res *= aux0 # <<<<<<<<<<<<<< * kernel += res * return kernel */ 
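/* Sign note: kernelxy scales its accumulated edge sum by +aux0, with
   aux0 = 1.0/(1.0 + n*n) and n the edge slope computed at the top of the
   function, whereas kernelxx (above) and kernelxz (below) multiply by
   -aux0.  The 1/(1 + n*n) factor presumably compensates for
   parameterizing the edge by y with slope n; only the overall sign of
   this final normalization distinguishes the three kernels. */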
__pyx_v_res = (__pyx_v_res * __pyx_v_aux0); /* "fatiando/gravmag/_polyprism.pyx":159 * aux0 = (1.0/(1.0 + (n*n))) * res *= aux0 * kernel += res # <<<<<<<<<<<<<< * return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* "fatiando/gravmag/_polyprism.pyx":160 * res *= aux0 * kernel += res * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelxz(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":110 * return kernel * * cdef inline double kernelxy(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelxy", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":162 * return kernel * * cdef inline double kernelxz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux16; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_n; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_g; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":169 * aux14, aux15, aux16, n, g, d1, d2, \ * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * aux0 = X2 - X1 + dummy */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":170 * R11, R12, R21, R22, res * 
DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":171 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * aux0 = X2 - X1 + dummy # <<<<<<<<<<<<<< * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":172 * kernel = 0 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< * n = (aux0/aux1) * g = X1 - (Y1*n) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":173 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) # <<<<<<<<<<<<<< * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_n = (__pyx_v_aux0 / __pyx_v_aux1); /* "fatiando/gravmag/_polyprism.pyx":174 * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) * g = X1 - (Y1*n) # <<<<<<<<<<<<<< * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_g = (__pyx_v_X1 - (__pyx_v_Y1 * __pyx_v_n)); /* "fatiando/gravmag/_polyprism.pyx":175 * n = (aux0/aux1) * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* "fatiando/gravmag/_polyprism.pyx":176 * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux4 = (aux0*X1) + (aux1*Y1) # <<<<<<<<<<<<<< * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":177 * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) # <<<<<<<<<<<<<< * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":178 * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy # <<<<<<<<<<<<<< * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":179 * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy # <<<<<<<<<<<<<< * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + 
__pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":180 * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) # <<<<<<<<<<<<<< * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":181 * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) # <<<<<<<<<<<<<< * aux8 = Z1*Z1 * aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":182 * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 # <<<<<<<<<<<<<< * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* "fatiando/gravmag/_polyprism.pyx":183 * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 * aux9 = Z2*Z2 # <<<<<<<<<<<<<< * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* "fatiando/gravmag/_polyprism.pyx":184 * aux8 = Z1*Z1 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) # <<<<<<<<<<<<<< * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":185 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":186 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":187 * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":188 * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) # <<<<<<<<<<<<<< * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) */ __pyx_t_1 = (__pyx_v_R11 - __pyx_v_d1); __pyx_t_2 = (__pyx_v_R11 + __pyx_v_d1); if (unlikely(__pyx_t_2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux10 = log(((__pyx_t_1 / __pyx_t_2) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":189 * R22 = sqrt(aux7 + aux9) * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) # <<<<<<<<<<<<<< * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) */ __pyx_t_2 = (__pyx_v_R12 - __pyx_v_d1); __pyx_t_1 = (__pyx_v_R12 + __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux11 = log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":190 * aux10 = log((((R11 - d1)/(R11 + d1)) + 
dummy)) * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) # <<<<<<<<<<<<<< * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) * aux14 = (1.0/(2*d1)) */ __pyx_t_1 = (__pyx_v_R21 - __pyx_v_d2); __pyx_t_2 = (__pyx_v_R21 + __pyx_v_d2); if (unlikely(__pyx_t_2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux12 = log(((__pyx_t_1 / __pyx_t_2) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":191 * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) # <<<<<<<<<<<<<< * aux14 = (1.0/(2*d1)) * aux15 = (1.0/(2*d2)) */ __pyx_t_2 = (__pyx_v_R22 - __pyx_v_d2); __pyx_t_1 = (__pyx_v_R22 + __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":192 * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) * aux14 = (1.0/(2*d1)) # <<<<<<<<<<<<<< * aux15 = (1.0/(2*d2)) * aux16 = aux15*(aux13 - aux12) */ __pyx_t_1 = (2.0 * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":193 * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) * aux14 = (1.0/(2*d1)) * aux15 = (1.0/(2*d2)) # <<<<<<<<<<<<<< * aux16 = aux15*(aux13 - aux12) * res = (Y2*(1.0 + (n*n)) + g*n)*aux16 */ __pyx_t_1 = (2.0 * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux15 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":194 * aux14 = (1.0/(2*d1)) * aux15 = (1.0/(2*d2)) * aux16 = aux15*(aux13 - aux12) # <<<<<<<<<<<<<< * res = (Y2*(1.0 + (n*n)) + g*n)*aux16 * aux16 = aux14*(aux11 - aux10) */ __pyx_v_aux16 = (__pyx_v_aux15 * (__pyx_v_aux13 - __pyx_v_aux12)); /* "fatiando/gravmag/_polyprism.pyx":195 * aux15 = (1.0/(2*d2)) * aux16 = aux15*(aux13 - aux12) * res = (Y2*(1.0 + (n*n)) + g*n)*aux16 # <<<<<<<<<<<<<< * aux16 = aux14*(aux11 - aux10) * res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 */ __pyx_v_res = (((__pyx_v_Y2 * (1.0 + (__pyx_v_n * __pyx_v_n))) + (__pyx_v_g * __pyx_v_n)) * __pyx_v_aux16); /* "fatiando/gravmag/_polyprism.pyx":196 * aux16 = aux15*(aux13 - aux12) * res = (Y2*(1.0 + (n*n)) + g*n)*aux16 * aux16 = aux14*(aux11 - aux10) # <<<<<<<<<<<<<< * res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 * aux0 = (1.0/(1.0 + 
(n*n))) */ __pyx_v_aux16 = (__pyx_v_aux14 * (__pyx_v_aux11 - __pyx_v_aux10)); /* "fatiando/gravmag/_polyprism.pyx":197 * res = (Y2*(1.0 + (n*n)) + g*n)*aux16 * aux16 = aux14*(aux11 - aux10) * res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 # <<<<<<<<<<<<<< * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 */ __pyx_v_res = (__pyx_v_res - (((__pyx_v_Y1 * (1.0 + (__pyx_v_n * __pyx_v_n))) + (__pyx_v_g * __pyx_v_n)) * __pyx_v_aux16)); /* "fatiando/gravmag/_polyprism.pyx":198 * aux16 = aux14*(aux11 - aux10) * res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 * aux0 = (1.0/(1.0 + (n*n))) # <<<<<<<<<<<<<< * res *= -aux0 * kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_n * __pyx_v_n)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux0 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":199 * res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 # <<<<<<<<<<<<<< * kernel += res * return kernel */ __pyx_v_res = (__pyx_v_res * (-__pyx_v_aux0)); /* "fatiando/gravmag/_polyprism.pyx":200 * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 * kernel += res # <<<<<<<<<<<<<< * return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* "fatiando/gravmag/_polyprism.pyx":201 * res *= -aux0 * kernel += res * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelyy(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":162 * return kernel * * cdef inline double kernelxz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelxz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":203 * return kernel * * cdef inline double kernelyy(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; 
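/* Reader's note on the generated locals: the auxN variables are scratch
   temporaries mirroring the hand-unrolled algebra of the .pyx source.  In
   each kernel, with the station shifted to the origin, aux2 is the length
   of the polygon edge, d1/d2 are the projections of the two edge endpoints
   onto the edge direction, p is the signed perpendicular distance from the
   station to the edge line, and R11/R12/R21/R22 are the distances from the
   station to the four corners of the vertical face (endpoint 1/2 at depth
   Z1/Z2). */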
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_m; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_c; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":210 * aux14, aux15, p, m, c, d1, d2, \ * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * aux0 = X2 - X1 + dummy */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":211 * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":212 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * aux0 = X2 - X1 + dummy # <<<<<<<<<<<<<< * aux1 = Y2 - Y1 + dummy * m = (aux1/aux0) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":213 * kernel = 0 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< * m = (aux1/aux0) * c = Y1 - (X1*m) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":214 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy * m = (aux1/aux0) # <<<<<<<<<<<<<< * c = Y1 - (X1*m) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux0 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (__pyx_v_aux1 / __pyx_v_aux0); /* "fatiando/gravmag/_polyprism.pyx":215 * aux1 = Y2 - Y1 + dummy * m = (aux1/aux0) * c = Y1 - (X1*m) # <<<<<<<<<<<<<< * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) */ __pyx_v_c = (__pyx_v_Y1 - (__pyx_v_X1 * __pyx_v_m)); /* "fatiando/gravmag/_polyprism.pyx":216 * m = (aux1/aux0) * c = Y1 - (X1*m) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* "fatiando/gravmag/_polyprism.pyx":217 * c = Y1 - (X1*m) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) # <<<<<<<<<<<<<< * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_aux3 = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":218 * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE 
__pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_p = ((__pyx_v_aux3 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":219 * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) # <<<<<<<<<<<<<< * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":220 * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) # <<<<<<<<<<<<<< * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":221 * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy # <<<<<<<<<<<<<< * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":222 * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy # <<<<<<<<<<<<<< * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":223 * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) # <<<<<<<<<<<<<< * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":224 * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) # <<<<<<<<<<<<<< * aux8 = Z1*Z1 * aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":225 * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 # <<<<<<<<<<<<<< * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* "fatiando/gravmag/_polyprism.pyx":226 * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 * aux9 = Z2*Z2 # <<<<<<<<<<<<<< * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* "fatiando/gravmag/_polyprism.pyx":227 * aux8 = Z1*Z1 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) # <<<<<<<<<<<<<< * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":228 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* 
"fatiando/gravmag/_polyprism.pyx":229 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":230 * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":231 * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R22)); /* "fatiando/gravmag/_polyprism.pyx":232 * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R21)); /* "fatiando/gravmag/_polyprism.pyx":233 * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":234 * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) # <<<<<<<<<<<<<< * aux14 = ((p*aux12)/d2) * res = (c*X2*aux13) + (m*aux14) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":235 * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) # <<<<<<<<<<<<<< * res = (c*X2*aux13) + (m*aux14) * aux10 = atan2((Z2*d1), (p*R12)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d2); /* "fatiando/gravmag/_polyprism.pyx":236 * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) * res = (c*X2*aux13) + (m*aux14) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) */ __pyx_v_res = (((__pyx_v_c * __pyx_v_X2) * __pyx_v_aux13) + (__pyx_v_m * __pyx_v_aux14)); /* "fatiando/gravmag/_polyprism.pyx":237 * aux14 = ((p*aux12)/d2) * res = (c*X2*aux13) + (m*aux14) * aux10 = atan2((Z2*d1), (p*R12)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R12)); /* "fatiando/gravmag/_polyprism.pyx":238 * res = (c*X2*aux13) + (m*aux14) * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R11)); /* "fatiando/gravmag/_polyprism.pyx":239 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = 
atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":240 * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) # <<<<<<<<<<<<<< * aux14 = ((p*aux12)/d1) * res -= (c*X1*aux13) + (m*aux14) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":241 * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) # <<<<<<<<<<<<<< * res -= (c*X1*aux13) + (m*aux14) * aux10 = log(((Z2 + R22) + dummy)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d1); /* "fatiando/gravmag/_polyprism.pyx":242 * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) * res -= (c*X1*aux13) + (m*aux14) # <<<<<<<<<<<<<< * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) */ __pyx_v_res = (__pyx_v_res - (((__pyx_v_c * __pyx_v_X1) * __pyx_v_aux13) + (__pyx_v_m * __pyx_v_aux14))); /* "fatiando/gravmag/_polyprism.pyx":243 * aux14 = ((p*aux12)/d1) * res -= (c*X1*aux13) + (m*aux14) * aux10 = log(((Z2 + R22) + dummy)) # <<<<<<<<<<<<<< * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) */ __pyx_v_aux10 = log(((__pyx_v_Z2 + __pyx_v_R22) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":244 * res -= (c*X1*aux13) + (m*aux14) * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) # <<<<<<<<<<<<<< * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) */ __pyx_v_aux11 = log(((__pyx_v_Z1 + __pyx_v_R21) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":245 * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) # <<<<<<<<<<<<<< * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 */ __pyx_v_aux12 = log(((__pyx_v_Z2 + __pyx_v_R12) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":246 * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) # <<<<<<<<<<<<<< * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 */ __pyx_v_aux13 = log(((__pyx_v_Z1 + __pyx_v_R11) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":247 * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 # <<<<<<<<<<<<<< * aux15 = aux12 - aux13 * res += (m*(aux15 - aux14)) */ __pyx_v_aux14 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":248 * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 # <<<<<<<<<<<<<< * res += (m*(aux15 - aux14)) * aux1 = (1.0/(1.0 + (m*m))) */ __pyx_v_aux15 = (__pyx_v_aux12 - __pyx_v_aux13); /* "fatiando/gravmag/_polyprism.pyx":249 * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 * res += (m*(aux15 
- aux14)) # <<<<<<<<<<<<<< * aux1 = (1.0/(1.0 + (m*m))) * res *= aux1 */ __pyx_v_res = (__pyx_v_res + (__pyx_v_m * (__pyx_v_aux15 - __pyx_v_aux14))); /* "fatiando/gravmag/_polyprism.pyx":250 * aux15 = aux12 - aux13 * res += (m*(aux15 - aux14)) * aux1 = (1.0/(1.0 + (m*m))) # <<<<<<<<<<<<<< * res *= aux1 * kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_m * __pyx_v_m)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux1 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":251 * res += (m*(aux15 - aux14)) * aux1 = (1.0/(1.0 + (m*m))) * res *= aux1 # <<<<<<<<<<<<<< * kernel += res * return kernel */ __pyx_v_res = (__pyx_v_res * __pyx_v_aux1); /* "fatiando/gravmag/_polyprism.pyx":252 * aux1 = (1.0/(1.0 + (m*m))) * res *= aux1 * kernel += res # <<<<<<<<<<<<<< * return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* "fatiando/gravmag/_polyprism.pyx":253 * res *= aux1 * kernel += res * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelyz(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":203 * return kernel * * cdef inline double kernelyy(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelyy", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":255 * return kernel * * cdef inline double kernelyz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux16; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_m; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_c; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; 
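/* Reader's note: dummy = 1e-10 (initialized below) is folded into every
   difference, quotient, and log argument that can vanish when the station
   lies exactly on an edge or vertex.  It trades a negligible bias for
   numerical stability, so the ZeroDivisionError guards Cython emits around
   each division should be unreachable in practice. */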
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":262 * aux14, aux15, aux16, m, c, d1, d2, \ * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * aux0 = X2 - X1 + dummy */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":263 * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":264 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * aux0 = X2 - X1 + dummy # <<<<<<<<<<<<<< * aux1 = Y2 - Y1 + dummy * m = (aux1/aux0) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":265 * kernel = 0 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< * m = (aux1/aux0) * c = Y1 - (X1*m) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":266 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy * m = (aux1/aux0) # <<<<<<<<<<<<<< * c = Y1 - (X1*m) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux0 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 266; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (__pyx_v_aux1 / __pyx_v_aux0); /* "fatiando/gravmag/_polyprism.pyx":267 * aux1 = Y2 - Y1 + dummy * m = (aux1/aux0) * c = Y1 - (X1*m) # <<<<<<<<<<<<<< * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_c = (__pyx_v_Y1 - (__pyx_v_X1 * __pyx_v_m)); /* "fatiando/gravmag/_polyprism.pyx":268 * m = (aux1/aux0) * c = Y1 - (X1*m) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* "fatiando/gravmag/_polyprism.pyx":269 * c = Y1 - (X1*m) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux4 = (aux0*X1) + (aux1*Y1) # <<<<<<<<<<<<<< * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":270 * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) # <<<<<<<<<<<<<< * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":271 * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy # <<<<<<<<<<<<<< * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD 
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":272 * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy # <<<<<<<<<<<<<< * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":273 * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) # <<<<<<<<<<<<<< * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":274 * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) # <<<<<<<<<<<<<< * aux8 = Z1*Z1 * aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":275 * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 # <<<<<<<<<<<<<< * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* "fatiando/gravmag/_polyprism.pyx":276 * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 * aux9 = Z2*Z2 # <<<<<<<<<<<<<< * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* "fatiando/gravmag/_polyprism.pyx":277 * aux8 = Z1*Z1 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) # <<<<<<<<<<<<<< * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":278 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":279 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":280 * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":281 * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) # <<<<<<<<<<<<<< * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) */ __pyx_t_1 = (__pyx_v_R11 - __pyx_v_d1); __pyx_t_2 = (__pyx_v_R11 + __pyx_v_d1); if (unlikely(__pyx_t_2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 281; __pyx_clineno 
= __LINE__; goto __pyx_L1_error;} } __pyx_v_aux10 = log(((__pyx_t_1 / __pyx_t_2) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":282 * R22 = sqrt(aux7 + aux9) * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) # <<<<<<<<<<<<<< * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) */ __pyx_t_2 = (__pyx_v_R12 - __pyx_v_d1); __pyx_t_1 = (__pyx_v_R12 + __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux11 = log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":283 * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) # <<<<<<<<<<<<<< * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) * aux14 = (1.0/(2*d1)) */ __pyx_t_1 = (__pyx_v_R21 - __pyx_v_d2); __pyx_t_2 = (__pyx_v_R21 + __pyx_v_d2); if (unlikely(__pyx_t_2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux12 = log(((__pyx_t_1 / __pyx_t_2) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":284 * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) # <<<<<<<<<<<<<< * aux14 = (1.0/(2*d1)) * aux15 = (1.0/(2*d2)) */ __pyx_t_2 = (__pyx_v_R22 - __pyx_v_d2); __pyx_t_1 = (__pyx_v_R22 + __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":285 * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) * aux14 = (1.0/(2*d1)) # <<<<<<<<<<<<<< * aux15 = (1.0/(2*d2)) * aux16 = aux15*(aux13 - aux12) */ __pyx_t_1 = (2.0 * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 285; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":286 * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) * aux14 = (1.0/(2*d1)) * aux15 = (1.0/(2*d2)) # <<<<<<<<<<<<<< * aux16 = aux15*(aux13 - aux12) * res = (X2*(1.0 + (m*m)) + c*m)*aux16 */ __pyx_t_1 = (2.0 * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux15 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":287 * aux14 = (1.0/(2*d1)) * aux15 = (1.0/(2*d2)) * aux16 = aux15*(aux13 - aux12) # <<<<<<<<<<<<<< * res = (X2*(1.0 + (m*m)) + c*m)*aux16 * aux16 = aux14*(aux11 - aux10) */ __pyx_v_aux16 = (__pyx_v_aux15 * (__pyx_v_aux13 - __pyx_v_aux12)); /* "fatiando/gravmag/_polyprism.pyx":288 * aux15 = (1.0/(2*d2)) * aux16 = aux15*(aux13 - aux12) * res = (X2*(1.0 + (m*m)) + c*m)*aux16 # <<<<<<<<<<<<<< * aux16 = aux14*(aux11 - aux10) * res -= (X1*(1.0 + (m*m)) + c*m)*aux16 */ __pyx_v_res = (((__pyx_v_X2 * (1.0 + (__pyx_v_m * __pyx_v_m))) + (__pyx_v_c * __pyx_v_m)) * __pyx_v_aux16); /* "fatiando/gravmag/_polyprism.pyx":289 * aux16 = aux15*(aux13 - aux12) * res = (X2*(1.0 + (m*m)) + c*m)*aux16 * aux16 = aux14*(aux11 - aux10) # <<<<<<<<<<<<<< * res -= (X1*(1.0 + (m*m)) + c*m)*aux16 * aux1 = (1.0/(1.0 + (m*m))) */ __pyx_v_aux16 = (__pyx_v_aux14 * (__pyx_v_aux11 - __pyx_v_aux10)); /* "fatiando/gravmag/_polyprism.pyx":290 * res = (X2*(1.0 + (m*m)) + c*m)*aux16 * aux16 = aux14*(aux11 - aux10) * res -= (X1*(1.0 + (m*m)) + c*m)*aux16 # <<<<<<<<<<<<<< * aux1 = (1.0/(1.0 + (m*m))) * res *= aux1 */ __pyx_v_res = (__pyx_v_res - (((__pyx_v_X1 * (1.0 + (__pyx_v_m * __pyx_v_m))) + (__pyx_v_c * __pyx_v_m)) * __pyx_v_aux16)); /* "fatiando/gravmag/_polyprism.pyx":291 * aux16 = aux14*(aux11 - aux10) * res -= (X1*(1.0 + (m*m)) + c*m)*aux16 * aux1 = (1.0/(1.0 + (m*m))) # <<<<<<<<<<<<<< * res *= aux1 * kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_m * __pyx_v_m)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux1 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":292 * res -= (X1*(1.0 + (m*m)) + c*m)*aux16 * aux1 = (1.0/(1.0 + (m*m))) * res *= aux1 # <<<<<<<<<<<<<< * kernel += res * return kernel */ __pyx_v_res = (__pyx_v_res * __pyx_v_aux1); /* "fatiando/gravmag/_polyprism.pyx":293 * aux1 = (1.0/(1.0 + (m*m))) * res *= aux1 * kernel += res # <<<<<<<<<<<<<< * return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* "fatiando/gravmag/_polyprism.pyx":294 * res *= aux1 * kernel += res * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelzz(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":255 * return kernel * * cdef inline double kernelyz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelyz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":296 * return kernel * * cdef inline double kernelzz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { 
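/* Reader's note: kernelzz keeps only the atan2 difference terms -- the
   solid-angle-like contribution of one vertical face evaluated at the top
   (Z1) and bottom (Z2) depths -- so it needs far fewer scratch variables
   than the preceding kernels, which also carry log terms. */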
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":302 * aux5, aux6, aux7, aux8, aux9, aux10, aux11, aux12, p, d1, d2, \ * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * aux0 = X2 - X1 + dummy */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":303 * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":304 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * aux0 = X2 - X1 + dummy # <<<<<<<<<<<<<< * aux1 = Y2 - Y1 + dummy * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":305 * kernel = 0 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":306 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* "fatiando/gravmag/_polyprism.pyx":307 * aux1 = Y2 - Y1 + dummy * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) # <<<<<<<<<<<<<< * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_aux3 = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":308 * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD 
PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_p = ((__pyx_v_aux3 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":309 * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) # <<<<<<<<<<<<<< * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":310 * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) # <<<<<<<<<<<<<< * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":311 * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy # <<<<<<<<<<<<<< * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 311; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":312 * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy # <<<<<<<<<<<<<< * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 312; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":313 * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) # <<<<<<<<<<<<<< * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":314 * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) # <<<<<<<<<<<<<< * aux8 = Z1*Z1 * aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":315 * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 # <<<<<<<<<<<<<< * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* "fatiando/gravmag/_polyprism.pyx":316 * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 * aux9 = Z2*Z2 # <<<<<<<<<<<<<< * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* "fatiando/gravmag/_polyprism.pyx":317 * aux8 = Z1*Z1 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) # <<<<<<<<<<<<<< * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":318 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":319 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) * 
aux10 = atan2((Z2*d2), (p*R22)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":320 * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":321 * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R22)); /* "fatiando/gravmag/_polyprism.pyx":322 * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * res = aux12 */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R21)); /* "fatiando/gravmag/_polyprism.pyx":323 * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * res = aux12 * aux10 = atan2((Z2*d1), (p*R12)) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":324 * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 * res = aux12 # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) */ __pyx_v_res = __pyx_v_aux12; /* "fatiando/gravmag/_polyprism.pyx":325 * aux12 = aux10 - aux11 * res = aux12 * aux10 = atan2((Z2*d1), (p*R12)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R12)); /* "fatiando/gravmag/_polyprism.pyx":326 * res = aux12 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * res -= aux12 */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R11)); /* "fatiando/gravmag/_polyprism.pyx":327 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * res -= aux12 * kernel += res */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":328 * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 * res -= aux12 # <<<<<<<<<<<<<< * kernel += res * return kernel */ __pyx_v_res = (__pyx_v_res - __pyx_v_aux12); /* "fatiando/gravmag/_polyprism.pyx":329 * aux12 = aux10 - aux11 * res -= aux12 * kernel += res # <<<<<<<<<<<<<< * return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* "fatiando/gravmag/_polyprism.pyx":330 * res -= aux12 * kernel += res * return kernel # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":296 * return kernel * * cdef inline double kernelzz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelzz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":334 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_1gz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ 
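/* Illustrative sketch, not emitted by Cython: how the per-edge kernels
   defined above are typically folded over a polygon to get one station's
   contribution.  The helper name sum_zz_edges, the coordinate shift, and
   the modular wrap for kp1 are assumptions inferred from the prange loop
   in gz below, which performs the same accumulation inline. */
static CYTHON_UNUSED double sum_zz_edges(const double *x, const double *y,
                                         unsigned int nverts,
                                         double xp, double yp,
                                         double Z1, double Z2)
{
    double total = 0.0;
    unsigned int k;
    for (k = 0; k < nverts; k++) {
        unsigned int kp1 = (k + 1) % nverts; /* wrap to close the polygon */
        /* Shift vertex coordinates so the station sits at the origin. */
        double X1 = x[k] - xp,   Y1 = y[k] - yp;
        double X2 = x[kp1] - xp, Y2 = y[kp1] - yp;
        total += __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(
            X1, Y1, X2, Y2, Z1, Z2, Z1 * Z1, Z2 * Z2);
    }
    return total;
}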
static char __pyx_doc_8fatiando_7gravmag_10_polyprism_gz[] = "gz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_1gz = {__Pyx_NAMESTR("gz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_1gz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_gz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_1gz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, 
__pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 335; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 336; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 337; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 338; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), 
__pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_gz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_gz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, 
&__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":344 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 344; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
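/* Note on the parallel region that follows: the Cython
 * "with nogil: for i in prange(size):" loop quoted below compiles to the
 * "#pragma omp parallel" / "#pragma omp for" pair seen in this function.
 * Each observation point i is independent, so the per-point scalars
 * (Z1, Z2, Z1_sqr, Z2_sqr, X1, Y1, X2, Y2, k, kp1) are privatized with
 * lastprivate clauses, the loop index i is firstprivate+lastprivate, and
 * kernel appears in a "reduction(+:...)" clause on the parallel directive.
 * Python-level errors raised inside the region (e.g. the
 * ZeroDivisionError guard on "% nverts") re-acquire the GIL with
 * PyGILState_Ensure() and are propagated out of the region through the
 * __pyx_parallel_why / __pyx_parallel_exc_* machinery visible below. */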
__pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":345 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 345; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":346 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":347 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); unsigned int __pyx_parallel_temp4 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); unsigned int __pyx_parallel_temp6 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); unsigned int __pyx_parallel_temp9 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_Z2) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_X2) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = 
((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":348 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":349 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":350 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":351 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":352 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":353 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":354 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":355 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":356 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = 
PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":357 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":358 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":359 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":360 * Y2 = y[kp1] - yp[i] * kernel += kernelz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates0) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_Y1; __pyx_parallel_temp1 = __pyx_v_Z2_sqr; __pyx_parallel_temp2 = __pyx_v_Z1_sqr; __pyx_parallel_temp3 = __pyx_v_Y2; __pyx_parallel_temp4 = __pyx_v_k; __pyx_parallel_temp5 = __pyx_v_Z1; __pyx_parallel_temp6 = __pyx_v_kp1; __pyx_parallel_temp7 = __pyx_v_Z2; __pyx_parallel_temp8 = __pyx_v_kernel; __pyx_parallel_temp9 = __pyx_v_i; __pyx_parallel_temp10 = __pyx_v_X1; __pyx_parallel_temp11 = __pyx_v_X2; } __pyx_L13:; #ifdef 
_OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Y1 = __pyx_parallel_temp0; __pyx_v_Z2_sqr = __pyx_parallel_temp1; __pyx_v_Z1_sqr = __pyx_parallel_temp2; __pyx_v_Y2 = __pyx_parallel_temp3; __pyx_v_k = __pyx_parallel_temp4; __pyx_v_Z1 = __pyx_parallel_temp5; __pyx_v_kp1 = __pyx_parallel_temp6; __pyx_v_Z2 = __pyx_parallel_temp7; __pyx_v_kernel = __pyx_parallel_temp8; __pyx_v_i = __pyx_parallel_temp9; __pyx_v_X1 = __pyx_parallel_temp10; __pyx_v_X2 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":346 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":334 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); 
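/* ------------------------------------------------------------------
 * For reference, the Cython source of gz() as reconstructed from the
 * "_polyprism.pyx" fragments quoted in the comments above (pyx lines
 * 334-360).  The first three parameters are quoted verbatim; the
 * x/y/res parameters and the cdef block are completed by symmetry
 * with the quoted context lines, and kernelz() is the per-edge kernel
 * defined elsewhere in the module:
 *
 * @cython.wraparound(False)
 * @cython.boundscheck(False)
 * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None,
 *        numpy.ndarray[DTYPE_T, ndim=1] yp not None,
 *        numpy.ndarray[DTYPE_T, ndim=1] zp not None,
 *        numpy.ndarray[DTYPE_T, ndim=1] x not None,
 *        numpy.ndarray[DTYPE_T, ndim=1] y not None,
 *        double z1, double z2, double density,
 *        numpy.ndarray[DTYPE_T, ndim=1] res not None):
 *     cdef:
 *         unsigned int nverts, size, i, k, kp1
 *         DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr
 *     nverts = len(x)
 *     size = len(res)
 *     with nogil:
 *         for i in prange(size):
 *             Z1 = z1 - zp[i]
 *             Z2 = z2 - zp[i]
 *             Z1_sqr = Z1**2
 *             Z2_sqr = Z2**2
 *             kernel = 0
 *             for k in range(nverts):
 *                 X1 = x[k] - xp[i]
 *                 Y1 = y[k] - yp[i]
 *                 kp1 = (k + 1) % nverts
 *                 X2 = x[kp1] - xp[i]
 *                 Y2 = y[kp1] - yp[i]
 *                 kernel += kernelz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
 *             res[i] += kernel*density
 * ------------------------------------------------------------------ */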
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":364 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_3gxx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_2gxx[] = "gxx(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_3gxx = {__Pyx_NAMESTR("gxx"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_3gxx, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_2gxx)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_3gxx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxx (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 
9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxx") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxx", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 365; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if 
(unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 368; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_2gxx(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_2gxx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxx", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; 
__pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":374 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":375 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":376 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":377 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); unsigned int __pyx_parallel_temp4 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); unsigned int __pyx_parallel_temp8 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); unsigned int __pyx_parallel_temp11 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_X2) 
lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Y1) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_k) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_kp1) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); /* "fatiando/gravmag/_polyprism.pyx":378 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":379 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":380 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":381 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":382 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":383 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":384 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":385 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = 
((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":386 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":387 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":388 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":389 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":390 * Y2 = y[kp1] - yp[i] * kernel += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD 
PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates1) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_X2; __pyx_parallel_temp1 = __pyx_v_kernel; __pyx_parallel_temp2 = __pyx_v_Z2_sqr; __pyx_parallel_temp3 = __pyx_v_Y1; __pyx_parallel_temp4 = __pyx_v_i; __pyx_parallel_temp5 = __pyx_v_Z1; __pyx_parallel_temp6 = __pyx_v_Z1_sqr; __pyx_parallel_temp7 = __pyx_v_Y2; __pyx_parallel_temp8 = __pyx_v_k; __pyx_parallel_temp9 = __pyx_v_X1; __pyx_parallel_temp10 = __pyx_v_Z2; __pyx_parallel_temp11 = __pyx_v_kp1; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_X2 = __pyx_parallel_temp0; __pyx_v_kernel = __pyx_parallel_temp1; __pyx_v_Z2_sqr = __pyx_parallel_temp2; __pyx_v_Y1 = __pyx_parallel_temp3; __pyx_v_i = __pyx_parallel_temp4; __pyx_v_Z1 = __pyx_parallel_temp5; __pyx_v_Z1_sqr = __pyx_parallel_temp6; __pyx_v_Y2 = __pyx_parallel_temp7; __pyx_v_k = __pyx_parallel_temp8; __pyx_v_X1 = __pyx_parallel_temp9; __pyx_v_Z2 = __pyx_parallel_temp10; __pyx_v_kp1 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":376 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":364 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); 
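/* gxx() above is identical to gz() except for the per-edge kernel:
 * pyx line 389 (quoted in the comments above) accumulates
 *
 *     kernel += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
 *
 * and pyx line 390 applies "res[i] += kernel*density" exactly as in
 * gz(); the wrapper, buffer acquisition, and OpenMP scaffolding are
 * otherwise the same code pattern. */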
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":394 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_5gxy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_4gxy[] = "gxy(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_5gxy = {__Pyx_NAMESTR("gxy"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_5gxy, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_4gxy)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_5gxy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxy (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = 
PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxy") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} 
__pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_4gxy(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_4gxy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; 
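/* This gxy() function repeats the same wrapper / buffer-acquisition /
 * prange structure as gz() and gxx() (pyx lines 394 onward).  Its
 * per-edge kernel call is truncated in this excerpt; by symmetry with
 * kernelz()/kernelxx() it is presumably kernelxy(), but that name is
 * an inference, not quoted here. */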
unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxy", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":404 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 404; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":405 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 405; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":406 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":407 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { unsigned int __pyx_parallel_temp0 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; unsigned int __pyx_parallel_temp11 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP 
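/*
 * Descriptive note (editorial, not Cython output) on the pragma below: this
 * is how Cython translates "for i in prange(size)".  Per Cython's prange
 * rules, the in-place "kernel += ..." in the loop body makes kernel a
 * reduction(+) variable; the __pyx_t_* index temporaries and the per-thread
 * error-reporting variables (__pyx_filename, __pyx_lineno, __pyx_clineno)
 * are private; the shared __pyx_parallel_* bookkeeping lets any thread
 * publish an exception, and the "if (__pyx_parallel_why < 2)" guard at the
 * top of each iteration makes the remaining iterations no-ops once an error
 * (why == 4) or return (why == 3) has been recorded, since "#pragma omp for"
 * loops cannot simply break.  firstprivate/lastprivate on i and lastprivate
 * on the body variables preserve Python semantics: after the loop, i, k,
 * X1, ... hold the values from the final iteration, as in pure Python.
 */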
#pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_k) lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_kp1) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); /* "fatiando/gravmag/_polyprism.pyx":408 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":409 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":410 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":411 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":412 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":413 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":414 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = 
((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":415 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":416 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":417 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":418 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":419 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":420 * Y2 = y[kp1] - yp[i] * kernel += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, 
__pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates2) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_k; __pyx_parallel_temp1 = __pyx_v_Y1; __pyx_parallel_temp2 = __pyx_v_X2; __pyx_parallel_temp3 = __pyx_v_Z1; __pyx_parallel_temp4 = __pyx_v_Y2; __pyx_parallel_temp5 = __pyx_v_Z2_sqr; __pyx_parallel_temp6 = __pyx_v_Z1_sqr; __pyx_parallel_temp7 = __pyx_v_Z2; __pyx_parallel_temp8 = __pyx_v_kernel; __pyx_parallel_temp9 = __pyx_v_X1; __pyx_parallel_temp10 = __pyx_v_kp1; __pyx_parallel_temp11 = __pyx_v_i; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_k = __pyx_parallel_temp0; __pyx_v_Y1 = __pyx_parallel_temp1; __pyx_v_X2 = __pyx_parallel_temp2; __pyx_v_Z1 = __pyx_parallel_temp3; __pyx_v_Y2 = __pyx_parallel_temp4; __pyx_v_Z2_sqr = __pyx_parallel_temp5; __pyx_v_Z1_sqr = __pyx_parallel_temp6; __pyx_v_Z2 = __pyx_parallel_temp7; __pyx_v_kernel = __pyx_parallel_temp8; __pyx_v_X1 = __pyx_parallel_temp9; __pyx_v_kp1 = __pyx_parallel_temp10; __pyx_v_i = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":406 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":394 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not 
None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":424 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_7gxz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_6gxz[] = "gxz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_7gxz = {__Pyx_NAMESTR("gxz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_7gxz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_6gxz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_7gxz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = 
PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 429; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && 
PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 429; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 429; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 425; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 426; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 427; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 428; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 430; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_6gxz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_6gxz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; 
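/*
 * Editorial sketch of what the generated gxz() computes, hand-written for
 * readability.  This is a simplified equivalent, not the library's code: it
 * assumes DTYPE_T is double (the typedef's underlying type is not shown in
 * this excerpt), takes plain pointers instead of ndarray buffers, drops the
 * buffer/refcount/exception machinery, and uses "kernelxz" to stand for the
 * module's static __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz:
 *
 *     static void gxz_plain(const double *xp, const double *yp,
 *                           const double *zp, const double *x,
 *                           const double *y, size_t nverts,
 *                           double z1, double z2, double density,
 *                           double *res, size_t size)
 *     {
 *         size_t i;
 *     #pragma omp parallel for
 *         for (i = 0; i < size; i++) {
 *             // Vertical offsets of the prism's top/bottom from this point.
 *             double Z1 = z1 - zp[i], Z2 = z2 - zp[i];
 *             double Z1_sqr = Z1 * Z1, Z2_sqr = Z2 * Z2;
 *             double kernel = 0.0;
 *             size_t k;
 *             for (k = 0; k < nverts; k++) {
 *                 // Edge from vertex k to k+1, wrapping around the polygon.
 *                 size_t kp1 = (k + 1) % nverts;
 *                 double X1 = x[k] - xp[i],   Y1 = y[k] - yp[i];
 *                 double X2 = x[kp1] - xp[i], Y2 = y[kp1] - yp[i];
 *                 kernel += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr);
 *             }
 *             res[i] += kernel * density;
 *         }
 *     }
 *
 * No reduction clause is needed in this form because kernel is scoped inside
 * the parallel loop body; the generated code needs reduction(+:kernel) only
 * because Cython hoists kernel to function scope.
 */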
__Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, 
&__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":434 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 434; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":435 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 435; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":436 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":437 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); unsigned int __pyx_parallel_temp2 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); unsigned int __pyx_parallel_temp5 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); unsigned int __pyx_parallel_temp9 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T 
__pyx_parallel_temp10 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Z2) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Y1) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":438 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":439 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":440 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":441 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* 
"fatiando/gravmag/_polyprism.pyx":442 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":443 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":444 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":445 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":446 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 446; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":447 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":448 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":449 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] 
* kernel += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":450 * Y2 = y[kp1] - yp[i] * kernel += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates3) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_kernel; __pyx_parallel_temp1 = __pyx_v_Y2; __pyx_parallel_temp2 = __pyx_v_k; __pyx_parallel_temp3 = __pyx_v_Z1; __pyx_parallel_temp4 = __pyx_v_Z2_sqr; __pyx_parallel_temp5 = __pyx_v_kp1; __pyx_parallel_temp6 = __pyx_v_X2; __pyx_parallel_temp7 = __pyx_v_Z1_sqr; __pyx_parallel_temp8 = __pyx_v_Z2; __pyx_parallel_temp9 = __pyx_v_i; __pyx_parallel_temp10 = __pyx_v_X1; __pyx_parallel_temp11 = __pyx_v_Y1; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_kernel = __pyx_parallel_temp0; __pyx_v_Y2 = __pyx_parallel_temp1; __pyx_v_k = __pyx_parallel_temp2; __pyx_v_Z1 = __pyx_parallel_temp3; __pyx_v_Z2_sqr = __pyx_parallel_temp4; __pyx_v_kp1 = __pyx_parallel_temp5; __pyx_v_X2 = __pyx_parallel_temp6; __pyx_v_Z1_sqr = __pyx_parallel_temp7; __pyx_v_Z2 = __pyx_parallel_temp8; __pyx_v_i = __pyx_parallel_temp9; __pyx_v_X1 = __pyx_parallel_temp10; __pyx_v_Y1 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":436 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":424 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":454 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject 
*__pyx_pw_8fatiando_7gravmag_10_polyprism_9gyy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_8gyy[] = "gyy(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_9gyy = {__Pyx_NAMESTR("gyy"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_9gyy, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_8gyy)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_9gyy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gyy (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 6); {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gyy") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 459; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 459; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 459; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gyy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 456; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 457; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 458; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_8gyy(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_8gyy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gyy", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem 
__pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":464 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if 
(unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":465 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 465; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":466 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":467 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { unsigned int __pyx_parallel_temp0 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); unsigned int __pyx_parallel_temp2 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_Z1_sqr) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Y2) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_Z1_sqr = 
((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":468 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":469 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":470 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":471 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":472 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":473 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":474 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":475 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":476 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = 
(__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 476; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":477 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":478 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":479 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":480 * Y2 = y[kp1] - yp[i] * kernel += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates4) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_kp1; __pyx_parallel_temp1 = __pyx_v_Z1_sqr; __pyx_parallel_temp2 = __pyx_v_i; __pyx_parallel_temp3 = __pyx_v_kernel; __pyx_parallel_temp4 = __pyx_v_Z2; __pyx_parallel_temp5 = __pyx_v_Z1; __pyx_parallel_temp6 = __pyx_v_Y1; __pyx_parallel_temp7 = __pyx_v_X2; __pyx_parallel_temp8 = __pyx_v_X1; __pyx_parallel_temp9 
= __pyx_v_Z2_sqr; __pyx_parallel_temp10 = __pyx_v_k; __pyx_parallel_temp11 = __pyx_v_Y2; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_kp1 = __pyx_parallel_temp0; __pyx_v_Z1_sqr = __pyx_parallel_temp1; __pyx_v_i = __pyx_parallel_temp2; __pyx_v_kernel = __pyx_parallel_temp3; __pyx_v_Z2 = __pyx_parallel_temp4; __pyx_v_Z1 = __pyx_parallel_temp5; __pyx_v_Y1 = __pyx_parallel_temp6; __pyx_v_X2 = __pyx_parallel_temp7; __pyx_v_X1 = __pyx_parallel_temp8; __pyx_v_Z2_sqr = __pyx_parallel_temp9; __pyx_v_k = __pyx_parallel_temp10; __pyx_v_Y2 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":466 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":454 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gyy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); 
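/* The inline "fatiando/gravmag/_polyprism.pyx" annotations quoted above allow
   the Cython source of gyy() to be reconstructed approximately as follows
   (pieced together from those quoted fragments, not checked against the
   original .pyx):

       nverts = len(x)
       size = len(res)
       with nogil:
           for i in prange(size):
               Z1 = z1 - zp[i]
               Z2 = z2 - zp[i]
               Z1_sqr = Z1**2
               Z2_sqr = Z2**2
               kernel = 0
               for k in range(nverts):
                   X1 = x[k] - xp[i]
                   Y1 = y[k] - yp[i]
                   kp1 = (k + 1) % nverts
                   X2 = x[kp1] - xp[i]
                   Y2 = y[kp1] - yp[i]
                   kernel += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
               res[i] += kernel*density

   The in-place += on `kernel` is what makes Cython emit the
   reduction(+:__pyx_v_kernel) clause on the omp parallel pragma above. */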
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":484 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_11gyz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_10gyz[] = "gyz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_11gyz = {__Pyx_NAMESTR("gyz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_11gyz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_10gyz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_11gyz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gyz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, 
__pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gyz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gyz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 485; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 486; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 487; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 488; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 490; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_10gyz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_10gyz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gyz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = 
NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| 
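/* Buffer acquisition note: every ndarray argument is validated and pinned via
   __Pyx_GetBufferAndValidate against the same 1-D DTYPE_T buffer spec
   (PyBUF_FORMAT | PyBUF_STRIDES); only `res` additionally requests
   PyBUF_WRITABLE, since it is the one output array the loop accumulates
   into. */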
PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":494 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 494; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":495 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 495; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":496 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":497 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); unsigned int __pyx_parallel_temp1 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; unsigned int __pyx_parallel_temp11 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS 
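/* GIL handling in the parallel region: when compiled with OpenMP, each worker
   thread first calls PyGILState_Ensure() to obtain a valid thread state, then
   Py_BEGIN_ALLOW_THREADS releases the GIL again so the loop below runs
   without it; the GIL is only briefly re-acquired on the error path. */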
#endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Y1) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_k) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); /* "fatiando/gravmag/_polyprism.pyx":498 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":499 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":500 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":501 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":502 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":503 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":504 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":505 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - 
xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":506 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 506; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":507 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":508 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":509 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":510 * Y2 = y[kp1] - yp[i] * kernel += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = 
__pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates5) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_Z2; __pyx_parallel_temp1 = __pyx_v_kp1; __pyx_parallel_temp2 = __pyx_v_Z1; __pyx_parallel_temp3 = __pyx_v_X1; __pyx_parallel_temp4 = __pyx_v_Z2_sqr; __pyx_parallel_temp5 = __pyx_v_Y2; __pyx_parallel_temp6 = __pyx_v_X2; __pyx_parallel_temp7 = __pyx_v_Z1_sqr; __pyx_parallel_temp8 = __pyx_v_Y1; __pyx_parallel_temp9 = __pyx_v_kernel; __pyx_parallel_temp10 = __pyx_v_i; __pyx_parallel_temp11 = __pyx_v_k; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Z2 = __pyx_parallel_temp0; __pyx_v_kp1 = __pyx_parallel_temp1; __pyx_v_Z1 = __pyx_parallel_temp2; __pyx_v_X1 = __pyx_parallel_temp3; __pyx_v_Z2_sqr = __pyx_parallel_temp4; __pyx_v_Y2 = __pyx_parallel_temp5; __pyx_v_X2 = __pyx_parallel_temp6; __pyx_v_Z1_sqr = __pyx_parallel_temp7; __pyx_v_Y1 = __pyx_parallel_temp8; __pyx_v_kernel = __pyx_parallel_temp9; __pyx_v_i = __pyx_parallel_temp10; __pyx_v_k = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":496 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":484 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); 
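/* Abnormal-exit protocol: the first thread to fail stores its exception in
   the shared __pyx_parallel_exc_* slots under the GIL (guarded by
   "if (!__pyx_parallel_exc_type)", so the first error wins), sets
   __pyx_parallel_why and flushes it; the other threads then see
   __pyx_parallel_why >= 2 and skip their remaining iterations. After the
   region, why == 3 is mapped to a `return` and why == 4 re-raises the
   stored exception. */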
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gyz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":514 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_13gzz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_12gzz[] = "gzz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_13gzz = {__Pyx_NAMESTR("gzz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_13gzz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_12gzz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_13gzz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gzz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 1); 
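/* Argument unpacking in these wrappers is two-phase: with no keywords, the
   positional tuple is indexed directly via PyTuple_GET_ITEM (exactly 9
   values); otherwise a fall-through switch performs PyDict_GetItem lookups in
   declaration order and raises via __Pyx_RaiseArgtupleInvalid with the index
   of the first missing required argument. */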
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gzz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 519; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 519; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 519; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 
9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gzz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 515; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 518; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 520; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_12gzz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_12gzz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int 
__pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gzz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, 
&__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":524 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 524; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":525 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":526 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":527 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { unsigned int __pyx_parallel_temp0 = 0xbad0bad0; unsigned int __pyx_parallel_temp1 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); unsigned int __pyx_parallel_temp3 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely 
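/* likely()/unlikely() are temporarily redefined to plain (x) around the
   OpenMP region on Apple GCC builds newer than 2.95, and restored to
   __builtin_expect afterwards; this appears to be a workaround for
   __builtin_expect problems inside parallel regions on that toolchain. */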
#define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Z2_sqr) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_X2) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":528 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":529 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":530 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":531 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":532 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":533 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":534 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # 
<<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":535 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":536 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 536; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":537 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":538 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":539 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":540 * Y2 = y[kp1] - yp[i] * kernel += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; 
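/*
 * Editor's note: the `nverts == 0` guard a few lines above shows the
 * pattern Cython uses to raise a Python exception from inside a
 * nogil/OpenMP region. CPython API calls require the GIL, so the guard
 * re-acquires it just long enough to set the error indicator, releases
 * it again, and unwinds through the per-thread error label
 * (__pyx_L8_error). A minimal sketch of the same pattern (the function
 * name is ours; when extracted it belongs at file scope):
 */
#if 0  /* sketch only; never compiled into the module */
#include <Python.h>
static void demo_raise_from_nogil(void)
{
    PyGILState_STATE gil = PyGILState_Ensure();   /* take the GIL */
    PyErr_SetString(PyExc_ZeroDivisionError,
                    "integer division or modulo by zero");
    PyGILState_Release(gil);                      /* give it back */
    /* the caller then signals failure via flags/goto, without
       touching any further Python state from the parallel region */
}
#endif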
*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates6) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_kp1; __pyx_parallel_temp1 = __pyx_v_k; __pyx_parallel_temp2 = __pyx_v_Z2_sqr; __pyx_parallel_temp3 = __pyx_v_i; __pyx_parallel_temp4 = __pyx_v_X1; __pyx_parallel_temp5 = __pyx_v_Y2; __pyx_parallel_temp6 = __pyx_v_Z1; __pyx_parallel_temp7 = __pyx_v_Z1_sqr; __pyx_parallel_temp8 = __pyx_v_kernel; __pyx_parallel_temp9 = __pyx_v_Z2; __pyx_parallel_temp10 = __pyx_v_Y1; __pyx_parallel_temp11 = __pyx_v_X2; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_kp1 = __pyx_parallel_temp0; __pyx_v_k = __pyx_parallel_temp1; __pyx_v_Z2_sqr = __pyx_parallel_temp2; __pyx_v_i = __pyx_parallel_temp3; __pyx_v_X1 = __pyx_parallel_temp4; __pyx_v_Y2 = __pyx_parallel_temp5; __pyx_v_Z1 = __pyx_parallel_temp6; __pyx_v_Z1_sqr = __pyx_parallel_temp7; __pyx_v_kernel = __pyx_parallel_temp8; __pyx_v_Z2 = __pyx_parallel_temp9; __pyx_v_Y1 = __pyx_parallel_temp10; __pyx_v_X2 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":526 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":514 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gzz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":544 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject 
*__pyx_pw_8fatiando_7gravmag_10_polyprism_15tf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_14tf[] = "tf(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double mx, double my, double mz, double fx, double fy, double fz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_15tf = {__Pyx_NAMESTR("tf"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_15tf, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_14tf)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_15tf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; double __pyx_v_fx; double __pyx_v_fy; double __pyx_v_fz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("tf (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_fx,&__pyx_n_s_fy,&__pyx_n_s_fz,&__pyx_n_s_res,0}; PyObject* values[14] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13); case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 4); {__pyx_filename 
= __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 11: if (likely((values[11] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fy)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 11); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 12: if (likely((values[12] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 12); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 13: if (likely((values[13] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 13); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "tf") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 14) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); values[11] = PyTuple_GET_ITEM(__pyx_args, 11); values[12] = PyTuple_GET_ITEM(__pyx_args, 12); values[13] = PyTuple_GET_ITEM(__pyx_args, 13); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if 
(unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 549; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 549; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_fx = __pyx_PyFloat_AsDouble(values[10]); if (unlikely((__pyx_v_fx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_fy = __pyx_PyFloat_AsDouble(values[11]); if (unlikely((__pyx_v_fy == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_fz = __pyx_PyFloat_AsDouble(values[12]); if (unlikely((__pyx_v_fz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[13]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.tf", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 545; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 547; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 548; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 551; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_14tf(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_fx, __pyx_v_fy, __pyx_v_fz, __pyx_v_res); /* function exit code */ goto 
__pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_14tf(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, double __pyx_v_fx, double __pyx_v_fy, double __pyx_v_fz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("tf", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if 
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":556 * DTYPE_T v1, v2, v3, v4, v5, v6 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename 
= __pyx_f[0]; __pyx_lineno = 556; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":557 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":558 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":559 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); unsigned int __pyx_parallel_temp8 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp12 = __PYX_NAN(); unsigned int __pyx_parallel_temp13 = 0xbad0bad0; unsigned int __pyx_parallel_temp14 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp15 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp16 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_v5) reduction(+:__pyx_v_v6) reduction(+:__pyx_v_v1) reduction(+:__pyx_v_v4) reduction(+:__pyx_v_v3) reduction(+:__pyx_v_v2) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_Y1) 
lastprivate(__pyx_v_Z2_sqr) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_k) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":560 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":561 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":562 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * v1 = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":563 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * v1 = 0 * v2 = 0 */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":564 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * v1 = 0 # <<<<<<<<<<<<<< * v2 = 0 * v3 = 0 */ __pyx_v_v1 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":565 * Z2_sqr = Z2**2 * v1 = 0 * v2 = 0 # <<<<<<<<<<<<<< * v3 = 0 * v4 = 0 */ __pyx_v_v2 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":566 * v1 = 0 * v2 = 0 * v3 = 0 # <<<<<<<<<<<<<< * v4 = 0 * v5 = 0 */ __pyx_v_v3 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":567 * v2 = 0 * v3 = 0 * v4 = 0 # <<<<<<<<<<<<<< * v5 = 0 * v6 = 0 */ __pyx_v_v4 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":568 * v3 = 0 * v4 = 0 * v5 = 0 # <<<<<<<<<<<<<< * v6 = 0 * for k in range(nverts): */ __pyx_v_v5 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":569 * v4 = 0 * v5 = 0 * v6 = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_v6 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":570 * v5 = 0 * v6 = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":571 * v6 = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = 
((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":572 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":573 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":574 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":575 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":576 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v1 = (__pyx_v_v1 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":577 * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v4 += 
kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v2 = (__pyx_v_v2 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":578 * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v3 = (__pyx_v_v3 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":579 * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v4 = (__pyx_v_v4 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":580 * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (fx*(v1*mx + v2*my + v3*mz) */ __pyx_v_v5 = (__pyx_v_v5 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":581 * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += (fx*(v1*mx + v2*my + v3*mz) * + fy*(v2*mx + v4*my + v5*mz) */ __pyx_v_v6 = (__pyx_v_v6 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":582 * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (fx*(v1*mx + v2*my + v3*mz) # <<<<<<<<<<<<<< * + fy*(v2*mx + v4*my + v5*mz) * + fz*(v3*mx + v5*my + v6*mz)) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (((__pyx_v_fx * (((__pyx_v_v1 * __pyx_v_mx) + (__pyx_v_v2 * __pyx_v_my)) + (__pyx_v_v3 * __pyx_v_mz))) + (__pyx_v_fy * (((__pyx_v_v2 * __pyx_v_mx) + (__pyx_v_v4 * __pyx_v_my)) + (__pyx_v_v5 * __pyx_v_mz)))) + (__pyx_v_fz * (((__pyx_v_v3 * __pyx_v_mx) + (__pyx_v_v5 * __pyx_v_my)) + (__pyx_v_v6 * __pyx_v_mz)))); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; 
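/*
 * Editor's note: this block is Cython's cross-thread error propagation
 * for the parallel loop. The first thread to fail fetches the live
 * exception into the shared __pyx_parallel_exc_* slots -- the
 * `omp flush` plus the `if (!__pyx_parallel_exc_type)` test just above
 * keep later failures from overwriting it -- and sets
 * __pyx_parallel_why to 4. Because `break` is not allowed inside an
 * `omp for` loop, the `if (__pyx_parallel_why < 2)` check at the top of
 * each iteration makes the remaining threads drain their iterations as
 * no-ops; once the region ends, the saved exception is restored and
 * control jumps to the function's error label.
 */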
__Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates7) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_Z2; __pyx_parallel_temp1 = __pyx_v_v5; __pyx_parallel_temp2 = __pyx_v_v6; __pyx_parallel_temp3 = __pyx_v_Y1; __pyx_parallel_temp4 = __pyx_v_v1; __pyx_parallel_temp5 = __pyx_v_v4; __pyx_parallel_temp6 = __pyx_v_v3; __pyx_parallel_temp7 = __pyx_v_Z2_sqr; __pyx_parallel_temp8 = __pyx_v_i; __pyx_parallel_temp9 = __pyx_v_v2; __pyx_parallel_temp10 = __pyx_v_Z1_sqr; __pyx_parallel_temp11 = __pyx_v_X1; __pyx_parallel_temp12 = __pyx_v_Y2; __pyx_parallel_temp13 = __pyx_v_kp1; __pyx_parallel_temp14 = __pyx_v_k; __pyx_parallel_temp15 = __pyx_v_X2; __pyx_parallel_temp16 = __pyx_v_Z1; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Z2 = __pyx_parallel_temp0; __pyx_v_v5 = __pyx_parallel_temp1; __pyx_v_v6 = __pyx_parallel_temp2; __pyx_v_Y1 = __pyx_parallel_temp3; __pyx_v_v1 = __pyx_parallel_temp4; __pyx_v_v4 = __pyx_parallel_temp5; __pyx_v_v3 = __pyx_parallel_temp6; __pyx_v_Z2_sqr = __pyx_parallel_temp7; __pyx_v_i = __pyx_parallel_temp8; __pyx_v_v2 = __pyx_parallel_temp9; __pyx_v_Z1_sqr = __pyx_parallel_temp10; __pyx_v_X1 = __pyx_parallel_temp11; __pyx_v_Y2 = __pyx_parallel_temp12; __pyx_v_kp1 = __pyx_parallel_temp13; __pyx_v_k = __pyx_parallel_temp14; __pyx_v_X2 = __pyx_parallel_temp15; __pyx_v_Z1 = __pyx_parallel_temp16; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":558 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":544 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; 
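/*
 * Editor's note: like gzz() above, tf() returns None; its real output is
 * the in-place accumulation into `res`. Reading the expression quoted at
 * _polyprism.pyx:582, the six per-point sums v1..v6 are the entries of
 * the symmetric kernel matrix
 *
 *          | v1  v2  v3 |
 *      V = | v2  v4  v5 |
 *          | v3  v5  v6 |
 *
 * so each computation point accumulates f^T V m: the field of the
 * magnetization vector m = (mx, my, mz) projected onto the main-field
 * direction f = (fx, fy, fz).
 */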
__Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.tf", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":588 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_17bx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_16bx[] = "bx(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double mx, double my, double mz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_17bx = {__Pyx_NAMESTR("bx"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_17bx, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_16bx)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_17bx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("bx (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = 
PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bx") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = 
PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.bx", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 589; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 590; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_16bx(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_16bx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject 
*__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("bx", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, 
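/*
 * Each typed ndarray argument is bound through __Pyx_GetBufferAndValidate,
 * a wrapper over the CPython buffer protocol that additionally checks ndim
 * and the item format against DTYPE_T. A minimal sketch of the underlying
 * calls (simplified, and assuming DTYPE_T is a C double):
 *
 *   Py_buffer view;
 *   if (PyObject_GetBuffer(obj, &view, PyBUF_FORMAT | PyBUF_STRIDES) == -1)
 *       return NULL;                      // exception already set
 *   double first = *(double *)view.buf;   // element 0
 *   PyBuffer_Release(&view);
 *
 * The strides and shape are then cached in the diminfo structs so the hot
 * loop can index without touching any Python object.
 */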
(PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":600 * DTYPE_T v1, v2, v3 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":601 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 601; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":602 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): 
* Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":603 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); unsigned int __pyx_parallel_temp4 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); unsigned int __pyx_parallel_temp6 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); unsigned int __pyx_parallel_temp11 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp12 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp13 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_v1) reduction(+:__pyx_v_v2) reduction(+:__pyx_v_v3) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_k) lastprivate(__pyx_v_X1) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_X2) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned 
int)0xbad0bad0); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":604 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":605 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":606 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * v1 = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":607 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * v1 = 0 * v2 = 0 */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":608 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * v1 = 0 # <<<<<<<<<<<<<< * v2 = 0 * v3 = 0 */ __pyx_v_v1 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":609 * Z2_sqr = Z2**2 * v1 = 0 * v2 = 0 # <<<<<<<<<<<<<< * v3 = 0 * for k in range(nverts): */ __pyx_v_v2 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":610 * v1 = 0 * v2 = 0 * v3 = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_v3 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":611 * v2 = 0 * v3 = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":612 * v3 = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":613 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":614 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD 
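/*
 * Raising ZeroDivisionError (for the "(k + 1) % nverts" above) from inside
 * the nogil/OpenMP region requires re-acquiring the GIL first, which is
 * exactly what this block is doing. In outline:
 *
 *   PyGILState_STATE s = PyGILState_Ensure();   // grab the GIL
 *   PyErr_SetString(PyExc_ZeroDivisionError,
 *                   "integer division or modulo by zero");
 *   PyGILState_Release(s);                      // and drop it again
 *   goto error_label;   // unwound via the __pyx_parallel_why machinery
 */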
PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":615 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":616 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":617 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v1 = (__pyx_v_v1 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":618 * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v1*mx + v2*my + v3*mz) */ __pyx_v_v2 = (__pyx_v_v2 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":619 * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += (v1*mx + v2*my + v3*mz) * */ __pyx_v_v3 = (__pyx_v_v3 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":620 * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v1*mx + v2*my + v3*mz) # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (((__pyx_v_v1 * __pyx_v_mx) + (__pyx_v_v2 * __pyx_v_my)) + (__pyx_v_v3 * __pyx_v_mz)); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp 
flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates8) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_Z2_sqr; __pyx_parallel_temp1 = __pyx_v_v1; __pyx_parallel_temp2 = __pyx_v_Y2; __pyx_parallel_temp3 = __pyx_v_Z1_sqr; __pyx_parallel_temp4 = __pyx_v_k; __pyx_parallel_temp5 = __pyx_v_X1; __pyx_parallel_temp6 = __pyx_v_i; __pyx_parallel_temp7 = __pyx_v_v2; __pyx_parallel_temp8 = __pyx_v_Z2; __pyx_parallel_temp9 = __pyx_v_v3; __pyx_parallel_temp10 = __pyx_v_Y1; __pyx_parallel_temp11 = __pyx_v_kp1; __pyx_parallel_temp12 = __pyx_v_Z1; __pyx_parallel_temp13 = __pyx_v_X2; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Z2_sqr = __pyx_parallel_temp0; __pyx_v_v1 = __pyx_parallel_temp1; __pyx_v_Y2 = __pyx_parallel_temp2; __pyx_v_Z1_sqr = __pyx_parallel_temp3; __pyx_v_k = __pyx_parallel_temp4; __pyx_v_X1 = __pyx_parallel_temp5; __pyx_v_i = __pyx_parallel_temp6; __pyx_v_v2 = __pyx_parallel_temp7; __pyx_v_Z2 = __pyx_parallel_temp8; __pyx_v_v3 = __pyx_parallel_temp9; __pyx_v_Y1 = __pyx_parallel_temp10; __pyx_v_kp1 = __pyx_parallel_temp11; __pyx_v_Z1 = __pyx_parallel_temp12; __pyx_v_X2 = __pyx_parallel_temp13; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":602 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":588 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, 
ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.bx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":624 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_19by(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_18by[] = "by(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double mx, double my, double mz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_19by = {__Pyx_NAMESTR("by"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_19by, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_18by)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_19by(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("by (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: 
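/*
 * Positional arguments are unpacked with a deliberately fall-through
 * switch: entering at case N copies items N-1 .. 0 in a single pass.
 * The same shape for three arguments (a sketch):
 *
 *   switch (nargs) {
 *     case 3: values[2] = PyTuple_GET_ITEM(args, 2);  // fall through
 *     case 2: values[1] = PyTuple_GET_ITEM(args, 1);  // fall through
 *     case 1: values[0] = PyTuple_GET_ITEM(args, 0);
 *     case 0: break;
 *     default: goto argtuple_error;                   // wrong arity
 *   }
 */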
values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "by") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] 
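/*
 * The scalar parameters (z1, z2, mx, my, mz) are coerced just below with
 * __pyx_PyFloat_AsDouble, using the CPython convention that a return value
 * of -1 is ambiguous and must be disambiguated with PyErr_Occurred(). The
 * same guard written against the plain CPython API:
 *
 *   double z1 = PyFloat_AsDouble(obj);
 *   if (z1 == -1.0 && PyErr_Occurred())
 *       goto error;   // the -1 came from a failed conversion, not the data
 */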
= PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 629; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 629; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 630; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 630; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 630; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.by", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 625; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 626; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 627; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 628; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 631; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_18by(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_18by(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject 
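/*
 * Before dispatching into this implementation, the wrapper above ran
 * __Pyx_ArgTypeTest on every array parameter -- the compiled form of the
 * "numpy.ndarray[...] ... not None" annotations. Roughly equivalent to the
 * following sketch (the real helper formats a richer message):
 *
 *   if (!PyObject_TypeCheck(obj, &PyArray_Type)) {
 *       PyErr_Format(PyExc_TypeError,
 *                    "Argument '%s' has incorrect type", name);
 *       goto error;
 *   }
 */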
*__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("by", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if 
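/*
 * Once the buffers are validated, every element access in the hot loop goes
 * through __Pyx_BufPtrStrided1d, which is plain pointer arithmetic over the
 * cached stride -- in effect:
 *
 *   #define BufPtrStrided1d(T, buf, i, stride) \
 *       ((T *)((char *)(buf) + (i) * (stride)))
 *
 *   Z1 = z1 - *BufPtrStrided1d(double, zp_buf, i, zp_stride);   // zp[i]
 *
 * so non-contiguous inputs (e.g. strided slices) still index correctly, at
 * the cost of one multiply per access.
 */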
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":636 * DTYPE_T v2, v4, v5 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 636; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":637 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":638 * nverts = 
len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":639 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); unsigned int __pyx_parallel_temp7 = 0xbad0bad0; unsigned int __pyx_parallel_temp8 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp12 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp13 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_v4) reduction(+:__pyx_v_v5) reduction(+:__pyx_v_v2) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Y1) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_X1) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Y1 = 
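/*
 * This is how cython.parallel.prange lowers to OpenMP: an outer
 * "#pragma omp parallel" carrying reduction(+:...) clauses for accumulators
 * that are only ever incremented inside the loop body (v2, v4, v5 here),
 * plus an "#pragma omp for" whose per-iteration variables are lastprivate
 * so their final values survive the loop. Stripped of the exception and
 * GIL machinery, the emitted loop is shaped like:
 *
 *   #pragma omp parallel reduction(+:v2) reduction(+:v4) reduction(+:v5)
 *   {
 *   #pragma omp for lastprivate(Z1, Z2, Z1_sqr, Z2_sqr, X1, Y1, X2, Y2, k, kp1, i)
 *       for (t = 0; t < size; t++) {
 *           // one observation point per iteration
 *       }
 *   }
 */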
((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":640 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":641 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":642 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * v2 = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":643 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * v2 = 0 * v4 = 0 */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":644 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * v2 = 0 # <<<<<<<<<<<<<< * v4 = 0 * v5 = 0 */ __pyx_v_v2 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":645 * Z2_sqr = Z2**2 * v2 = 0 * v4 = 0 # <<<<<<<<<<<<<< * v5 = 0 * for k in range(nverts): */ __pyx_v_v4 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":646 * v2 = 0 * v4 = 0 * v5 = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_v5 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":647 * v4 = 0 * v5 = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":648 * v5 = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":649 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":650 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = 
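/*
 * The inner loop walks the polygon's edges: vertex k pairs with
 * kp1 = (k + 1) % nverts, so the final vertex closes back onto the first.
 * The modulo is also why the generated code must guard nverts == 0 and
 * raise ZeroDivisionError here, re-acquiring the GIL to do so. Sketch:
 *
 *   for (k = 0; k < nverts; k++) {
 *       kp1 = (k + 1) % nverts;                      // wraps nverts-1 -> 0
 *       X1 = x[k]   - xp[i];  Y1 = y[k]   - yp[i];   // edge start
 *       X2 = x[kp1] - xp[i];  Y2 = y[kp1] - yp[i];   // edge end
 *   }
 */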
PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 650; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":651 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":652 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":653 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v2 = (__pyx_v_v2 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":654 * Y2 = y[kp1] - yp[i] * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v2*mx + v4*my + v5*mz) */ __pyx_v_v4 = (__pyx_v_v4 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":655 * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += (v2*mx + v4*my + v5*mz) * */ __pyx_v_v5 = (__pyx_v_v5 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":656 * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v2*mx + v4*my + v5*mz) # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (((__pyx_v_v2 * __pyx_v_mx) + (__pyx_v_v4 * __pyx_v_my)) + (__pyx_v_v5 * __pyx_v_mz)); goto __pyx_L13; __pyx_L8_error:; { #ifdef 
WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates9) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_Z1; __pyx_parallel_temp1 = __pyx_v_Z2; __pyx_parallel_temp2 = __pyx_v_v4; __pyx_parallel_temp3 = __pyx_v_v5; __pyx_parallel_temp4 = __pyx_v_v2; __pyx_parallel_temp5 = __pyx_v_X2; __pyx_parallel_temp6 = __pyx_v_Z1_sqr; __pyx_parallel_temp7 = __pyx_v_kp1; __pyx_parallel_temp8 = __pyx_v_k; __pyx_parallel_temp9 = __pyx_v_Y1; __pyx_parallel_temp10 = __pyx_v_i; __pyx_parallel_temp11 = __pyx_v_Z2_sqr; __pyx_parallel_temp12 = __pyx_v_Y2; __pyx_parallel_temp13 = __pyx_v_X1; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Z1 = __pyx_parallel_temp0; __pyx_v_Z2 = __pyx_parallel_temp1; __pyx_v_v4 = __pyx_parallel_temp2; __pyx_v_v5 = __pyx_parallel_temp3; __pyx_v_v2 = __pyx_parallel_temp4; __pyx_v_X2 = __pyx_parallel_temp5; __pyx_v_Z1_sqr = __pyx_parallel_temp6; __pyx_v_kp1 = __pyx_parallel_temp7; __pyx_v_k = __pyx_parallel_temp8; __pyx_v_Y1 = __pyx_parallel_temp9; __pyx_v_i = __pyx_parallel_temp10; __pyx_v_Z2_sqr = __pyx_parallel_temp11; __pyx_v_Y2 = __pyx_parallel_temp12; __pyx_v_X1 = __pyx_parallel_temp13; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":638 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":624 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, 
ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.by", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":660 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_21bz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_20bz[] = "bz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double mx, double my, double mz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_21bz = {__Pyx_NAMESTR("bz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_21bz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_20bz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_21bz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("bz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 
5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = 
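/*
 * bz() is parsed exactly like bx() and by(): eleven mandatory arguments.
 * __Pyx_RaiseArgtupleInvalid(name, exact, min, max, found) raises a
 * TypeError along the lines of
 *
 *   TypeError: bz() takes exactly 11 positional arguments (10 given)
 *
 * with the final integer identifying how many arguments were seen (or, in
 * the per-case calls above, which required keyword was missing).
 */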
PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 665; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 665; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 666; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 666; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 666; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.bz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 663; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 664; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 667; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_20bz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
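/* Implementation body of bz().  For each observation point i it forms the
   station-relative depths Z1 = z1 - zp[i] and Z2 = z2 - zp[i], walks the
   polygon edges from vertex k to vertex (k + 1) % nverts, accumulates the
   per-edge kernels v3 (kernelxz), v5 (kernelyz) and v6 (kernelzz), and adds
   v3*mx + v5*my + v6*mz into res[i].  When compiled with OpenMP, the
   prange(size) loop below becomes the Cython-generated
   "#pragma omp parallel" / "#pragma omp for" pair, with
   reduction(+:v3,v5,v6) on the kernel sums and lastprivate clauses on the
   loop-body variables. */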
*__pyx_pf_8fatiando_7gravmag_10_polyprism_20bz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("bz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; 
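/* Each __Pyx_GetBufferAndValidate() call (this one for xp; yp, zp, x, y and
   res follow) acquires a PEP 3118 buffer view of one ndarray argument and
   validates its dtype (DTYPE_T) and rank (ndim=1); res is additionally
   requested with PyBUF_WRITABLE since it is accumulated into.  The strides
   and shape of every view are cached in diminfo[0] so the hot loop can
   address the raw buffers directly. */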
__pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":672 * DTYPE_T v3, v5, v6 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 672; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":673 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 
673; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":674 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":675 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); unsigned int __pyx_parallel_temp3 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); unsigned int __pyx_parallel_temp8 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp12 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp13 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_v3) reduction(+:__pyx_v_v6) reduction(+:__pyx_v_v5) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Z2) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_Y1) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = 
((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":676 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":677 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":678 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * v3 = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":679 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * v3 = 0 * v5 = 0 */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":680 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * v3 = 0 # <<<<<<<<<<<<<< * v5 = 0 * v6 = 0 */ __pyx_v_v3 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":681 * Z2_sqr = Z2**2 * v3 = 0 * v5 = 0 # <<<<<<<<<<<<<< * v6 = 0 * for k in range(nverts): */ __pyx_v_v5 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":682 * v3 = 0 * v5 = 0 * v6 = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_v6 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":683 * v5 = 0 * v6 = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":684 * v6 = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":685 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":686 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = 
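/* next-vertex index: (k + 1) % nverts wraps the last vertex back to the
   first, closing the polygon; the nverts == 0 guard below re-acquires the
   GIL only to raise ZeroDivisionError before bailing out of the parallel
   section */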
(__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 686; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":687 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":688 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":689 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v3 = (__pyx_v_v3 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":690 * Y2 = y[kp1] - yp[i] * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v3*mx + v5*my + v6*mz) */ __pyx_v_v5 = (__pyx_v_v5 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":691 * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += (v3*mx + v5*my + v6*mz) */ __pyx_v_v6 = (__pyx_v_v6 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":692 * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v3*mx + v5*my + v6*mz) # <<<<<<<<<<<<<< */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (((__pyx_v_v3 * __pyx_v_mx) + (__pyx_v_v5 * 
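/* res[i] += v3*mx + v5*my + v6*mz: the three accumulated kernel sums
   dotted with the magnetization components */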
__pyx_v_my)) + (__pyx_v_v6 * __pyx_v_mz)); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates10) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_v3; __pyx_parallel_temp1 = __pyx_v_X2; __pyx_parallel_temp2 = __pyx_v_Z1_sqr; __pyx_parallel_temp3 = __pyx_v_kp1; __pyx_parallel_temp4 = __pyx_v_X1; __pyx_parallel_temp5 = __pyx_v_Z1; __pyx_parallel_temp6 = __pyx_v_Z2_sqr; __pyx_parallel_temp7 = __pyx_v_v6; __pyx_parallel_temp8 = __pyx_v_k; __pyx_parallel_temp9 = __pyx_v_Z2; __pyx_parallel_temp10 = __pyx_v_i; __pyx_parallel_temp11 = __pyx_v_v5; __pyx_parallel_temp12 = __pyx_v_Y2; __pyx_parallel_temp13 = __pyx_v_Y1; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_v3 = __pyx_parallel_temp0; __pyx_v_X2 = __pyx_parallel_temp1; __pyx_v_Z1_sqr = __pyx_parallel_temp2; __pyx_v_kp1 = __pyx_parallel_temp3; __pyx_v_X1 = __pyx_parallel_temp4; __pyx_v_Z1 = __pyx_parallel_temp5; __pyx_v_Z2_sqr = __pyx_parallel_temp6; __pyx_v_v6 = __pyx_parallel_temp7; __pyx_v_k = __pyx_parallel_temp8; __pyx_v_Z2 = __pyx_parallel_temp9; __pyx_v_i = __pyx_parallel_temp10; __pyx_v_v5 = __pyx_parallel_temp11; __pyx_v_Y2 = __pyx_parallel_temp12; __pyx_v_Y1 = __pyx_parallel_temp13; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":674 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":660 * 
@cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.bz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = ((__pyx_v_info == NULL) != 0); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":213 * copy_shape = 0 * * if ((flags & 
pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_3) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # 
<<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_2 = (__pyx_v_copy_shape != 0); if (__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":227 * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":228 * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":234 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not 
PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { __pyx_t_3 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255 * if not 
hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = ((__pyx_v_descr->byteorder == '>') != 0); if (__pyx_t_1) { __pyx_t_2 = (__pyx_v_little_endian != 0); } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_1) { __pyx_t_3 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ switch (__pyx_v_t) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ case NPY_BYTE: __pyx_v_f = __pyx_k_b; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = __pyx_k_B; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = __pyx_k_h; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = __pyx_k_H; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * 
elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ case NPY_INT: __pyx_v_f = __pyx_k_i; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = __pyx_k_I; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: __pyx_v_f = __pyx_k_l; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = __pyx_k_L; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = __pyx_k_q; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ case NPY_ULONGLONG: __pyx_v_f = __pyx_k_Q; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = __pyx_k_f; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = __pyx_k_d; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = __pyx_k_g; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = __pyx_k_Zf; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = __pyx_k_Zd; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == 
NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ case NPY_CLONGDOUBLE: __pyx_v_f = __pyx_k_Zg; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = __pyx_k_O; break; default: /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_8); __Pyx_GIVEREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} break; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280 * return * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281 * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":282 * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * 
stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< * * 
cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return 
PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
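*
* [editor's note -- illustrative, not generated output] "except NULL" in this
* declaration means the C translation signals failure by returning NULL with a
* Python exception already set; every caller therefore follows the pattern
*
*     f = _util_dtypestring(descr, f, end, &offset);
*     if (unlikely(f == NULL)) goto error;     [exception is already set]
*
* which is exactly the unlikely()-guarded check emitted around __pyx_t_9 in
* __getbuffer__ above.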
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; long __pyx_t_10; char *__pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - <int>(new_offset - offset[0]) < 15: */ if 
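/* [editor's note] The endian_detector probe above is the classic type-punning
 * trick: store 1 in an int and inspect its first byte. A self-contained
 * sketch (illustrative, not part of the generated module):
 *
 *     static int is_little_endian(void)
 *     {
 *         int probe = 1;                   // low byte comes first on little-endian
 *         return ((char *)&probe)[0] != 0;
 *     }
 *
 * On a big-endian machine byte 0 holds the most significant byte of the
 * value 1, so the function returns 0. */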
(likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' 
and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_6 = ((__pyx_v_child->byteorder == '>') != 0); if (__pyx_t_6) { __pyx_t_7 = (__pyx_v_little_endian != 0); } else { __pyx_t_7 = __pyx_t_6; } if (!__pyx_t_7) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_6 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_6) { __pyx_t_8 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_9 = __pyx_t_8; } else { __pyx_t_9 = __pyx_t_6; } __pyx_t_6 = __pyx_t_9; } else { __pyx_t_6 = __pyx_t_7; } if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_10 = 0; (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ 
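/* [editor's note] The while-loop above emits PEP 3118 pad bytes (120 == 'x')
 * until the write cursor reaches the next field's offset. A hedged stand-alone
 * sketch of the same idea with an explicit bounds check (helper name invented):
 *
 *     static char *emit_padding(char *f, char *end, int *offset, int field_offset)
 *     {
 *         while (*offset < field_offset) {
 *             if (f >= end) return NULL;   // caller must leave room in the buffer
 *             *f++ = 'x';                  // 'x' is the struct-syntax pad byte
 *             (*offset)++;
 *         }
 *         return f;
 *     }
 */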
__pyx_t_10 = 0; (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if 
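/* [editor's note] Because t is a boxed Python integer at this point, every
 * branch of the chain below boxes the NPY_* constant and compares object-wise
 * via PyObject_RichCompare + IsTrue. Hand-written C could use the shorter
 * PyObject_RichCompareBool idiom (illustrative; PyLong_FromLong on Python 3):
 *
 *     PyObject *num = PyInt_FromLong(NPY_SHORT);
 *     int eq = (num != NULL) ? PyObject_RichCompareBool(t, num, Py_EQ) : -1;
 *     Py_XDECREF(num);
 *     if (eq < 0) goto error;              // the comparison itself failed
 *     if (eq) { *f = 'h'; }
 */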
(unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 104; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 105; goto __pyx_L11; } /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 108; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); 
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 113; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 102; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 100; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 103; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841 * 
elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L11; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L11:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = 
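/* [editor's note] The else-branch above raises ValueError with a %-formatted
 * message built via PyUnicode_Format plus an explicit one-element args tuple.
 * Hand-written C usually reaches for PyErr_Format, which formats and sets the
 * exception in a single call (illustrative, with a C int type code):
 *
 *     PyErr_Format(PyExc_ValueError,
 *                  "unknown dtype code in numpy.pxd (%d)", type_num);
 *     return NULL;                         // caller sees the except-NULL signal
 */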
(__pyx_v_f + 1); goto __pyx_L9; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_11; } __pyx_L9:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = <PyObject*>base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972 * else: * Py_INCREF(base) # important to do this before decref below! 
* baseptr = <PyObject*>base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":973 * Py_INCREF(base) # important to do this before decref below! * baseptr = <PyObject*>base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974 * baseptr = <PyObject*>base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":978 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return <object>arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980 * return None * else: * return <object>arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif __Pyx_NAMESTR("_polyprism"), __Pyx_DOCSTR(__pyx_k_This_is_a_Cython_implementation), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 
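/* [editor's note] set_array_base earlier in this file follows the classic
 * reference-swap discipline: take the new reference BEFORE dropping the old
 * one ("important to do this before decref below!"), so the swap is safe even
 * when the new base is the old base. The general idiom for replacing an owned
 * PyObject* slot (illustrative helper):
 *
 *     static void replace_ref(PyObject **slot, PyObject *new_obj)
 *     {
 *         PyObject *old = *slot;
 *         Py_XINCREF(new_obj);             // incref the new reference first
 *         *slot = new_obj;
 *         Py_XDECREF(old);                 // now the old reference may die safely
 *     }
 */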
0, 1, 0, 0}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_X1, __pyx_k_X1, sizeof(__pyx_k_X1), 0, 0, 1, 1}, {&__pyx_n_s_X2, __pyx_k_X2, sizeof(__pyx_k_X2), 0, 0, 1, 1}, {&__pyx_n_s_Y1, __pyx_k_Y1, sizeof(__pyx_k_Y1), 0, 0, 1, 1}, {&__pyx_n_s_Y2, __pyx_k_Y2, sizeof(__pyx_k_Y2), 0, 0, 1, 1}, {&__pyx_n_s_Z1, __pyx_k_Z1, sizeof(__pyx_k_Z1), 0, 0, 1, 1}, {&__pyx_n_s_Z1_sqr, __pyx_k_Z1_sqr, sizeof(__pyx_k_Z1_sqr), 0, 0, 1, 1}, {&__pyx_n_s_Z2, __pyx_k_Z2, sizeof(__pyx_k_Z2), 0, 0, 1, 1}, {&__pyx_n_s_Z2_sqr, __pyx_k_Z2_sqr, sizeof(__pyx_k_Z2_sqr), 0, 0, 1, 1}, {&__pyx_n_s_bx, __pyx_k_bx, sizeof(__pyx_k_bx), 0, 0, 1, 1}, {&__pyx_n_s_by, __pyx_k_by, sizeof(__pyx_k_by), 0, 0, 1, 1}, {&__pyx_n_s_bz, __pyx_k_bz, sizeof(__pyx_k_bz), 0, 0, 1, 1}, {&__pyx_n_s_density, __pyx_k_density, sizeof(__pyx_k_density), 0, 0, 1, 1}, {&__pyx_n_s_fatiando_gravmag__polyprism, __pyx_k_fatiando_gravmag__polyprism, sizeof(__pyx_k_fatiando_gravmag__polyprism), 0, 0, 1, 1}, {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1}, {&__pyx_n_s_fx, __pyx_k_fx, sizeof(__pyx_k_fx), 0, 0, 1, 1}, {&__pyx_n_s_fy, __pyx_k_fy, sizeof(__pyx_k_fy), 0, 0, 1, 1}, {&__pyx_n_s_fz, __pyx_k_fz, sizeof(__pyx_k_fz), 0, 0, 1, 1}, {&__pyx_n_s_gxx, __pyx_k_gxx, sizeof(__pyx_k_gxx), 0, 0, 1, 1}, {&__pyx_n_s_gxy, __pyx_k_gxy, sizeof(__pyx_k_gxy), 0, 0, 1, 1}, {&__pyx_n_s_gxz, __pyx_k_gxz, sizeof(__pyx_k_gxz), 0, 0, 1, 1}, {&__pyx_n_s_gyy, __pyx_k_gyy, sizeof(__pyx_k_gyy), 0, 0, 1, 1}, {&__pyx_n_s_gyz, __pyx_k_gyz, sizeof(__pyx_k_gyz), 0, 0, 1, 1}, {&__pyx_n_s_gz, __pyx_k_gz, sizeof(__pyx_k_gz), 0, 0, 1, 1}, {&__pyx_n_s_gzz, __pyx_k_gzz, sizeof(__pyx_k_gzz), 0, 0, 1, 1}, {&__pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_k_home_leo_src_fatiando_fatiando, sizeof(__pyx_k_home_leo_src_fatiando_fatiando), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, {&__pyx_n_s_kernel, __pyx_k_kernel, sizeof(__pyx_k_kernel), 0, 0, 1, 1}, {&__pyx_n_s_kp1, __pyx_k_kp1, sizeof(__pyx_k_kp1), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_mx, __pyx_k_mx, sizeof(__pyx_k_mx), 0, 0, 1, 1}, {&__pyx_n_s_my, __pyx_k_my, sizeof(__pyx_k_my), 0, 0, 1, 1}, {&__pyx_n_s_mz, __pyx_k_mz, sizeof(__pyx_k_mz), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_nverts, __pyx_k_nverts, sizeof(__pyx_k_nverts), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_releasebuffer, __pyx_k_pyx_releasebuffer, sizeof(__pyx_k_pyx_releasebuffer), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_res, __pyx_k_res, sizeof(__pyx_k_res), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 
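/* [editor's note] Every identifier and message the module uses lives in this
 * table; Cython's string-init pass (not shown in this excerpt) walks it once
 * at import and replaces each C byte array with a real, usually interned,
 * Python string. The core of that conversion is just (illustrative; Python 3
 * spelling):
 *
 *     PyObject *s = PyUnicode_InternFromString("gz");  // new, interned reference
 *
 * The trailing 0/1 flags on each entry record length, encoding and interning
 * details per Cython's StringTabEntry layout. */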
0, 0, 1, 1}, {&__pyx_n_s_tf, __pyx_k_tf, sizeof(__pyx_k_tf), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_v1, __pyx_k_v1, sizeof(__pyx_k_v1), 0, 0, 1, 1}, {&__pyx_n_s_v2, __pyx_k_v2, sizeof(__pyx_k_v2), 0, 0, 1, 1}, {&__pyx_n_s_v3, __pyx_k_v3, sizeof(__pyx_k_v3), 0, 0, 1, 1}, {&__pyx_n_s_v4, __pyx_k_v4, sizeof(__pyx_k_v4), 0, 0, 1, 1}, {&__pyx_n_s_v5, __pyx_k_v5, sizeof(__pyx_k_v5), 0, 0, 1, 1}, {&__pyx_n_s_v6, __pyx_k_v6, sizeof(__pyx_k_v6), 0, 0, 1, 1}, {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, {&__pyx_n_s_xp, __pyx_k_xp, sizeof(__pyx_k_xp), 0, 0, 1, 1}, {&__pyx_n_s_y, __pyx_k_y, sizeof(__pyx_k_y), 0, 0, 1, 1}, {&__pyx_n_s_yp, __pyx_k_yp, sizeof(__pyx_k_yp), 0, 0, 1, 1}, {&__pyx_n_s_z1, __pyx_k_z1, sizeof(__pyx_k_z1), 0, 0, 1, 1}, {&__pyx_n_s_z2, __pyx_k_z2, sizeof(__pyx_k_z2), 0, 0, 1, 1}, {&__pyx_n_s_zp, __pyx_k_zp, sizeof(__pyx_k_zp), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 353; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; 
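/* [editor's note] __Pyx_InitCachedBuiltins resolves names like range and
 * ValueError once at import instead of on every use, and
 * __Pyx_InitCachedConstants pre-packs the constant argument tuple for each
 * raise site so raising costs a single call at runtime. A hedged sketch of
 * both lookups:
 *
 *     PyObject *builtins = PyEval_GetBuiltins();                   // borrowed dict
 *     PyObject *range_fn = PyDict_GetItemString(builtins, "range"); // borrowed
 *     Py_XINCREF(range_fn);                                        // keep it alive
 *
 *     PyObject *args = PyTuple_Pack(1, msg);                       // new ref
 *     PyObject *exc  = PyObject_Call(PyExc_ValueError, args, NULL);
 */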
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "fatiando/gravmag/_polyprism.pyx":334 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__7 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gz, 334, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":364 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__9 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, 
__pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); __pyx_codeobj__10 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__9, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxx, 364, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":394 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__11 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); __pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxy, 394, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":424 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__13 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxz, 424, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":454 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__15 = 
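/* [editor's note] Each kernel (gz, gxx, gxy, ...) gets a pre-packed tuple of
 * its local-variable names plus a code object built with __Pyx_PyCode_New, so
 * tracebacks can point into _polyprism.pyx with the right file name and line.
 * The packing itself is plain PyTuple_Pack, which takes a count followed by
 * that many objects and increfs each of them (illustrative):
 *
 *     PyObject *varnames = PyTuple_Pack(3, name_xp, name_yp, name_zp);
 *     if (varnames == NULL) goto error;    // propagate like every other site
 */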
PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gyy, 454, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":484 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__17 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); __pyx_codeobj__18 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gyz, 484, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":514 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__19 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gzz, 514, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":544 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # 
<<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__21 = PyTuple_Pack(33, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_fx, __pyx_n_s_fy, __pyx_n_s_fz, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_v1, __pyx_n_s_v2, __pyx_n_s_v3, __pyx_n_s_v4, __pyx_n_s_v5, __pyx_n_s_v6, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(14, 0, 33, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_tf, 544, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":588 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__23 = PyTuple_Pack(27, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_v1, __pyx_n_s_v2, __pyx_n_s_v3, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(11, 0, 27, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_bx, 588, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":624 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__25 = PyTuple_Pack(27, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_v2, __pyx_n_s_v4, __pyx_n_s_v5, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(11, 0, 27, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, 
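/* Descriptive note: the __Pyx_PyCode_New(...) calls in this block mirror
   CPython's PyCode_New -- the leading integers are (argcount,
   kwonly_argcount, nlocals, stacksize, flags), followed by empty code and
   constants, the varnames tuple packed just above, the source filename,
   the function name, and the first line number in the .pyx file.  So
   gz/gxx/gxy/... each take 9 positional arguments with 23 locals, tf
   takes 14 (it adds the magnetization and field components mx, my, mz,
   fx, fy, fz), and bx/by/bz take 11. */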
__pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_by, 624, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":660 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__27 = PyTuple_Pack(27, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_v3, __pyx_n_s_v5, __pyx_n_s_v6, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__27); __Pyx_GIVEREF(__pyx_tuple__27); __pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(11, 0, 27, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_bz, 660, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC init_polyprism(void); /*proto*/ PyMODINIT_FUNC init_polyprism(void) #else PyMODINIT_FUNC PyInit__polyprism(void); /*proto*/ PyMODINIT_FUNC PyInit__polyprism(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__polyprism(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef 
__Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_polyprism"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_This_is_a_Cython_implementation), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. ---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif if (__pyx_module_is_main_fatiando__gravmag___polyprism) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "fatiando.gravmag._polyprism")) { if (unlikely(PyDict_SetItemString(modules, "fatiando.gravmag._polyprism", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = 
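/* Illustrative sketch -- not generated code.  The PY_MAJOR_VERSION split
   in the module-init function above is the standard dual-init pattern of
   this Cython era.  A minimal hand-written equivalent for a hypothetical
   module "example" would be:

       #if PY_MAJOR_VERSION >= 3
       static struct PyModuleDef moduledef = {
           PyModuleDef_HEAD_INIT, "example", NULL, -1, example_methods
       };
       PyMODINIT_FUNC PyInit_example(void) {
           return PyModule_Create(&moduledef);
       }
       #else
       PyMODINIT_FUNC initexample(void) {
           (void) Py_InitModule("example", example_methods);
       }
       #endif

   On Python 2 the init function returns void and reports failure through
   the error indicator; on Python 3 it returns the module object or NULL,
   which is why this function's epilogue is conditional as well. */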
__LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "fatiando/gravmag/_polyprism.pyx":6 * A pure python implementation is in _polyprism_numpy.py * """ * import numpy # <<<<<<<<<<<<<< * * from libc.math cimport log, atan2, sqrt */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_numpy, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "fatiando/gravmag/_polyprism.pyx":15 * from cython.parallel cimport prange, parallel * * DTYPE = numpy.float # <<<<<<<<<<<<<< * ctypedef numpy.float_t DTYPE_T * */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":334 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_1gz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":364 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_3gxx, 
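/* Descriptive note: the execution code above is the compiled form of
   "import numpy" and "DTYPE = numpy.float".  numpy.float was an alias for
   the builtin float (a C double), so DTYPE_T / numpy.float_t is float64
   and every buffer argument below is validated against format 'd'.  Worth
   knowing when rebuilding this module today: the numpy.float alias was
   deprecated in NumPy 1.20 and removed in 1.24, so the modern spelling of
   the same dtype is numpy.float64. */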
NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxx, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":394 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_5gxy, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxy, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":424 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_7gxz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":454 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_9gyy, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gyy, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":484 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_11gyz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gyz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":514 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * 
numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_13gzz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gzz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":544 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_15tf, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_tf, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":588 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_17bx, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_bx, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":624 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_19by, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_by, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":660 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_21bz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_bz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":1 * #cython: embedsignature=True # <<<<<<<<<<<<<< * """ * 
This is a Cython implementation of the potential fields of a polygonal prism. */ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { __Pyx_AddTraceback("init fatiando.gravmag._polyprism", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init fatiando.gravmag._polyprism"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, 
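/* Descriptive note: __Pyx_ErrFetch/__Pyx_ErrRestore above are inlined
   equivalents of PyErr_Fetch/PyErr_Restore on CPython -- they swap the
   three curexc_* slots of the thread state directly instead of paying for
   a call.  Both transfer ownership of the references, so the usual
   pattern is a matched pair around code that may clobber the error
   indicator:

       PyObject *et, *ev, *tb;
       __Pyx_ErrFetch(&et, &ev, &tb);   (takes the pending exception)
       ... cleanup that may set or clear errors ...
       __Pyx_ErrRestore(et, ev, tb);    (reinstates it, stealing the refs)
*/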
Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
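/* Descriptive note: the keyword matching in __Pyx_ParseOptionalKeywords
   compares by pointer first (**name == key) -- cheap and usually
   sufficient because Cython interns its argument-name strings -- and only
   falls back to a length check plus string comparison on a miss.  A
   keyword that names a positional argument already filled lands on
   arg_passed_twice ("got multiple values for keyword argument"); an
   unknown name reaches invalid_keyword, so e.g. passing a hypothetical
   rho=1.0 to gz() raises
   TypeError: gz() got an unexpected keyword argument 'rho'. */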
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) /* First char was not a digit */ PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? 
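/* Descriptive note: __Pyx_BufFmt_ParseNumber above loops on
   "*t >= '0' && *t < '9'", which looks like an off-by-one -- a multi-digit
   repeat count containing a 9 (say "19d") stops before the 9 and the
   count is misread; later Cython releases use "*t <= '9'".  It is
   harmless for this module, whose float64 buffers carry a plain "d"
   format with no repeat counts.  __Pyx_IsLittleEndian just before it is
   the classic probe: store 1 in an unsigned int and test the first byte,
   which is nonzero exactly on little-endian targets. */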
"'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
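/* Descriptive note: the __Pyx_st_* and __Pyx_pad_* structs above are the
   portable pre-C11 alignment probe.  For a type T,

       struct probe { char c; T x; };
       alignment of T == sizeof(struct probe) - sizeof(T)

   because the compiler inserts exactly enough padding after c to align x.
   On a typical LP64 ABI, sizeof(__Pyx_st_double) - sizeof(double) == 8.
   C11 code would simply write _Alignof(T). */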
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; /* Consume from buffer string */ while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; /* breaks both loops as ctx->enc_count == 0 */ } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; /* empty struct */ field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; /* not a 'break' in the loop */ } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { 
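/* Descriptive note: __pyx_buffmt_parse_array above consumes a
   parenthesized shape such as "(16,16)" in a PEP 3118 format string and
   checks each extent against the expected arraysize.  One oddity: the
   whitespace cases in its switch execute "continue" without advancing ts,
   which would spin forever if a producer ever put spaces inside the
   parentheses -- in practice numpy emits no whitespace there, so the
   branch appears to be dead.  This module only ever expects scalar 'd'
   items, so the array path is not exercised by its own buffers. */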
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case 10: case 13: ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': /* substruct */ { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': /* end of substruct; either repeat or move on */ { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } /* fall through */ case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 's': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; } else { if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; } ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context 
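/* Descriptive note: __Pyx_BufFmt_CheckString walks a PEP 3118 format
   string and validates it against the expected __Pyx_TypeInfo tree.  For
   the 1-D float64 buffers this module accepts, the expected item is a
   lone 'd'; on a little-endian build, roughly:

       "d"   accepted (native double)
       "=d"  accepted (standard size, still 8 bytes)
       ">d"  rejected: "Big-endian buffer not supported on little-endian
             compiler"
       "f"   rejected: "Buffer dtype mismatch, expected 'double' but got
             'float'"
*/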
ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static CYTHON_INLINE long __Pyx_mod_long(long a, long b) { long r = a % b; r += ((r != 0) & ((r ^ b) < 0)) * b; return r; } #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); #if PY_VERSION_HEX >= 0x02060000 if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; #endif result = (*call)(func, arg, kw); #if PY_VERSION_HEX >= 0x02060000 Py_LeaveRecursiveCall(); #endif if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject 
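/* Descriptive note: __Pyx_mod_long above turns C's truncating remainder
   into Python's floor modulo without a branch -- the correction adds b
   exactly when the remainder is nonzero and its sign differs from b's
   (the sign test is the xor trick (r ^ b) < 0).  For example:

       __Pyx_mod_long(-7, 3)  ==  2    where C computes -7 % 3  == -1
       __Pyx_mod_long( 7, -3) == -2    where C computes  7 % -3 ==  1
*/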
*instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { if (PyObject_IsSubclass(instance_class, type)) { type = instance_class; } else { instance_class = NULL; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
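/* Descriptive note: the cause handling in the Python 3 __Pyx_Raise above
   implements "raise X from Y": an exception class used as a cause is
   instantiated first (PyObject_CallObject), an instance gets an extra
   reference, and PyException_SetCause then steals that reference while
   setting value.__cause__.  The traceback splice at the end writes the
   supplied tb straight into tstate->curexc_traceback so the raise shows
   the caller's traceback rather than this helper's. */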
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON result = PyDict_GetItem(__pyx_d, name); if (result) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict) { PyObject *getbuffer_cobj = PyObject_GetItem( obj->ob_type->tp_dict, __pyx_n_s_pyx_getbuffer); if (getbuffer_cobj) { getbufferproc func = (getbufferproc) PyCObject_AsVoidPtr(getbuffer_cobj); Py_DECREF(getbuffer_cobj); if (!func) goto fail; return func(obj, view, flags); } else { PyErr_Clear(); } } #endif PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); #if PY_VERSION_HEX < 0x02060000 fail: #endif return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; } #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict) { PyObject *releasebuffer_cobj = PyObject_GetItem( obj->ob_type->tp_dict, __pyx_n_s_pyx_releasebuffer); if (releasebuffer_cobj) { releasebufferproc func = (releasebufferproc) PyCObject_AsVoidPtr(releasebuffer_cobj); Py_DECREF(releasebuffer_cobj); if (!func) goto fail; func(obj, view); return; } else { PyErr_Clear(); } } #endif goto nofail; #if PY_VERSION_HEX < 0x02060000 fail: #endif PyErr_WriteUnraisable(obj); nofail: Py_DECREF(obj); view->obj = NULL; } #endif /* PY_MAJOR_VERSION < 3 */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = 
PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value) { const unsigned int neg_one = (unsigned int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(unsigned int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(unsigned int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(unsigned int) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long) value); } } else { if (sizeof(unsigned int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(unsigned int) <= sizeof(long long)) { return PyLong_FromLongLong((long long) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(unsigned int), little, !is_unsigned); } } #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func) \ { \ func_type value = func(x); \ if (sizeof(target_type) < sizeof(func_type)) { \ if (unlikely(value != (func_type) (target_type) value)) { \ func_type zero = 0; \ PyErr_SetString(PyExc_OverflowError, \ (is_unsigned && unlikely(value < zero)) ? 
\ "can't convert negative value to " #target_type : \ "value too large to convert to " #target_type); \ return (target_type) -1; \ } \ } \ return (target_type) value; \ } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) { const unsigned int neg_one = (unsigned int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(unsigned int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned int"); return (unsigned int) -1; } return (unsigned int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(unsigned int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (unsigned int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned int"); return (unsigned int) -1; } if (sizeof(unsigned int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(unsigned int) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(unsigned int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(unsigned int) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(unsigned int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(unsigned int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long, PyLong_AsLong) } else if (sizeof(unsigned int) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else unsigned int val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (unsigned int) -1; } } else { unsigned int val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned int) -1; val = __Pyx_PyInt_As_unsigned_int(tmp); Py_DECREF(tmp); return val; } } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static 
CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE 
__pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(int) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long) value); } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(long long)) { return PyLong_FromLongLong((long long) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG) } else 
{ long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(int) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(int) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong) } else if (sizeof(int) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(long) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long) value); } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(long long)) { return PyLong_FromLongLong((long long) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert 
negative value to long"); return (long) -1; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(long)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (long) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(long) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(long)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(long) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(long) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong) } else if (sizeof(long) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); 
Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include 
"frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif /*__PYX_DEFAULT_STRING_ENCODING_IS_ASCII*/ *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else /* PY_VERSION_HEX < 0x03030000 */ if (PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_DATA_SIZE(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ return PyUnicode_AsUTF8AndSize(o, length); #endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ #endif /* PY_VERSION_HEX < 0x03030000 */ } else #endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */ #if !CYTHON_COMPILING_IN_PYPY #if PY_VERSION_HEX >= 0x02060000 if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if 
PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) return PyInt_AS_LONG(b); #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS switch (Py_SIZE(b)) { case -1: return -(sdigit)((PyLongObject*)b)->ob_digit[0]; case 0: return 0; case 1: return ((PyLongObject*)b)->ob_digit[0]; } #endif #endif #if PY_VERSION_HEX < 0x02060000 return PyInt_AsSsize_t(b); #else return PyLong_AsSsize_t(b); #endif } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } #endif /* Py_PYTHON_H */
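/*
 * Editorial aside -- a minimal standalone sketch, not part of the generated
 * module above.  The integer conversion helpers in this file
 * (__Pyx_PyInt_From_int, __Pyx_PyInt_As_long, ...) all lean on two portable
 * probes: signedness is detected by casting -1 to the target type and
 * comparing it against zero, and byte order is detected by inspecting the
 * first byte of the integer 1 before handing raw bytes to
 * _PyLong_FromByteArray / _PyLong_AsByteArray.  The demo below reproduces
 * both probes in plain C; the demo_* names are invented for this
 * illustration only.
 */
#include <stdio.h>

/* ((type) -1) compares greater than zero exactly when the type is unsigned,
   because the -1 wraps around to the type's maximum value. */
#define DEMO_IS_UNSIGNED(type) (((type) -1) > (type) 0)

static int demo_is_little_endian(void) {
    int one = 1;
    /* Little-endian machines store the least significant byte first, so the
       first byte of the int 1 is 1; on big-endian machines it is 0. */
    return (int) *(unsigned char *) &one;
}

int main(void) {
    printf("unsigned int is unsigned: %d\n", DEMO_IS_UNSIGNED(unsigned int));
    printf("long is unsigned:         %d\n", DEMO_IS_UNSIGNED(long));
    printf("little endian:            %d\n", demo_is_little_endian());
    return 0;
}
/* The endianness flag matters only on the byte-array fallback path, which
   serializes the C integer's raw in-memory bytes when the value does not
   fit any of the fixed-width PyLong constructors. */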
/* Generated by Cython 0.20.1 on Thu Jul 3 12:41:06 2014 */ #define PY_SSIZE_T_CLEAN #ifndef CYTHON_USE_PYLONG_INTERNALS #ifdef PYLONG_BITS_IN_DIGIT #define CYTHON_USE_PYLONG_INTERNALS 0 #else #include "pyconfig.h" #ifdef PYLONG_BITS_IN_DIGIT #define CYTHON_USE_PYLONG_INTERNALS 1 #else #define CYTHON_USE_PYLONG_INTERNALS 0 #endif #endif #endif #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #define CYTHON_ABI "0_20_1" #include <stddef.h> /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if CYTHON_COMPILING_IN_PYPY #define Py_OptimizeFlag 0 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_As_int(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, 
lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #if PY_VERSION_HEX < 0x02060000 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX < 0x02060000 #define Py_TPFLAGS_HAVE_VERSION_TAG 0 #endif #if PY_VERSION_HEX < 0x02060000 && !defined(Py_TPFLAGS_IS_ABSTRACT) #define Py_TPFLAGS_IS_ABSTRACT 0 #endif #if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE) #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj) || \ PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? 
(PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is a quiet NaN. 
*/ float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #define __PYX_HAVE__fatiando__gravmag___polyprism #define __PYX_HAVE_API__fatiando__gravmag___polyprism #include "math.h" #include "string.h" #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "omp.h" #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \ (sizeof(type) < sizeof(Py_ssize_t)) || \ (sizeof(type) > sizeof(Py_ssize_t) && \ likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX) && \ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN || \ v == (type)PY_SSIZE_T_MIN))) || \ (sizeof(type) == sizeof(Py_ssize_t) && \ (is_signed || likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromUString(s) __Pyx_PyObject_FromString((char*)s) #define __Pyx_PyBytes_FromUString(s) __Pyx_PyBytes_FromString((char*)s) #define __Pyx_PyByteArray_FromUString(s) __Pyx_PyByteArray_FromString((char*)s) #define __Pyx_PyStr_FromUString(s) __Pyx_PyStr_FromString((char*)s) #define __Pyx_PyUnicode_FromUString(s) 
__Pyx_PyUnicode_FromString((char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return u_end - u - 1; } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys = NULL; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; sys = PyImport_ImportModule("sys"); if (sys == NULL) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); if (default_encoding == NULL) goto bad; if (strcmp(PyBytes_AsString(default_encoding), "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { const char* default_encoding_c = PyBytes_AS_STRING(default_encoding); char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (ascii_chars_u == NULL) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (ascii_chars_b == NULL || strncmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } } Py_XDECREF(sys); Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return 0; bad: Py_XDECREF(sys); Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys = NULL; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (sys == NULL) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); if (default_encoding == NULL) goto bad; default_encoding_c = PyBytes_AS_STRING(default_encoding); __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); /* + 1: the strcpy below also writes the terminating NUL */
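/* The encoding name is copied once into module-owned storage at import time, so every later __Pyx_PyUnicode_FromStringAndSize call can pass __PYX_DEFAULT_STRING_ENCODING straight to PyUnicode_Decode without re-querying sys.getdefaultencoding(). */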
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(sys); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(sys); Py_XDECREF(default_encoding); return -1; } #endif #endif #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "_polyprism.pyx", "__init__.pxd", "type.pxd", }; #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; /* for error messages only */ struct __Pyx_StructField_* fields; size_t size; /* sizeof(type) */ size_t arraysize[8]; /* length of array in each dimension */ int ndim; char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */ char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":723 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":724 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":725 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":730 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":731 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":732 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":737 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":738 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":747 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":748 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":749 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":753 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":760 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "fatiando/gravmag/_polyprism.pyx":16 * * DTYPE = numpy.float * ctypedef numpy.float_t DTYPE_T # <<<<<<<<<<<<<< * * cdef inline double kernelz(double X1, double Y1, double X2, double Y2, */ typedef __pyx_t_5numpy_float_t __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":764 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_XDECREF_SET(r, v) do { \ PyObject *tmp = (PyObject *) r; \ r = v; __Pyx_XDECREF(tmp); \ } while (0) #define __Pyx_DECREF_SET(r, v) do { \ PyObject *tmp = (PyObject *) r; \ r = v; __Pyx_DECREF(tmp); \ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} 
while(0) #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif static PyObject *__Pyx_GetBuiltinName(PyObject *name); /*proto*/ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) static CYTHON_INLINE long __Pyx_mod_long(long, long); /* proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); /*proto*/ #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /*proto*/ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /*proto*/ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value); static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *); #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) 
#define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if (defined(_WIN32) || defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex 
__Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /*proto*/ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/ typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'libc.math' */ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'cython' */ /* Module declarations from 'openmp' */ /* Module declarations from 'fatiando.gravmag._polyprism' */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelz(double, double, double, double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(double, double, double, double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(double, double, double, double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(double, double, double, double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(double, double, double, double, double, double, double, double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(double, double, double, double, double, double, double, 
double); /*proto*/ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(double, double, double, double, double, double, double, double); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T = { "DTYPE_T", NULL, sizeof(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "fatiando.gravmag._polyprism" int __pyx_module_is_main_fatiando__gravmag___polyprism = 0; /* Implementation of 'fatiando.gravmag._polyprism' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_gz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_2gxx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_4gxy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_6gxz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_8gyy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_10gyz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_12gzz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_14tf(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, double __pyx_v_fx, double __pyx_v_fy, double __pyx_v_fz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_16bx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, 
PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_18by(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_20bz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static char __pyx_k_B[] = "B"; static char __pyx_k_H[] = "H"; static char __pyx_k_I[] = "I"; static char __pyx_k_L[] = "L"; static char __pyx_k_O[] = "O"; static char __pyx_k_Q[] = "Q"; static char __pyx_k_b[] = "b"; static char __pyx_k_d[] = "d"; static char __pyx_k_f[] = "f"; static char __pyx_k_g[] = "g"; static char __pyx_k_h[] = "h"; static char __pyx_k_i[] = "i"; static char __pyx_k_k[] = "k"; static char __pyx_k_l[] = "l"; static char __pyx_k_q[] = "q"; static char __pyx_k_x[] = "x"; static char __pyx_k_y[] = "y"; static char __pyx_k_X1[] = "X1"; static char __pyx_k_X2[] = "X2"; static char __pyx_k_Y1[] = "Y1"; static char __pyx_k_Y2[] = "Y2"; static char __pyx_k_Z1[] = "Z1"; static char __pyx_k_Z2[] = "Z2"; static char __pyx_k_Zd[] = "Zd"; static char __pyx_k_Zf[] = "Zf"; static char __pyx_k_Zg[] = "Zg"; static char __pyx_k_bx[] = "bx"; static char __pyx_k_by[] = "by"; static char __pyx_k_bz[] = "bz"; static char __pyx_k_fx[] = "fx"; static char __pyx_k_fy[] = "fy"; static char __pyx_k_fz[] = "fz"; static char __pyx_k_gz[] = "gz"; static char __pyx_k_mx[] = "mx"; static char __pyx_k_my[] = "my"; static char __pyx_k_mz[] = "mz"; static char __pyx_k_tf[] = "tf"; static char __pyx_k_v1[] = "v1"; static char __pyx_k_v2[] = "v2"; static char __pyx_k_v3[] = "v3"; static char __pyx_k_v4[] = "v4"; static char __pyx_k_v5[] = "v5"; static char __pyx_k_v6[] = "v6"; static char __pyx_k_xp[] = "xp"; static char __pyx_k_yp[] = "yp"; static char __pyx_k_z1[] = "z1"; static char __pyx_k_z2[] = "z2"; static char __pyx_k_zp[] = "zp"; static char __pyx_k_gxx[] = "gxx"; static char __pyx_k_gxy[] = "gxy"; static char __pyx_k_gxz[] = "gxz"; static char __pyx_k_gyy[] = "gyy"; static char __pyx_k_gyz[] = "gyz"; static char __pyx_k_gzz[] = "gzz"; static char __pyx_k_kp1[] = "kp1"; static char __pyx_k_res[] = "res"; static char __pyx_k_main[] = "__main__"; static char __pyx_k_size[] = "size"; static char __pyx_k_test[] = "__test__"; static char __pyx_k_DTYPE[] = "DTYPE"; static char __pyx_k_float[] = "float"; static char __pyx_k_numpy[] = "numpy"; static char __pyx_k_range[] = "range"; static char __pyx_k_Z1_sqr[] = "Z1_sqr"; static char __pyx_k_Z2_sqr[] = "Z2_sqr"; static char __pyx_k_import[] = "__import__"; static char __pyx_k_kernel[] = "kernel"; static char __pyx_k_nverts[] = "nverts"; 
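/*
 * Editor's sketch (not part of the Cython-generated output): the interned
 * names above -- "size", "nverts", "k", "kp1" -- come from the .pyx driver
 * functions such as gz(), whose generated wrapper is prototyped above and
 * whose kernel, kernelz(), is defined further below.  A plausible reading of
 * that driver, reconstructed only from these names and the kernelz()
 * signature (X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr), is the double loop
 * below: for each observation point, accumulate the contribution of every
 * polygon edge in coordinates shifted to that point.  The modulo wrap-around
 * for kp1, the coordinate shift, and the plain-double arrays (the real code
 * uses PyArrayObject buffers) are assumptions for illustration, not taken
 * from the generated code.
 */
#if 0   /* illustrative sketch only -- never compiled */
static void example_gz_loop(const double *xp, const double *yp,
                            const double *zp, const double *x,
                            const double *y, double z1, double z2,
                            double density, double *res,
                            int size, int nverts)
{
    int i, k, kp1;
    for (i = 0; i < size; i++) {            /* one observation point */
        double Z1 = z1 - zp[i], Z2 = z2 - zp[i];
        for (k = 0; k < nverts; k++) {      /* one edge of the polygon */
            kp1 = (k + 1) % nverts;         /* assumed wrap-around */
            res[i] += density *
                __pyx_f_8fatiando_7gravmag_10_polyprism_kernelz(
                    x[k] - xp[i], y[k] - yp[i],
                    x[kp1] - xp[i], y[kp1] - yp[i],
                    Z1, Z2, Z1*Z1, Z2*Z2);
        }
    }
}
#endif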
static char __pyx_k_density[] = "density"; static char __pyx_k_ValueError[] = "ValueError"; static char __pyx_k_RuntimeError[] = "RuntimeError"; static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static char __pyx_k_pyx_releasebuffer[] = "__pyx_releasebuffer"; static char __pyx_k_fatiando_gravmag__polyprism[] = "fatiando.gravmag._polyprism"; static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static char __pyx_k_home_leo_src_fatiando_fatiando[] = "/home/leo/src/fatiando/fatiando/gravmag/_polyprism.pyx"; static char __pyx_k_This_is_a_Cython_implementation[] = "\nThis is a Cython implementation of the potential fields of a polygonal prism.\nA pure python implementation is in _polyprism_numpy.py\n"; static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_n_s_DTYPE; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_X1; static PyObject *__pyx_n_s_X2; static PyObject *__pyx_n_s_Y1; static PyObject *__pyx_n_s_Y2; static PyObject *__pyx_n_s_Z1; static PyObject *__pyx_n_s_Z1_sqr; static PyObject *__pyx_n_s_Z2; static PyObject *__pyx_n_s_Z2_sqr; static PyObject *__pyx_n_s_bx; static PyObject *__pyx_n_s_by; static PyObject *__pyx_n_s_bz; static PyObject *__pyx_n_s_density; static PyObject *__pyx_n_s_fatiando_gravmag__polyprism; static PyObject *__pyx_n_s_float; static PyObject *__pyx_n_s_fx; static PyObject *__pyx_n_s_fy; static PyObject *__pyx_n_s_fz; static PyObject *__pyx_n_s_gxx; static PyObject *__pyx_n_s_gxy; static PyObject *__pyx_n_s_gxz; static PyObject *__pyx_n_s_gyy; static PyObject *__pyx_n_s_gyz; static PyObject *__pyx_n_s_gz; static PyObject *__pyx_n_s_gzz; static PyObject *__pyx_kp_s_home_leo_src_fatiando_fatiando; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_k; static PyObject *__pyx_n_s_kernel; static PyObject *__pyx_n_s_kp1; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_mx; static PyObject *__pyx_n_s_my; static PyObject *__pyx_n_s_mz; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_nverts; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_releasebuffer; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_res; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_tf; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_v1; static PyObject *__pyx_n_s_v2; static PyObject *__pyx_n_s_v3; static PyObject *__pyx_n_s_v4; static PyObject *__pyx_n_s_v5; static PyObject *__pyx_n_s_v6; static PyObject *__pyx_n_s_x; static PyObject *__pyx_n_s_xp; static PyObject *__pyx_n_s_y; static PyObject *__pyx_n_s_yp; static PyObject *__pyx_n_s_z1; static PyObject 
*__pyx_n_s_z2; static PyObject *__pyx_n_s_zp; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__27; static PyObject *__pyx_codeobj__8; static PyObject *__pyx_codeobj__10; static PyObject *__pyx_codeobj__12; static PyObject *__pyx_codeobj__14; static PyObject *__pyx_codeobj__16; static PyObject *__pyx_codeobj__18; static PyObject *__pyx_codeobj__20; static PyObject *__pyx_codeobj__22; static PyObject *__pyx_codeobj__24; static PyObject *__pyx_codeobj__26; static PyObject *__pyx_codeobj__28; /* "fatiando/gravmag/_polyprism.pyx":18 * ctypedef numpy.float_t DTYPE_T * * cdef inline double kernelz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelz(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, double __pyx_v_Z1_sqr, double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Qk1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Qk2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Ak1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Ak2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R1k1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R1k2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R2k1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R2k2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Bk1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Bk2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_E1k1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_E1k2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_E2k1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_E2k2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Ck1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Ck2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":24 * Qk1, Qk2, Ak1, Ak2, R1k1, R1k2, R2k1, R2k2, Bk1, Bk2, E1k1, \ * E1k2, E2k1, E2k2, Ck1, Ck2 * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * p = X1*Y2 - X2*Y1 */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":25 * E1k2, E2k1, E2k2, Ck1, Ck2 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * p = X1*Y2 - X2*Y1 * p_sqr = 
p**2 */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":26 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * p = X1*Y2 - X2*Y1 # <<<<<<<<<<<<<< * p_sqr = p**2 * Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 */ __pyx_v_p = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":27 * kernel = 0 * p = X1*Y2 - X2*Y1 * p_sqr = p**2 # <<<<<<<<<<<<<< * Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 * Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 */ __pyx_v_p_sqr = pow(__pyx_v_p, 2.0); /* "fatiando/gravmag/_polyprism.pyx":28 * p = X1*Y2 - X2*Y1 * p_sqr = p**2 * Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 # <<<<<<<<<<<<<< * Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 * Ak1 = X1**2 + Y1**2 */ __pyx_v_Qk1 = (((__pyx_v_Y2 - __pyx_v_Y1) * __pyx_v_Y1) + ((__pyx_v_X2 - __pyx_v_X1) * __pyx_v_X1)); /* "fatiando/gravmag/_polyprism.pyx":29 * p_sqr = p**2 * Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 * Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 # <<<<<<<<<<<<<< * Ak1 = X1**2 + Y1**2 * Ak2 = X2**2 + Y2**2 */ __pyx_v_Qk2 = (((__pyx_v_Y2 - __pyx_v_Y1) * __pyx_v_Y2) + ((__pyx_v_X2 - __pyx_v_X1) * __pyx_v_X2)); /* "fatiando/gravmag/_polyprism.pyx":30 * Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 * Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 * Ak1 = X1**2 + Y1**2 # <<<<<<<<<<<<<< * Ak2 = X2**2 + Y2**2 * R1k1 = sqrt(Ak1 + Z1_sqr) */ __pyx_v_Ak1 = (pow(__pyx_v_X1, 2.0) + pow(__pyx_v_Y1, 2.0)); /* "fatiando/gravmag/_polyprism.pyx":31 * Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 * Ak1 = X1**2 + Y1**2 * Ak2 = X2**2 + Y2**2 # <<<<<<<<<<<<<< * R1k1 = sqrt(Ak1 + Z1_sqr) * R1k2 = sqrt(Ak2 + Z1_sqr) */ __pyx_v_Ak2 = (pow(__pyx_v_X2, 2.0) + pow(__pyx_v_Y2, 2.0)); /* "fatiando/gravmag/_polyprism.pyx":32 * Ak1 = X1**2 + Y1**2 * Ak2 = X2**2 + Y2**2 * R1k1 = sqrt(Ak1 + Z1_sqr) # <<<<<<<<<<<<<< * R1k2 = sqrt(Ak2 + Z1_sqr) * R2k1 = sqrt(Ak1 + Z2_sqr) */ __pyx_v_R1k1 = sqrt((__pyx_v_Ak1 + __pyx_v_Z1_sqr)); /* "fatiando/gravmag/_polyprism.pyx":33 * Ak2 = X2**2 + Y2**2 * R1k1 = sqrt(Ak1 + Z1_sqr) * R1k2 = sqrt(Ak2 + Z1_sqr) # <<<<<<<<<<<<<< * R2k1 = sqrt(Ak1 + Z2_sqr) * R2k2 = sqrt(Ak2 + Z2_sqr) */ __pyx_v_R1k2 = sqrt((__pyx_v_Ak2 + __pyx_v_Z1_sqr)); /* "fatiando/gravmag/_polyprism.pyx":34 * R1k1 = sqrt(Ak1 + Z1_sqr) * R1k2 = sqrt(Ak2 + Z1_sqr) * R2k1 = sqrt(Ak1 + Z2_sqr) # <<<<<<<<<<<<<< * R2k2 = sqrt(Ak2 + Z2_sqr) * Ak1 = sqrt(Ak1) */ __pyx_v_R2k1 = sqrt((__pyx_v_Ak1 + __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":35 * R1k2 = sqrt(Ak2 + Z1_sqr) * R2k1 = sqrt(Ak1 + Z2_sqr) * R2k2 = sqrt(Ak2 + Z2_sqr) # <<<<<<<<<<<<<< * Ak1 = sqrt(Ak1) * Ak2 = sqrt(Ak2) */ __pyx_v_R2k2 = sqrt((__pyx_v_Ak2 + __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":36 * R2k1 = sqrt(Ak1 + Z2_sqr) * R2k2 = sqrt(Ak2 + Z2_sqr) * Ak1 = sqrt(Ak1) # <<<<<<<<<<<<<< * Ak2 = sqrt(Ak2) * Bk1 = sqrt(Qk1**2 + p_sqr) */ __pyx_v_Ak1 = sqrt(__pyx_v_Ak1); /* "fatiando/gravmag/_polyprism.pyx":37 * R2k2 = sqrt(Ak2 + Z2_sqr) * Ak1 = sqrt(Ak1) * Ak2 = sqrt(Ak2) # <<<<<<<<<<<<<< * Bk1 = sqrt(Qk1**2 + p_sqr) * Bk2 = sqrt(Qk2**2 + p_sqr) */ __pyx_v_Ak2 = sqrt(__pyx_v_Ak2); /* "fatiando/gravmag/_polyprism.pyx":38 * Ak1 = sqrt(Ak1) * Ak2 = sqrt(Ak2) * Bk1 = sqrt(Qk1**2 + p_sqr) # <<<<<<<<<<<<<< * Bk2 = sqrt(Qk2**2 + p_sqr) * E1k1 = R1k1*Bk1 */ __pyx_v_Bk1 = sqrt((pow(__pyx_v_Qk1, 2.0) + __pyx_v_p_sqr)); /* "fatiando/gravmag/_polyprism.pyx":39 * Ak2 = sqrt(Ak2) * Bk1 = sqrt(Qk1**2 + p_sqr) * Bk2 = sqrt(Qk2**2 + p_sqr) # <<<<<<<<<<<<<< * E1k1 = R1k1*Bk1 * E1k2 = R1k2*Bk2 */ __pyx_v_Bk2 = sqrt((pow(__pyx_v_Qk2, 2.0) + __pyx_v_p_sqr)); /* "fatiando/gravmag/_polyprism.pyx":40 * Bk1 = sqrt(Qk1**2 + p_sqr) * Bk2 = 
sqrt(Qk2**2 + p_sqr) * E1k1 = R1k1*Bk1 # <<<<<<<<<<<<<< * E1k2 = R1k2*Bk2 * E2k1 = R2k1*Bk1 */ __pyx_v_E1k1 = (__pyx_v_R1k1 * __pyx_v_Bk1); /* "fatiando/gravmag/_polyprism.pyx":41 * Bk2 = sqrt(Qk2**2 + p_sqr) * E1k1 = R1k1*Bk1 * E1k2 = R1k2*Bk2 # <<<<<<<<<<<<<< * E2k1 = R2k1*Bk1 * E2k2 = R2k2*Bk2 */ __pyx_v_E1k2 = (__pyx_v_R1k2 * __pyx_v_Bk2); /* "fatiando/gravmag/_polyprism.pyx":42 * E1k1 = R1k1*Bk1 * E1k2 = R1k2*Bk2 * E2k1 = R2k1*Bk1 # <<<<<<<<<<<<<< * E2k2 = R2k2*Bk2 * kernel += (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) */ __pyx_v_E2k1 = (__pyx_v_R2k1 * __pyx_v_Bk1); /* "fatiando/gravmag/_polyprism.pyx":43 * E1k2 = R1k2*Bk2 * E2k1 = R2k1*Bk1 * E2k2 = R2k2*Bk2 # <<<<<<<<<<<<<< * kernel += (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) * kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, R2k2*p)) */ __pyx_v_E2k2 = (__pyx_v_R2k2 * __pyx_v_Bk2); /* "fatiando/gravmag/_polyprism.pyx":44 * E2k1 = R2k1*Bk1 * E2k2 = R2k2*Bk2 * kernel += (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) # <<<<<<<<<<<<<< * kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, R2k2*p)) * kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) */ __pyx_v_kernel = (__pyx_v_kernel + ((__pyx_v_Z2 - __pyx_v_Z1) * (atan2(__pyx_v_Qk2, __pyx_v_p) - atan2(__pyx_v_Qk1, __pyx_v_p)))); /* "fatiando/gravmag/_polyprism.pyx":45 * E2k2 = R2k2*Bk2 * kernel += (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) * kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, R2k2*p)) # <<<<<<<<<<<<<< * kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) * Ck1 = Qk1*Ak1 */ __pyx_v_kernel = (__pyx_v_kernel + (__pyx_v_Z2 * (atan2((__pyx_v_Z2 * __pyx_v_Qk1), (__pyx_v_R2k1 * __pyx_v_p)) - atan2((__pyx_v_Z2 * __pyx_v_Qk2), (__pyx_v_R2k2 * __pyx_v_p))))); /* "fatiando/gravmag/_polyprism.pyx":46 * kernel += (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) * kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, R2k2*p)) * kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) # <<<<<<<<<<<<<< * Ck1 = Qk1*Ak1 * Ck2 = Qk2*Ak2 */ __pyx_v_kernel = (__pyx_v_kernel + (__pyx_v_Z1 * (atan2((__pyx_v_Z1 * __pyx_v_Qk2), (__pyx_v_R1k2 * __pyx_v_p)) - atan2((__pyx_v_Z1 * __pyx_v_Qk1), (__pyx_v_R1k1 * __pyx_v_p))))); /* "fatiando/gravmag/_polyprism.pyx":47 * kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, R2k2*p)) * kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) * Ck1 = Qk1*Ak1 # <<<<<<<<<<<<<< * Ck2 = Qk2*Ak2 * # dummy helps prevent zero division errors */ __pyx_v_Ck1 = (__pyx_v_Qk1 * __pyx_v_Ak1); /* "fatiando/gravmag/_polyprism.pyx":48 * kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) * Ck1 = Qk1*Ak1 * Ck2 = Qk2*Ak2 # <<<<<<<<<<<<<< * # dummy helps prevent zero division errors * kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( */ __pyx_v_Ck2 = (__pyx_v_Qk2 * __pyx_v_Ak2); /* "fatiando/gravmag/_polyprism.pyx":50 * Ck2 = Qk2*Ak2 * # dummy helps prevent zero division errors * kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( # <<<<<<<<<<<<<< * log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) */ __pyx_t_1 = (__pyx_v_Bk1 + __pyx_v_dummy); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":51 * # dummy helps prevent zero division errors * kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( * log((E1k1 - Ck1)/(E1k1 
+ Ck1 + dummy) + dummy) - # <<<<<<<<<<<<<< * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( */ __pyx_t_2 = (__pyx_v_E1k1 - __pyx_v_Ck1); __pyx_t_3 = ((__pyx_v_E1k1 + __pyx_v_Ck1) + __pyx_v_dummy); if (unlikely(__pyx_t_3 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":52 * kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( * log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) # <<<<<<<<<<<<<< * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - */ __pyx_t_4 = (__pyx_v_E2k1 - __pyx_v_Ck1); __pyx_t_5 = ((__pyx_v_E2k1 + __pyx_v_Ck1) + __pyx_v_dummy); if (unlikely(__pyx_t_5 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":50 * Ck2 = Qk2*Ak2 * # dummy helps prevent zero division errors * kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( # <<<<<<<<<<<<<< * log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) */ __pyx_v_kernel = (__pyx_v_kernel + (((0.5 * __pyx_v_p) * (__pyx_v_Ak1 / __pyx_t_1)) * (log(((__pyx_t_2 / __pyx_t_3) + __pyx_v_dummy)) - log(((__pyx_t_4 / __pyx_t_5) + __pyx_v_dummy))))); /* "fatiando/gravmag/_polyprism.pyx":53 * log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( # <<<<<<<<<<<<<< * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - * log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) */ __pyx_t_5 = (__pyx_v_Bk2 + __pyx_v_dummy); if (unlikely(__pyx_t_5 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":54 * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - # <<<<<<<<<<<<<< * log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) * return kernel */ __pyx_t_4 = (__pyx_v_E2k2 - __pyx_v_Ck2); __pyx_t_3 = ((__pyx_v_E2k2 + __pyx_v_Ck2) + __pyx_v_dummy); if (unlikely(__pyx_t_3 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":55 * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - * log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) # <<<<<<<<<<<<<< * return kernel * */ __pyx_t_2 = (__pyx_v_E1k2 - __pyx_v_Ck2); __pyx_t_1 = ((__pyx_v_E1k2 + __pyx_v_Ck2) + __pyx_v_dummy); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD 
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "fatiando/gravmag/_polyprism.pyx":53 * log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( # <<<<<<<<<<<<<< * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - * log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) */ __pyx_v_kernel = (__pyx_v_kernel + (((0.5 * __pyx_v_p) * (__pyx_v_Ak2 / __pyx_t_5)) * (log(((__pyx_t_4 / __pyx_t_3) + __pyx_v_dummy)) - log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy))))); /* "fatiando/gravmag/_polyprism.pyx":56 * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - * log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelxx(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":18 * ctypedef numpy.float_t DTYPE_T * * cdef inline double kernelz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":58 * return kernel * * cdef inline double kernelxx(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_n; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_g; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; 
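/* Editor's note on the locals of kernelxx(): the aux0..aux15 temporaries
   stage one edge's contribution to the gxx gradient-tensor component.  From
   the assignments below: n = aux0/aux1 is the edge's slope dX/dY and
   g = X1 - Y1*n its X-intercept; aux2 is the edge length; p = aux3/aux2 is
   the perpendicular distance from the (shifted) observation point to the
   edge line; d1 = aux4/aux2 and d2 = aux5/aux2 are the endpoints' along-edge
   projections; and R11/R12/R21/R22 are the distances to the four edge
   corners (two endpoints at the two depths Z1, Z2).  dummy = 1e-10 is the
   offset the .pyx source adds to denominators and to log/atan2 arguments
   "to avoid singularities", which is why the generated zero-division guards
   below should never actually fire. */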
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":65 * aux14, aux15, aux16, n, g, p, d1, d2, \ * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * aux0 = X2 - X1 + dummy */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":66 * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":67 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * aux0 = X2 - X1 + dummy # <<<<<<<<<<<<<< * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":68 * kernel = 0 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< * n = (aux0/aux1) * g = X1 - (Y1*n) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":69 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) # <<<<<<<<<<<<<< * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_n = (__pyx_v_aux0 / __pyx_v_aux1); /* "fatiando/gravmag/_polyprism.pyx":70 * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) * g = X1 - (Y1*n) # <<<<<<<<<<<<<< * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) */ __pyx_v_g = (__pyx_v_X1 - (__pyx_v_Y1 * __pyx_v_n)); /* "fatiando/gravmag/_polyprism.pyx":71 * n = (aux0/aux1) * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* "fatiando/gravmag/_polyprism.pyx":72 * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) # <<<<<<<<<<<<<< * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_aux3 = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":73 * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_p = ((__pyx_v_aux3 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":74 * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) # <<<<<<<<<<<<<< * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * 
__pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":75 * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) # <<<<<<<<<<<<<< * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":76 * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy # <<<<<<<<<<<<<< * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":77 * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy # <<<<<<<<<<<<<< * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":78 * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) # <<<<<<<<<<<<<< * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":79 * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) # <<<<<<<<<<<<<< * aux8 = Z1*Z1 * aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":80 * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 # <<<<<<<<<<<<<< * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* "fatiando/gravmag/_polyprism.pyx":81 * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 * aux9 = Z2*Z2 # <<<<<<<<<<<<<< * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* "fatiando/gravmag/_polyprism.pyx":82 * aux8 = Z1*Z1 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) # <<<<<<<<<<<<<< * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":83 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":84 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":85 * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":86 * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) # <<<<<<<<<<<<<< * 
aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R22)); /* "fatiando/gravmag/_polyprism.pyx":87 * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R21)); /* "fatiando/gravmag/_polyprism.pyx":88 * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":89 * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) # <<<<<<<<<<<<<< * aux14 = ((p*aux12)/d2) * res = (g*Y2*aux13) + (n*aux14) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":90 * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) # <<<<<<<<<<<<<< * res = (g*Y2*aux13) + (n*aux14) * aux10 = atan2((Z2*d1), (p*R12)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d2); /* "fatiando/gravmag/_polyprism.pyx":91 * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) * res = (g*Y2*aux13) + (n*aux14) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) */ __pyx_v_res = (((__pyx_v_g * __pyx_v_Y2) * __pyx_v_aux13) + (__pyx_v_n * __pyx_v_aux14)); /* "fatiando/gravmag/_polyprism.pyx":92 * aux14 = ((p*aux12)/d2) * res = (g*Y2*aux13) + (n*aux14) * aux10 = atan2((Z2*d1), (p*R12)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R12)); /* "fatiando/gravmag/_polyprism.pyx":93 * res = (g*Y2*aux13) + (n*aux14) * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R11)); /* "fatiando/gravmag/_polyprism.pyx":94 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":95 * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) # <<<<<<<<<<<<<< * aux14 = ((p*aux12)/d1) * res -= (g*Y1*aux13) + (n*aux14) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":96 * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) # <<<<<<<<<<<<<< * res -= (g*Y1*aux13) + (n*aux14) * aux10 = log(((Z2 + R22) + dummy)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d1); /* "fatiando/gravmag/_polyprism.pyx":97 * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) * res -= (g*Y1*aux13) + (n*aux14) # <<<<<<<<<<<<<< * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) */ __pyx_v_res = (__pyx_v_res - (((__pyx_v_g * __pyx_v_Y1) * __pyx_v_aux13) + (__pyx_v_n * __pyx_v_aux14))); /* "fatiando/gravmag/_polyprism.pyx":98 * aux14 = ((p*aux12)/d1) * res -= (g*Y1*aux13) + (n*aux14) * aux10 = log(((Z2 + R22) + dummy)) # <<<<<<<<<<<<<< * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) */ __pyx_v_aux10 = log(((__pyx_v_Z2 + __pyx_v_R22) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":99 * res -= (g*Y1*aux13) + (n*aux14) * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) # <<<<<<<<<<<<<< * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) */ __pyx_v_aux11 = log(((__pyx_v_Z1 + __pyx_v_R21) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":100 * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) # <<<<<<<<<<<<<< * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 */ __pyx_v_aux12 = log(((__pyx_v_Z2 + __pyx_v_R12) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":101 * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) # <<<<<<<<<<<<<< * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 */ __pyx_v_aux13 = log(((__pyx_v_Z1 + __pyx_v_R11) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":102 * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 # <<<<<<<<<<<<<< * aux15 = aux12 - aux13 * res += (n*(aux15 - aux14)) */ __pyx_v_aux14 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":103 * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 # <<<<<<<<<<<<<< * res += (n*(aux15 - aux14)) * aux0 = (1.0/(1.0 + (n*n))) */ __pyx_v_aux15 = (__pyx_v_aux12 - __pyx_v_aux13); /* "fatiando/gravmag/_polyprism.pyx":104 * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 * res += (n*(aux15 - aux14)) # <<<<<<<<<<<<<< * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 */ __pyx_v_res = (__pyx_v_res + (__pyx_v_n * (__pyx_v_aux15 - __pyx_v_aux14))); /* "fatiando/gravmag/_polyprism.pyx":105 * aux15 = aux12 - aux13 * res += (n*(aux15 - aux14)) * aux0 = (1.0/(1.0 + (n*n))) # <<<<<<<<<<<<<< * res *= -aux0 * kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_n * __pyx_v_n)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux0 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":106 * res += (n*(aux15 - aux14)) * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 # <<<<<<<<<<<<<< * kernel += res * return kernel */ __pyx_v_res = (__pyx_v_res * (-__pyx_v_aux0)); /* "fatiando/gravmag/_polyprism.pyx":107 * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 * kernel += res # <<<<<<<<<<<<<< * return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* "fatiando/gravmag/_polyprism.pyx":108 * res *= -aux0 * kernel += res * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelxy(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":58 * return kernel * * cdef inline double kernelxx(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelxx", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":110 * return kernel * * cdef inline double kernelxy(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_n; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_g; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":117 * aux14, aux15, 
aux16, n, g, p, d1, d2, \ * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * aux0 = X2 - X1 + dummy */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":118 * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":119 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * aux0 = X2 - X1 + dummy # <<<<<<<<<<<<<< * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":120 * kernel = 0 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< * n = (aux0/aux1) * g = X1 - (Y1*n) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":121 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) # <<<<<<<<<<<<<< * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_n = (__pyx_v_aux0 / __pyx_v_aux1); /* "fatiando/gravmag/_polyprism.pyx":122 * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) * g = X1 - (Y1*n) # <<<<<<<<<<<<<< * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) */ __pyx_v_g = (__pyx_v_X1 - (__pyx_v_Y1 * __pyx_v_n)); /* "fatiando/gravmag/_polyprism.pyx":123 * n = (aux0/aux1) * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* "fatiando/gravmag/_polyprism.pyx":124 * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) # <<<<<<<<<<<<<< * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_aux3 = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":125 * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_p = ((__pyx_v_aux3 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":126 * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) # <<<<<<<<<<<<<< * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":127 * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) # <<<<<<<<<<<<<< * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":128 * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) 
+ (aux1*Y2) * d1 = ((aux4/aux2)) + dummy # <<<<<<<<<<<<<< * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":129 * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy # <<<<<<<<<<<<<< * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":130 * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) # <<<<<<<<<<<<<< * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":131 * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) # <<<<<<<<<<<<<< * aux8 = Z1*Z1 * aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":132 * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 # <<<<<<<<<<<<<< * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* "fatiando/gravmag/_polyprism.pyx":133 * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 * aux9 = Z2*Z2 # <<<<<<<<<<<<<< * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* "fatiando/gravmag/_polyprism.pyx":134 * aux8 = Z1*Z1 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) # <<<<<<<<<<<<<< * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":135 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":136 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":137 * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":138 * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R22)); /* "fatiando/gravmag/_polyprism.pyx":139 * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * 
__pyx_v_d2), (__pyx_v_p * __pyx_v_R21)); /* "fatiando/gravmag/_polyprism.pyx":140 * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":141 * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) # <<<<<<<<<<<<<< * aux14 = ((p*aux12)/d2) * res = (((g*g) + (g*n*Y2))*aux13) - aux14 */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":142 * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) # <<<<<<<<<<<<<< * res = (((g*g) + (g*n*Y2))*aux13) - aux14 * aux10 = atan2((Z2*d1), (p*R12)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d2); /* "fatiando/gravmag/_polyprism.pyx":143 * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) * res = (((g*g) + (g*n*Y2))*aux13) - aux14 # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) */ __pyx_v_res = ((((__pyx_v_g * __pyx_v_g) + ((__pyx_v_g * __pyx_v_n) * __pyx_v_Y2)) * __pyx_v_aux13) - __pyx_v_aux14); /* "fatiando/gravmag/_polyprism.pyx":144 * aux14 = ((p*aux12)/d2) * res = (((g*g) + (g*n*Y2))*aux13) - aux14 * aux10 = atan2((Z2*d1), (p*R12)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R12)); /* "fatiando/gravmag/_polyprism.pyx":145 * res = (((g*g) + (g*n*Y2))*aux13) - aux14 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R11)); /* "fatiando/gravmag/_polyprism.pyx":146 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":147 * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) # <<<<<<<<<<<<<< * aux14 = ((p*aux12)/d1) * res -= (((g*g) + (g*n*Y1))*aux13) - aux14 */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":148 * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) # <<<<<<<<<<<<<< * res -= (((g*g) + 
(g*n*Y1))*aux13) - aux14 * aux10 = log(((Z2 + R22) + dummy)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d1); /* "fatiando/gravmag/_polyprism.pyx":149 * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) * res -= (((g*g) + (g*n*Y1))*aux13) - aux14 # <<<<<<<<<<<<<< * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) */ __pyx_v_res = (__pyx_v_res - ((((__pyx_v_g * __pyx_v_g) + ((__pyx_v_g * __pyx_v_n) * __pyx_v_Y1)) * __pyx_v_aux13) - __pyx_v_aux14)); /* "fatiando/gravmag/_polyprism.pyx":150 * aux14 = ((p*aux12)/d1) * res -= (((g*g) + (g*n*Y1))*aux13) - aux14 * aux10 = log(((Z2 + R22) + dummy)) # <<<<<<<<<<<<<< * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) */ __pyx_v_aux10 = log(((__pyx_v_Z2 + __pyx_v_R22) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":151 * res -= (((g*g) + (g*n*Y1))*aux13) - aux14 * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) # <<<<<<<<<<<<<< * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) */ __pyx_v_aux11 = log(((__pyx_v_Z1 + __pyx_v_R21) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":152 * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) # <<<<<<<<<<<<<< * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 */ __pyx_v_aux12 = log(((__pyx_v_Z2 + __pyx_v_R12) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":153 * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) # <<<<<<<<<<<<<< * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 */ __pyx_v_aux13 = log(((__pyx_v_Z1 + __pyx_v_R11) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":154 * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 # <<<<<<<<<<<<<< * aux15 = aux12 - aux13 * res += (aux14 - aux15) */ __pyx_v_aux14 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":155 * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 # <<<<<<<<<<<<<< * res += (aux14 - aux15) * aux0 = (1.0/(1.0 + (n*n))) */ __pyx_v_aux15 = (__pyx_v_aux12 - __pyx_v_aux13); /* "fatiando/gravmag/_polyprism.pyx":156 * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 * res += (aux14 - aux15) # <<<<<<<<<<<<<< * aux0 = (1.0/(1.0 + (n*n))) * res *= aux0 */ __pyx_v_res = (__pyx_v_res + (__pyx_v_aux14 - __pyx_v_aux15)); /* "fatiando/gravmag/_polyprism.pyx":157 * aux15 = aux12 - aux13 * res += (aux14 - aux15) * aux0 = (1.0/(1.0 + (n*n))) # <<<<<<<<<<<<<< * res *= aux0 * kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_n * __pyx_v_n)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux0 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":158 * res += (aux14 - aux15) * aux0 = (1.0/(1.0 + (n*n))) * res *= aux0 # <<<<<<<<<<<<<< * kernel += res * return kernel */ 
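/*
 * Note on the step below: the edge integral assembled above reduces to
 * arctangent terms (atan2 differences scaled by 1/(p*d) and by p/d) plus
 * logarithmic terms in (Z + R), and the final step rescales their sum by
 * aux0 = 1/(1 + n*n), where n = (X2 - X1)/(Y2 - Y1) is the inverse slope
 * of the polygon edge.  Unlike kernelxx above, which applies -aux0,
 * kernelxy keeps the positive sign.
 */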
__pyx_v_res = (__pyx_v_res * __pyx_v_aux0); /* "fatiando/gravmag/_polyprism.pyx":159 * aux0 = (1.0/(1.0 + (n*n))) * res *= aux0 * kernel += res # <<<<<<<<<<<<<< * return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* "fatiando/gravmag/_polyprism.pyx":160 * res *= aux0 * kernel += res * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelxz(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":110 * return kernel * * cdef inline double kernelxy(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelxy", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":162 * return kernel * * cdef inline double kernelxz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux16; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_n; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_g; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":169 * aux14, aux15, aux16, n, g, d1, d2, \ * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * aux0 = X2 - X1 + dummy */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":170 * R11, R12, R21, R22, res * 
DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":171 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * aux0 = X2 - X1 + dummy # <<<<<<<<<<<<<< * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":172 * kernel = 0 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< * n = (aux0/aux1) * g = X1 - (Y1*n) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":173 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) # <<<<<<<<<<<<<< * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_n = (__pyx_v_aux0 / __pyx_v_aux1); /* "fatiando/gravmag/_polyprism.pyx":174 * aux1 = Y2 - Y1 + dummy * n = (aux0/aux1) * g = X1 - (Y1*n) # <<<<<<<<<<<<<< * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_g = (__pyx_v_X1 - (__pyx_v_Y1 * __pyx_v_n)); /* "fatiando/gravmag/_polyprism.pyx":175 * n = (aux0/aux1) * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* "fatiando/gravmag/_polyprism.pyx":176 * g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux4 = (aux0*X1) + (aux1*Y1) # <<<<<<<<<<<<<< * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":177 * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) # <<<<<<<<<<<<<< * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":178 * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy # <<<<<<<<<<<<<< * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":179 * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy # <<<<<<<<<<<<<< * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + 
__pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":180 * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) # <<<<<<<<<<<<<< * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":181 * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) # <<<<<<<<<<<<<< * aux8 = Z1*Z1 * aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":182 * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 # <<<<<<<<<<<<<< * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* "fatiando/gravmag/_polyprism.pyx":183 * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 * aux9 = Z2*Z2 # <<<<<<<<<<<<<< * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* "fatiando/gravmag/_polyprism.pyx":184 * aux8 = Z1*Z1 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) # <<<<<<<<<<<<<< * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":185 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":186 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":187 * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":188 * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) # <<<<<<<<<<<<<< * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) */ __pyx_t_1 = (__pyx_v_R11 - __pyx_v_d1); __pyx_t_2 = (__pyx_v_R11 + __pyx_v_d1); if (unlikely(__pyx_t_2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux10 = log(((__pyx_t_1 / __pyx_t_2) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":189 * R22 = sqrt(aux7 + aux9) * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) # <<<<<<<<<<<<<< * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) */ __pyx_t_2 = (__pyx_v_R12 - __pyx_v_d1); __pyx_t_1 = (__pyx_v_R12 + __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux11 = log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":190 * aux10 = log((((R11 - d1)/(R11 + d1)) + 
dummy)) * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) # <<<<<<<<<<<<<< * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) * aux14 = (1.0/(2*d1)) */ __pyx_t_1 = (__pyx_v_R21 - __pyx_v_d2); __pyx_t_2 = (__pyx_v_R21 + __pyx_v_d2); if (unlikely(__pyx_t_2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux12 = log(((__pyx_t_1 / __pyx_t_2) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":191 * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) # <<<<<<<<<<<<<< * aux14 = (1.0/(2*d1)) * aux15 = (1.0/(2*d2)) */ __pyx_t_2 = (__pyx_v_R22 - __pyx_v_d2); __pyx_t_1 = (__pyx_v_R22 + __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":192 * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) * aux14 = (1.0/(2*d1)) # <<<<<<<<<<<<<< * aux15 = (1.0/(2*d2)) * aux16 = aux15*(aux13 - aux12) */ __pyx_t_1 = (2.0 * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":193 * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) * aux14 = (1.0/(2*d1)) * aux15 = (1.0/(2*d2)) # <<<<<<<<<<<<<< * aux16 = aux15*(aux13 - aux12) * res = (Y2*(1.0 + (n*n)) + g*n)*aux16 */ __pyx_t_1 = (2.0 * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux15 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":194 * aux14 = (1.0/(2*d1)) * aux15 = (1.0/(2*d2)) * aux16 = aux15*(aux13 - aux12) # <<<<<<<<<<<<<< * res = (Y2*(1.0 + (n*n)) + g*n)*aux16 * aux16 = aux14*(aux11 - aux10) */ __pyx_v_aux16 = (__pyx_v_aux15 * (__pyx_v_aux13 - __pyx_v_aux12)); /* "fatiando/gravmag/_polyprism.pyx":195 * aux15 = (1.0/(2*d2)) * aux16 = aux15*(aux13 - aux12) * res = (Y2*(1.0 + (n*n)) + g*n)*aux16 # <<<<<<<<<<<<<< * aux16 = aux14*(aux11 - aux10) * res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 */ __pyx_v_res = (((__pyx_v_Y2 * (1.0 + (__pyx_v_n * __pyx_v_n))) + (__pyx_v_g * __pyx_v_n)) * __pyx_v_aux16); /* "fatiando/gravmag/_polyprism.pyx":196 * aux16 = aux15*(aux13 - aux12) * res = (Y2*(1.0 + (n*n)) + g*n)*aux16 * aux16 = aux14*(aux11 - aux10) # <<<<<<<<<<<<<< * res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 * aux0 = (1.0/(1.0 + 
(n*n))) */ __pyx_v_aux16 = (__pyx_v_aux14 * (__pyx_v_aux11 - __pyx_v_aux10)); /* "fatiando/gravmag/_polyprism.pyx":197 * res = (Y2*(1.0 + (n*n)) + g*n)*aux16 * aux16 = aux14*(aux11 - aux10) * res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 # <<<<<<<<<<<<<< * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 */ __pyx_v_res = (__pyx_v_res - (((__pyx_v_Y1 * (1.0 + (__pyx_v_n * __pyx_v_n))) + (__pyx_v_g * __pyx_v_n)) * __pyx_v_aux16)); /* "fatiando/gravmag/_polyprism.pyx":198 * aux16 = aux14*(aux11 - aux10) * res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 * aux0 = (1.0/(1.0 + (n*n))) # <<<<<<<<<<<<<< * res *= -aux0 * kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_n * __pyx_v_n)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux0 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":199 * res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 # <<<<<<<<<<<<<< * kernel += res * return kernel */ __pyx_v_res = (__pyx_v_res * (-__pyx_v_aux0)); /* "fatiando/gravmag/_polyprism.pyx":200 * aux0 = (1.0/(1.0 + (n*n))) * res *= -aux0 * kernel += res # <<<<<<<<<<<<<< * return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* "fatiando/gravmag/_polyprism.pyx":201 * res *= -aux0 * kernel += res * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelyy(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":162 * return kernel * * cdef inline double kernelxz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelxz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":203 * return kernel * * cdef inline double kernelyy(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; 
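/*
 * kernelyy (this function) loosely mirrors kernelxy with the roles of x
 * and y exchanged: the edge is parameterized by the slope
 * m = (Y2 - Y1)/(X2 - X1) and intercept c = Y1 - X1*m instead of
 * n = (X2 - X1)/(Y2 - Y1) and g = X1 - Y1*n, the c*X1, c*X2 products enter
 * where kernelxy used its g-based combinations, and the final rescaling
 * uses aux1 = 1/(1 + m*m).  A sketch of the underlying Cython,
 * reconstructed from the echoed source comments (same variable names as
 * the .pyx file; both offsets carry the dummy = 1e-10 padding against
 * singularities):
 *
 *     m = (Y2 - Y1 + dummy) / (X2 - X1 + dummy)
 *     c = Y1 - (X1 * m)
 *     # ... atan2 and log terms as in kernelxy ...
 *     res *= 1.0 / (1.0 + m*m)
 *
 * The Z1_sqr/Z2_sqr arguments are accepted but unused here (marked
 * CYTHON_UNUSED); the kernels in this family share one signature,
 * presumably so callers can dispatch between them uniformly.
 */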
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_m; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_c; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":210 * aux14, aux15, p, m, c, d1, d2, \ * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * aux0 = X2 - X1 + dummy */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":211 * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":212 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * aux0 = X2 - X1 + dummy # <<<<<<<<<<<<<< * aux1 = Y2 - Y1 + dummy * m = (aux1/aux0) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":213 * kernel = 0 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< * m = (aux1/aux0) * c = Y1 - (X1*m) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":214 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy * m = (aux1/aux0) # <<<<<<<<<<<<<< * c = Y1 - (X1*m) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux0 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (__pyx_v_aux1 / __pyx_v_aux0); /* "fatiando/gravmag/_polyprism.pyx":215 * aux1 = Y2 - Y1 + dummy * m = (aux1/aux0) * c = Y1 - (X1*m) # <<<<<<<<<<<<<< * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) */ __pyx_v_c = (__pyx_v_Y1 - (__pyx_v_X1 * __pyx_v_m)); /* "fatiando/gravmag/_polyprism.pyx":216 * m = (aux1/aux0) * c = Y1 - (X1*m) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* "fatiando/gravmag/_polyprism.pyx":217 * c = Y1 - (X1*m) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) # <<<<<<<<<<<<<< * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_aux3 = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":218 * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE 
__pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_p = ((__pyx_v_aux3 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":219 * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) # <<<<<<<<<<<<<< * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":220 * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) # <<<<<<<<<<<<<< * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":221 * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy # <<<<<<<<<<<<<< * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 221; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":222 * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy # <<<<<<<<<<<<<< * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":223 * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) # <<<<<<<<<<<<<< * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":224 * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) # <<<<<<<<<<<<<< * aux8 = Z1*Z1 * aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":225 * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 # <<<<<<<<<<<<<< * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* "fatiando/gravmag/_polyprism.pyx":226 * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 * aux9 = Z2*Z2 # <<<<<<<<<<<<<< * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* "fatiando/gravmag/_polyprism.pyx":227 * aux8 = Z1*Z1 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) # <<<<<<<<<<<<<< * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":228 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* 
"fatiando/gravmag/_polyprism.pyx":229 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":230 * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":231 * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R22)); /* "fatiando/gravmag/_polyprism.pyx":232 * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R21)); /* "fatiando/gravmag/_polyprism.pyx":233 * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":234 * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) # <<<<<<<<<<<<<< * aux14 = ((p*aux12)/d2) * res = (c*X2*aux13) + (m*aux14) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":235 * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) # <<<<<<<<<<<<<< * res = (c*X2*aux13) + (m*aux14) * aux10 = atan2((Z2*d1), (p*R12)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d2); /* "fatiando/gravmag/_polyprism.pyx":236 * aux13 = (aux12/(p*d2)) * aux14 = ((p*aux12)/d2) * res = (c*X2*aux13) + (m*aux14) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) */ __pyx_v_res = (((__pyx_v_c * __pyx_v_X2) * __pyx_v_aux13) + (__pyx_v_m * __pyx_v_aux14)); /* "fatiando/gravmag/_polyprism.pyx":237 * aux14 = ((p*aux12)/d2) * res = (c*X2*aux13) + (m*aux14) * aux10 = atan2((Z2*d1), (p*R12)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R12)); /* "fatiando/gravmag/_polyprism.pyx":238 * res = (c*X2*aux13) + (m*aux14) * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R11)); /* "fatiando/gravmag/_polyprism.pyx":239 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = 
atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":240 * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) # <<<<<<<<<<<<<< * aux14 = ((p*aux12)/d1) * res -= (c*X1*aux13) + (m*aux14) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 240; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":241 * aux12 = aux10 - aux11 * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) # <<<<<<<<<<<<<< * res -= (c*X1*aux13) + (m*aux14) * aux10 = log(((Z2 + R22) + dummy)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d1); /* "fatiando/gravmag/_polyprism.pyx":242 * aux13 = (aux12/(p*d1)) * aux14 = ((p*aux12)/d1) * res -= (c*X1*aux13) + (m*aux14) # <<<<<<<<<<<<<< * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) */ __pyx_v_res = (__pyx_v_res - (((__pyx_v_c * __pyx_v_X1) * __pyx_v_aux13) + (__pyx_v_m * __pyx_v_aux14))); /* "fatiando/gravmag/_polyprism.pyx":243 * aux14 = ((p*aux12)/d1) * res -= (c*X1*aux13) + (m*aux14) * aux10 = log(((Z2 + R22) + dummy)) # <<<<<<<<<<<<<< * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) */ __pyx_v_aux10 = log(((__pyx_v_Z2 + __pyx_v_R22) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":244 * res -= (c*X1*aux13) + (m*aux14) * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) # <<<<<<<<<<<<<< * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) */ __pyx_v_aux11 = log(((__pyx_v_Z1 + __pyx_v_R21) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":245 * aux10 = log(((Z2 + R22) + dummy)) * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) # <<<<<<<<<<<<<< * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 */ __pyx_v_aux12 = log(((__pyx_v_Z2 + __pyx_v_R12) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":246 * aux11 = log(((Z1 + R21) + dummy)) * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) # <<<<<<<<<<<<<< * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 */ __pyx_v_aux13 = log(((__pyx_v_Z1 + __pyx_v_R11) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":247 * aux12 = log(((Z2 + R12) + dummy)) * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 # <<<<<<<<<<<<<< * aux15 = aux12 - aux13 * res += (m*(aux15 - aux14)) */ __pyx_v_aux14 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":248 * aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 # <<<<<<<<<<<<<< * res += (m*(aux15 - aux14)) * aux1 = (1.0/(1.0 + (m*m))) */ __pyx_v_aux15 = (__pyx_v_aux12 - __pyx_v_aux13); /* "fatiando/gravmag/_polyprism.pyx":249 * aux14 = aux10 - aux11 * aux15 = aux12 - aux13 * res += (m*(aux15 
- aux14)) # <<<<<<<<<<<<<< * aux1 = (1.0/(1.0 + (m*m))) * res *= aux1 */ __pyx_v_res = (__pyx_v_res + (__pyx_v_m * (__pyx_v_aux15 - __pyx_v_aux14))); /* "fatiando/gravmag/_polyprism.pyx":250 * aux15 = aux12 - aux13 * res += (m*(aux15 - aux14)) * aux1 = (1.0/(1.0 + (m*m))) # <<<<<<<<<<<<<< * res *= aux1 * kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_m * __pyx_v_m)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux1 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":251 * res += (m*(aux15 - aux14)) * aux1 = (1.0/(1.0 + (m*m))) * res *= aux1 # <<<<<<<<<<<<<< * kernel += res * return kernel */ __pyx_v_res = (__pyx_v_res * __pyx_v_aux1); /* "fatiando/gravmag/_polyprism.pyx":252 * aux1 = (1.0/(1.0 + (m*m))) * res *= aux1 * kernel += res # <<<<<<<<<<<<<< * return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* "fatiando/gravmag/_polyprism.pyx":253 * res *= aux1 * kernel += res * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelyz(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":203 * return kernel * * cdef inline double kernelyy(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelyy", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":255 * return kernel * * cdef inline double kernelyz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux16; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_m; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_c; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; 
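/*
 * kernelyz (this function) mirrors kernelxz the same way kernelyy mirrors
 * kernelxy: logarithmic terms of the form log((R - d)/(R + d) + dummy),
 * weighted by 1/(2*d1) and 1/(2*d2), with the slope m = (Y2 - Y1)/(X2 - X1)
 * and intercept c = Y1 - X1*m taking over the roles of n and g.
 */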
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":262 * aux14, aux15, aux16, m, c, d1, d2, \ * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * aux0 = X2 - X1 + dummy */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":263 * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":264 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * aux0 = X2 - X1 + dummy # <<<<<<<<<<<<<< * aux1 = Y2 - Y1 + dummy * m = (aux1/aux0) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":265 * kernel = 0 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< * m = (aux1/aux0) * c = Y1 - (X1*m) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":266 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy * m = (aux1/aux0) # <<<<<<<<<<<<<< * c = Y1 - (X1*m) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux0 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 266; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_m = (__pyx_v_aux1 / __pyx_v_aux0); /* "fatiando/gravmag/_polyprism.pyx":267 * aux1 = Y2 - Y1 + dummy * m = (aux1/aux0) * c = Y1 - (X1*m) # <<<<<<<<<<<<<< * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_c = (__pyx_v_Y1 - (__pyx_v_X1 * __pyx_v_m)); /* "fatiando/gravmag/_polyprism.pyx":268 * m = (aux1/aux0) * c = Y1 - (X1*m) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* "fatiando/gravmag/_polyprism.pyx":269 * c = Y1 - (X1*m) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux4 = (aux0*X1) + (aux1*Y1) # <<<<<<<<<<<<<< * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":270 * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) # <<<<<<<<<<<<<< * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":271 * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy # <<<<<<<<<<<<<< * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD 
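/*
 * Generated guard: these kernels are declared nogil, so before raising
 * ZeroDivisionError Cython briefly re-acquires the GIL via
 * PyGILState_Ensure/PyGILState_Release and then jumps to the error label.
 * Because a nogil function returning double cannot propagate a Python
 * exception, __Pyx_WriteUnraisable reports it at the function exit and the
 * kernel returns 0.  Compiling the .pyx with cdivision enabled would remove
 * these checks; the dummy = 1e-10 padding already keeps the denominators
 * away from zero in practice.
 */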
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":272 * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy # <<<<<<<<<<<<<< * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":273 * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) # <<<<<<<<<<<<<< * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":274 * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) # <<<<<<<<<<<<<< * aux8 = Z1*Z1 * aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":275 * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 # <<<<<<<<<<<<<< * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* "fatiando/gravmag/_polyprism.pyx":276 * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 * aux9 = Z2*Z2 # <<<<<<<<<<<<<< * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* "fatiando/gravmag/_polyprism.pyx":277 * aux8 = Z1*Z1 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) # <<<<<<<<<<<<<< * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":278 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":279 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":280 * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":281 * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) # <<<<<<<<<<<<<< * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) */ __pyx_t_1 = (__pyx_v_R11 - __pyx_v_d1); __pyx_t_2 = (__pyx_v_R11 + __pyx_v_d1); if (unlikely(__pyx_t_2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 281; __pyx_clineno 
= __LINE__; goto __pyx_L1_error;} } __pyx_v_aux10 = log(((__pyx_t_1 / __pyx_t_2) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":282 * R22 = sqrt(aux7 + aux9) * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) # <<<<<<<<<<<<<< * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) */ __pyx_t_2 = (__pyx_v_R12 - __pyx_v_d1); __pyx_t_1 = (__pyx_v_R12 + __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux11 = log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":283 * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) # <<<<<<<<<<<<<< * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) * aux14 = (1.0/(2*d1)) */ __pyx_t_1 = (__pyx_v_R21 - __pyx_v_d2); __pyx_t_2 = (__pyx_v_R21 + __pyx_v_d2); if (unlikely(__pyx_t_2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux12 = log(((__pyx_t_1 / __pyx_t_2) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":284 * aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) # <<<<<<<<<<<<<< * aux14 = (1.0/(2*d1)) * aux15 = (1.0/(2*d2)) */ __pyx_t_2 = (__pyx_v_R22 - __pyx_v_d2); __pyx_t_1 = (__pyx_v_R22 + __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux13 = log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy)); /* "fatiando/gravmag/_polyprism.pyx":285 * aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) * aux14 = (1.0/(2*d1)) # <<<<<<<<<<<<<< * aux15 = (1.0/(2*d2)) * aux16 = aux15*(aux13 - aux12) */ __pyx_t_1 = (2.0 * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 285; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux14 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":286 * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) * aux14 = (1.0/(2*d1)) * aux15 = (1.0/(2*d2)) # <<<<<<<<<<<<<< * aux16 = aux15*(aux13 - aux12) * res = (X2*(1.0 + (m*m)) + c*m)*aux16 */ __pyx_t_1 = (2.0 * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux15 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":287 * aux14 = (1.0/(2*d1)) * aux15 = (1.0/(2*d2)) * aux16 = aux15*(aux13 - aux12) # <<<<<<<<<<<<<< * res = (X2*(1.0 + (m*m)) + c*m)*aux16 * aux16 = aux14*(aux11 - aux10) */ __pyx_v_aux16 = (__pyx_v_aux15 * (__pyx_v_aux13 - __pyx_v_aux12)); /* "fatiando/gravmag/_polyprism.pyx":288 * aux15 = (1.0/(2*d2)) * aux16 = aux15*(aux13 - aux12) * res = (X2*(1.0 + (m*m)) + c*m)*aux16 # <<<<<<<<<<<<<< * aux16 = aux14*(aux11 - aux10) * res -= (X1*(1.0 + (m*m)) + c*m)*aux16 */ __pyx_v_res = (((__pyx_v_X2 * (1.0 + (__pyx_v_m * __pyx_v_m))) + (__pyx_v_c * __pyx_v_m)) * __pyx_v_aux16); /* "fatiando/gravmag/_polyprism.pyx":289 * aux16 = aux15*(aux13 - aux12) * res = (X2*(1.0 + (m*m)) + c*m)*aux16 * aux16 = aux14*(aux11 - aux10) # <<<<<<<<<<<<<< * res -= (X1*(1.0 + (m*m)) + c*m)*aux16 * aux1 = (1.0/(1.0 + (m*m))) */ __pyx_v_aux16 = (__pyx_v_aux14 * (__pyx_v_aux11 - __pyx_v_aux10)); /* "fatiando/gravmag/_polyprism.pyx":290 * res = (X2*(1.0 + (m*m)) + c*m)*aux16 * aux16 = aux14*(aux11 - aux10) * res -= (X1*(1.0 + (m*m)) + c*m)*aux16 # <<<<<<<<<<<<<< * aux1 = (1.0/(1.0 + (m*m))) * res *= aux1 */ __pyx_v_res = (__pyx_v_res - (((__pyx_v_X1 * (1.0 + (__pyx_v_m * __pyx_v_m))) + (__pyx_v_c * __pyx_v_m)) * __pyx_v_aux16)); /* "fatiando/gravmag/_polyprism.pyx":291 * aux16 = aux14*(aux11 - aux10) * res -= (X1*(1.0 + (m*m)) + c*m)*aux16 * aux1 = (1.0/(1.0 + (m*m))) # <<<<<<<<<<<<<< * res *= aux1 * kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_m * __pyx_v_m)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 291; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_aux1 = (1.0 / __pyx_t_1); /* "fatiando/gravmag/_polyprism.pyx":292 * res -= (X1*(1.0 + (m*m)) + c*m)*aux16 * aux1 = (1.0/(1.0 + (m*m))) * res *= aux1 # <<<<<<<<<<<<<< * kernel += res * return kernel */ __pyx_v_res = (__pyx_v_res * __pyx_v_aux1); /* "fatiando/gravmag/_polyprism.pyx":293 * aux1 = (1.0/(1.0 + (m*m))) * res *= aux1 * kernel += res # <<<<<<<<<<<<<< * return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* "fatiando/gravmag/_polyprism.pyx":294 * res *= aux1 * kernel += res * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelzz(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":255 * return kernel * * cdef inline double kernelyz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelyz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":296 * return kernel * * cdef inline double kernelzz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { 
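/* kernelzz: per-edge kernel of the zz component.  The locals declared below
   mirror the cdef block of the .pyx source: aux0..aux12 scratch values, the
   perpendicular distance p, the vertex projections d1/d2, and the corner
   distances R11..R22.  Z1_sqr and Z2_sqr are accepted only so that all the
   kernel functions share one signature; they are unused here, hence the
   CYTHON_UNUSED qualifier in the parameter list. */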
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "fatiando/gravmag/_polyprism.pyx":302 * aux5, aux6, aux7, aux8, aux9, aux10, aux11, aux12, p, d1, d2, \ * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 * aux0 = X2 - X1 + dummy */ __pyx_v_dummy = 1e-10; /* "fatiando/gravmag/_polyprism.pyx":303 * R11, R12, R21, R22, res * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 # <<<<<<<<<<<<<< * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":304 * DTYPE_T dummy = 1e-10 # Used to avoid singularities * kernel = 0 * aux0 = X2 - X1 + dummy # <<<<<<<<<<<<<< * aux1 = Y2 - Y1 + dummy * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":305 * kernel = 0 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":306 * aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* "fatiando/gravmag/_polyprism.pyx":307 * aux1 = Y2 - Y1 + dummy * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) # <<<<<<<<<<<<<< * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_aux3 = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":308 * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD 
PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_p = ((__pyx_v_aux3 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":309 * aux3 = (X1*Y2) - (X2*Y1) * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) # <<<<<<<<<<<<<< * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":310 * p = ((aux3/aux2)) + dummy * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) # <<<<<<<<<<<<<< * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":311 * aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy # <<<<<<<<<<<<<< * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 311; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":312 * aux5 = (aux0*X2) + (aux1*Y2) * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy # <<<<<<<<<<<<<< * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 312; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* "fatiando/gravmag/_polyprism.pyx":313 * d1 = ((aux4/aux2)) + dummy * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) # <<<<<<<<<<<<<< * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* "fatiando/gravmag/_polyprism.pyx":314 * d2 = ((aux5/aux2)) + dummy * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) # <<<<<<<<<<<<<< * aux8 = Z1*Z1 * aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* "fatiando/gravmag/_polyprism.pyx":315 * aux6 = (X1*X1) + (Y1*Y1) * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 # <<<<<<<<<<<<<< * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* "fatiando/gravmag/_polyprism.pyx":316 * aux7 = (X2*X2) + (Y2*Y2) * aux8 = Z1*Z1 * aux9 = Z2*Z2 # <<<<<<<<<<<<<< * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* "fatiando/gravmag/_polyprism.pyx":317 * aux8 = Z1*Z1 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) # <<<<<<<<<<<<<< * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":318 * aux9 = Z2*Z2 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":319 * R11 = sqrt(aux6 + aux8) * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) * 
aux10 = atan2((Z2*d2), (p*R22)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* "fatiando/gravmag/_polyprism.pyx":320 * R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* "fatiando/gravmag/_polyprism.pyx":321 * R21 = sqrt(aux7 + aux8) * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R22)); /* "fatiando/gravmag/_polyprism.pyx":322 * R22 = sqrt(aux7 + aux9) * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * res = aux12 */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R21)); /* "fatiando/gravmag/_polyprism.pyx":323 * aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * res = aux12 * aux10 = atan2((Z2*d1), (p*R12)) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":324 * aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 * res = aux12 # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) */ __pyx_v_res = __pyx_v_aux12; /* "fatiando/gravmag/_polyprism.pyx":325 * aux12 = aux10 - aux11 * res = aux12 * aux10 = atan2((Z2*d1), (p*R12)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R12)); /* "fatiando/gravmag/_polyprism.pyx":326 * res = aux12 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) # <<<<<<<<<<<<<< * aux12 = aux10 - aux11 * res -= aux12 */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R11)); /* "fatiando/gravmag/_polyprism.pyx":327 * aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 # <<<<<<<<<<<<<< * res -= aux12 * kernel += res */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* "fatiando/gravmag/_polyprism.pyx":328 * aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 * res -= aux12 # <<<<<<<<<<<<<< * kernel += res * return kernel */ __pyx_v_res = (__pyx_v_res - __pyx_v_aux12); /* "fatiando/gravmag/_polyprism.pyx":329 * aux12 = aux10 - aux11 * res -= aux12 * kernel += res # <<<<<<<<<<<<<< * return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* "fatiando/gravmag/_polyprism.pyx":330 * res -= aux12 * kernel += res * return kernel # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* "fatiando/gravmag/_polyprism.pyx":296 * return kernel * * cdef inline double kernelzz(double X1, double Y1, double X2, double Y2, # <<<<<<<<<<<<<< * double Z1, double Z2, double Z1_sqr, double Z2_sqr) nogil: * cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelzz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":334 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_1gz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ 
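/* ------------------------------------------------------------------------
 * Editor's sketch (not part of the generated module): the kernelzz
 * computation above, restated as plain C without the Cython scaffolding.
 * The name kernelzz_sketch is hypothetical; the arithmetic is a direct
 * transcription of the .pyx lines quoted in the comments above.  p is the
 * signed perpendicular distance from the observation point (the local
 * origin) to the edge (X1,Y1)-(X2,Y2), d1 and d2 are the projections of
 * the two vertices onto the edge direction, and R11..R22 are the distances
 * to the four edge corners at depths Z1 and Z2.  The 1e-10 "dummy" offsets
 * mirror the source and stand in for the explicit ZeroDivisionError checks
 * of the generated code.
 */
#include <math.h>

static double kernelzz_sketch(double X1, double Y1, double X2, double Y2,
                              double Z1, double Z2)
{
    const double dummy = 1e-10;                 /* singularity guard        */
    double dx  = X2 - X1 + dummy;
    double dy  = Y2 - Y1 + dummy;
    double len = sqrt(dx*dx + dy*dy);           /* edge length              */
    double p   = (X1*Y2 - X2*Y1)/len + dummy;   /* perpendicular distance   */
    double d1  = (dx*X1 + dy*Y1)/len + dummy;   /* projection of vertex 1   */
    double d2  = (dx*X2 + dy*Y2)/len + dummy;   /* projection of vertex 2   */
    double r1  = X1*X1 + Y1*Y1;
    double r2  = X2*X2 + Y2*Y2;
    double R11 = sqrt(r1 + Z1*Z1), R12 = sqrt(r1 + Z2*Z2);
    double R21 = sqrt(r2 + Z1*Z1), R22 = sqrt(r2 + Z2*Z2);
    /* difference of the atan2 (angle) terms between the two depths and the
       two vertex projections, exactly as in the .pyx source */
    return (atan2(Z2*d2, p*R22) - atan2(Z1*d2, p*R21))
         - (atan2(Z2*d1, p*R12) - atan2(Z1*d1, p*R11));
}
/* ---------------------------------------------------------------------- */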
static char __pyx_doc_8fatiando_7gravmag_10_polyprism_gz[] = "gz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_1gz = {__Pyx_NAMESTR("gz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_1gz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_gz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_1gz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, 
__pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 335; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 336; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 337; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 338; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), 
__pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_gz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_gz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, 
&__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":344 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 344; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
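/* The section below corresponds to `nverts = len(x); size = len(res)`
   followed by `with nogil: for i in prange(size)`.  Cython releases the GIL
   (Py_UNBLOCK_THREADS) and, in an OpenMP-enabled build, runs the loop body
   in parallel; the __pyx_parallel_temp* slots and the
   PyGILState_Ensure/Release pairs around the error paths are scaffolding
   for carrying, in effect, lastprivate values and exceptions out of the
   parallel region. */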
__pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":345 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 345; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":346 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":347 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); unsigned int __pyx_parallel_temp4 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); unsigned int __pyx_parallel_temp6 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); unsigned int __pyx_parallel_temp9 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { { for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":348 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); 
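/* Each NumPy subscript in the .pyx source (zp[i], x[k], ...) compiles to a
   __Pyx_BufPtrStrided1d dereference as above; the macro is essentially
   (type)((char *)buf + i*stride), a raw strided lookup into the buffer
   exposed by the ndarray, which is what lets this loop run without the
   GIL. */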
/* "fatiando/gravmag/_polyprism.pyx":349 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":350 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":351 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":352 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":353 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":354 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":355 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":356 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":357 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, 
__pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":358 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":359 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":360 * Y2 = y[kp1] - yp[i] * kernel += kernelz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; { __pyx_parallel_temp0 = __pyx_v_Y1; __pyx_parallel_temp1 = __pyx_v_Z2_sqr; __pyx_parallel_temp2 = __pyx_v_Z1_sqr; __pyx_parallel_temp3 = __pyx_v_Y2; __pyx_parallel_temp4 = __pyx_v_k; __pyx_parallel_temp5 = __pyx_v_Z1; __pyx_parallel_temp6 = __pyx_v_kp1; __pyx_parallel_temp7 = __pyx_v_Z2; __pyx_parallel_temp8 = __pyx_v_kernel; __pyx_parallel_temp9 = __pyx_v_i; __pyx_parallel_temp10 = __pyx_v_X1; __pyx_parallel_temp11 = __pyx_v_X2; } __pyx_L13:; } } /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Y1 = __pyx_parallel_temp0; __pyx_v_Z2_sqr = __pyx_parallel_temp1; __pyx_v_Z1_sqr = __pyx_parallel_temp2; __pyx_v_Y2 = __pyx_parallel_temp3; __pyx_v_k = __pyx_parallel_temp4; __pyx_v_Z1 = __pyx_parallel_temp5; __pyx_v_kp1 = __pyx_parallel_temp6; __pyx_v_Z2 = __pyx_parallel_temp7; __pyx_v_kernel = __pyx_parallel_temp8; __pyx_v_i = __pyx_parallel_temp9; __pyx_v_X1 = __pyx_parallel_temp10; __pyx_v_X2 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":346 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":334 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":364 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject 
*__pyx_pw_8fatiando_7gravmag_10_polyprism_3gxx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_2gxx[] = "gxx(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_3gxx = {__Pyx_NAMESTR("gxx"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_3gxx, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_2gxx)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_3gxx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxx (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 6); {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxx") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxx", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 365; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 368; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_2gxx(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_2gxx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxx", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem 
__pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":374 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if 
(unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":375 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":376 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":377 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); unsigned int __pyx_parallel_temp4 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); unsigned int __pyx_parallel_temp8 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); unsigned int __pyx_parallel_temp11 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { { for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); /* "fatiando/gravmag/_polyprism.pyx":378 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - 
(*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":379 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":380 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":381 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":382 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":383 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":384 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":385 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":386 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":387 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, 
__pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":388 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":389 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":390 * Y2 = y[kp1] - yp[i] * kernel += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; { __pyx_parallel_temp0 = __pyx_v_X2; __pyx_parallel_temp1 = __pyx_v_kernel; __pyx_parallel_temp2 = __pyx_v_Z2_sqr; __pyx_parallel_temp3 = __pyx_v_Y1; __pyx_parallel_temp4 = __pyx_v_i; __pyx_parallel_temp5 = __pyx_v_Z1; __pyx_parallel_temp6 = __pyx_v_Z1_sqr; __pyx_parallel_temp7 = __pyx_v_Y2; __pyx_parallel_temp8 = __pyx_v_k; __pyx_parallel_temp9 = __pyx_v_X1; __pyx_parallel_temp10 = __pyx_v_Z2; __pyx_parallel_temp11 = __pyx_v_kp1; } __pyx_L13:; } } /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_X2 = __pyx_parallel_temp0; __pyx_v_kernel = __pyx_parallel_temp1; __pyx_v_Z2_sqr = __pyx_parallel_temp2; __pyx_v_Y1 = __pyx_parallel_temp3; __pyx_v_i = __pyx_parallel_temp4; __pyx_v_Z1 = __pyx_parallel_temp5; __pyx_v_Z1_sqr = __pyx_parallel_temp6; __pyx_v_Y2 = __pyx_parallel_temp7; __pyx_v_k = __pyx_parallel_temp8; __pyx_v_X1 = __pyx_parallel_temp9; __pyx_v_Z2 = __pyx_parallel_temp10; __pyx_v_kp1 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":376 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":364 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":394 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject 
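/* Editor's note: the generated block above implements gxx() from
   fatiando/gravmag/_polyprism.pyx (the interleaved "pyx" comments show
   the original source). Stripped of the buffer and exception
   machinery, its computational core is equivalent to the following
   hand-written C sketch -- a minimal illustration, assuming a
   kernelxx() with the same eight-argument signature as the generated
   __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx, and assuming the
   usual OpenMP lowering of prange (the pragma itself does not appear
   in this dump):

       #pragma omp parallel for
       for (unsigned int i = 0; i < size; i++) {
           double Z1 = z1 - zp[i], Z2 = z2 - zp[i];
           double Z1_sqr = Z1 * Z1, Z2_sqr = Z2 * Z2;
           double kernel = 0.0;
           for (unsigned int k = 0; k < nverts; k++) {
               double X1 = x[k] - xp[i], Y1 = y[k] - yp[i];
               unsigned int kp1 = (k + 1) % nverts;  // wrap last vertex back to first
               double X2 = x[kp1] - xp[i], Y2 = y[kp1] - yp[i];
               kernel += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr);
           }
           res[i] += kernel * density;  // accumulate in place, scaled by density
       }

   Each observation point i sums an edge-wise kernel around the prism's
   polygonal outline (vertex pairs k, k+1 taken modulo nverts), which is
   why the generated code guards the modulo with a zero check that
   re-acquires the GIL before raising ZeroDivisionError. */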
*__pyx_pw_8fatiando_7gravmag_10_polyprism_5gxy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_4gxy[] = "gxy(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_5gxy = {__Pyx_NAMESTR("gxy"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_5gxy, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_4gxy)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_5gxy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxy (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 6); {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxy") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; 
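/* Editor's note: this wrapper is Cython's standard
   METH_VARARGS|METH_KEYWORDS entry point. It unpacks exactly nine
   arguments, positionally or by keyword via the __pyx_pyargnames
   table, converts z1, z2, and density with __pyx_PyFloat_AsDouble, and
   rejects any non-ndarray value for the array parameters with
   __Pyx_ArgTypeTest before dispatching to the implementation function
   __pyx_pf_8fatiando_7gravmag_10_polyprism_4gxy. */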
__pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_4gxy(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_4gxy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxy", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem 
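/* Editor's note: each __Pyx_GetBufferAndValidate call below acquires a
   PEP 3118 buffer and validates its dtype (DTYPE_T) and ndim (1). Only
   res is requested with PyBUF_WRITABLE, consistent with it being the
   sole output array (res[i] += kernel*density). On any failure the
   code jumps to __pyx_L1_error, where every buffer acquired so far is
   released with __Pyx_SafeReleaseBuffer. */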
__pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":404 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if 
(unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 404; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":405 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 405; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":406 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":407 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { unsigned int __pyx_parallel_temp0 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; unsigned int __pyx_parallel_temp11 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { { for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); /* "fatiando/gravmag/_polyprism.pyx":408 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - 
(*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":409 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":410 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":411 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":412 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":413 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":414 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":415 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":416 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":417 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, 
__pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":418 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":419 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":420 * Y2 = y[kp1] - yp[i] * kernel += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; { __pyx_parallel_temp0 = __pyx_v_k; __pyx_parallel_temp1 = __pyx_v_Y1; __pyx_parallel_temp2 = __pyx_v_X2; __pyx_parallel_temp3 = __pyx_v_Z1; __pyx_parallel_temp4 = __pyx_v_Y2; __pyx_parallel_temp5 = __pyx_v_Z2_sqr; __pyx_parallel_temp6 = __pyx_v_Z1_sqr; __pyx_parallel_temp7 = __pyx_v_Z2; __pyx_parallel_temp8 = __pyx_v_kernel; __pyx_parallel_temp9 = __pyx_v_X1; __pyx_parallel_temp10 = __pyx_v_kp1; __pyx_parallel_temp11 = __pyx_v_i; } __pyx_L13:; } } /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_k = __pyx_parallel_temp0; __pyx_v_Y1 = __pyx_parallel_temp1; __pyx_v_X2 = __pyx_parallel_temp2; __pyx_v_Z1 = __pyx_parallel_temp3; __pyx_v_Y2 = __pyx_parallel_temp4; __pyx_v_Z2_sqr = __pyx_parallel_temp5; __pyx_v_Z1_sqr = __pyx_parallel_temp6; __pyx_v_Z2 = __pyx_parallel_temp7; __pyx_v_kernel = __pyx_parallel_temp8; __pyx_v_X1 = __pyx_parallel_temp9; __pyx_v_kp1 = __pyx_parallel_temp10; __pyx_v_i = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":406 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":394 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":424 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject 
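/* Editor's note: gxy() above is structurally identical to gxx(); only
   the accumulated kernel differs (kernelxy instead of kernelxx). The
   parallel-section plumbing is also shared: each thread saves its loop
   variables into the __pyx_parallel_temp* slots, and
   __pyx_parallel_why records how the loop was left (4 = exception), so
   that after the parallel region one thread can restore the variables
   and re-raise with the correct file and line via __Pyx_ErrRestore.
   The bare "#ifndef _OPENMP }" with no matching pragma suggests this
   dump is a variant with the OpenMP directives stripped; with OpenMP
   enabled, Cython's prange normally lowers to a
   "#pragma omp parallel for" over i, as sketched earlier. */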
*__pyx_pw_8fatiando_7gravmag_10_polyprism_7gxz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_6gxz[] = "gxz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_7gxz = {__Pyx_NAMESTR("gxz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_7gxz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_6gxz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_7gxz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 6); {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 429; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 429; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 429; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 425; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 426; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 427; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 428; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 430; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_6gxz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_6gxz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem 
__pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":434 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if 
(unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 434; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":435 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 435; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":436 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":437 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); unsigned int __pyx_parallel_temp2 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); unsigned int __pyx_parallel_temp5 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); unsigned int __pyx_parallel_temp9 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { { for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":438 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - 
(*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":439 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":440 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":441 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":442 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":443 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":444 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":445 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":446 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 446; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":447 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, 
__pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":448 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":449 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":450 * Y2 = y[kp1] - yp[i] * kernel += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; { __pyx_parallel_temp0 = __pyx_v_kernel; __pyx_parallel_temp1 = __pyx_v_Y2; __pyx_parallel_temp2 = __pyx_v_k; __pyx_parallel_temp3 = __pyx_v_Z1; __pyx_parallel_temp4 = __pyx_v_Z2_sqr; __pyx_parallel_temp5 = __pyx_v_kp1; __pyx_parallel_temp6 = __pyx_v_X2; __pyx_parallel_temp7 = __pyx_v_Z1_sqr; __pyx_parallel_temp8 = __pyx_v_Z2; __pyx_parallel_temp9 = __pyx_v_i; __pyx_parallel_temp10 = __pyx_v_X1; __pyx_parallel_temp11 = __pyx_v_Y1; } __pyx_L13:; } } /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_kernel = __pyx_parallel_temp0; __pyx_v_Y2 = __pyx_parallel_temp1; __pyx_v_k = __pyx_parallel_temp2; __pyx_v_Z1 = __pyx_parallel_temp3; __pyx_v_Z2_sqr = __pyx_parallel_temp4; __pyx_v_kp1 = __pyx_parallel_temp5; __pyx_v_X2 = __pyx_parallel_temp6; __pyx_v_Z1_sqr = __pyx_parallel_temp7; __pyx_v_Z2 = __pyx_parallel_temp8; __pyx_v_i = __pyx_parallel_temp9; __pyx_v_X1 = __pyx_parallel_temp10; __pyx_v_Y1 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":436 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":424 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":454 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject 
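/* gyy(): same generated structure as gxz() above, but accumulating
   kernelyy edge contributions. By its name and arguments it evaluates
   the yy gravity-gradient contribution of a polygonal prism of the
   given density. A readable sketch of the Cython source this block was
   generated from, reassembled from the .pyx fragments (lines 454-480)
   quoted throughout the function body below; treat it as a
   reconstruction under those fragments, not verbatim module source:

       def gyy(xp, yp, zp, x, y, z1, z2, density, res):
           nverts = len(x)
           size = len(res)
           with nogil:
               for i in prange(size):
                   Z1 = z1 - zp[i]
                   Z2 = z2 - zp[i]
                   Z1_sqr = Z1**2
                   Z2_sqr = Z2**2
                   kernel = 0
                   for k in range(nverts):
                       X1 = x[k] - xp[i]
                       Y1 = y[k] - yp[i]
                       kp1 = (k + 1) % nverts
                       X2 = x[kp1] - xp[i]
                       Y2 = y[kp1] - yp[i]
                       kernel += kernelyy(X1, Y1, X2, Y2,
                                          Z1, Z2, Z1_sqr, Z2_sqr)
                   res[i] += kernel*density
*/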
*__pyx_pw_8fatiando_7gravmag_10_polyprism_9gyy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_8gyy[] = "gyy(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_9gyy = {__Pyx_NAMESTR("gyy"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_9gyy, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_8gyy)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_9gyy(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gyy (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 6); {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gyy") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 459; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 459; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 459; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gyy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 456; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 457; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 458; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_8gyy(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_8gyy(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gyy", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem 
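/* Buffer-acquisition prologue (generated): each ndarray argument is
   validated as a 1-d strided buffer of DTYPE_T via
   __Pyx_GetBufferAndValidate, and its stride and shape are cached in
   diminfo[0] so the hot loop can index through __Pyx_BufPtrStrided1d
   without touching the Python buffer API again. res is additionally
   requested with PyBUF_WRITABLE, since the loop accumulates into it. */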
__pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":464 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if 
(unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":465 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 465; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":466 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":467 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { unsigned int __pyx_parallel_temp0 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); unsigned int __pyx_parallel_temp2 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { { for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":468 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - 
(*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":469 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":470 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":471 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":472 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":473 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":474 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":475 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":476 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 476; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":477 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, 
__pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":478 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":479 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":480 * Y2 = y[kp1] - yp[i] * kernel += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; { __pyx_parallel_temp0 = __pyx_v_kp1; __pyx_parallel_temp1 = __pyx_v_Z1_sqr; __pyx_parallel_temp2 = __pyx_v_i; __pyx_parallel_temp3 = __pyx_v_kernel; __pyx_parallel_temp4 = __pyx_v_Z2; __pyx_parallel_temp5 = __pyx_v_Z1; __pyx_parallel_temp6 = __pyx_v_Y1; __pyx_parallel_temp7 = __pyx_v_X2; __pyx_parallel_temp8 = __pyx_v_X1; __pyx_parallel_temp9 = __pyx_v_Z2_sqr; __pyx_parallel_temp10 = __pyx_v_k; __pyx_parallel_temp11 = __pyx_v_Y2; } __pyx_L13:; } } /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_kp1 = __pyx_parallel_temp0; __pyx_v_Z1_sqr = __pyx_parallel_temp1; __pyx_v_i = __pyx_parallel_temp2; __pyx_v_kernel = __pyx_parallel_temp3; __pyx_v_Z2 = __pyx_parallel_temp4; __pyx_v_Z1 = __pyx_parallel_temp5; __pyx_v_Y1 = __pyx_parallel_temp6; __pyx_v_X2 = __pyx_parallel_temp7; __pyx_v_X1 = __pyx_parallel_temp8; __pyx_v_Z2_sqr = __pyx_parallel_temp9; __pyx_v_k = __pyx_parallel_temp10; __pyx_v_Y2 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":466 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":454 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gyy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":484 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject 
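/* gyz(): identical scaffolding to gyy() above. Per the .pyx fragments
   quoted below (lines 484-510), the generated loop differs only in
   summing kernelyz instead of kernelyy over each polygon edge
   (k, (k + 1) % nverts) around every computation point i. */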
*__pyx_pw_8fatiando_7gravmag_10_polyprism_11gyz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_10gyz[] = "gyz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_11gyz = {__Pyx_NAMESTR("gyz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_11gyz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_10gyz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_11gyz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gyz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 6); 
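/* Note on the argument-unpacking switches above: the cases fall
   through deliberately (no break), so a positional-argument count of N
   copies items N-1 down to 0 in one pass, and the keyword pass then
   fills whatever slots remain. All nine parameters are required, hence
   the __Pyx_RaiseArgtupleInvalid call on any missing slot. */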
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gyz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gyz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 485; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 486; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 487; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 488; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 490; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_10gyz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_10gyz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gyz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = 
&__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":494 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = 
PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 494; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":495 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 495; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":496 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":497 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); unsigned int __pyx_parallel_temp1 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; unsigned int __pyx_parallel_temp11 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { { for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); /* "fatiando/gravmag/_polyprism.pyx":498 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - 
(*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":499 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":500 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":501 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":502 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":503 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":504 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":505 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":506 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 506; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":507 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, 
__pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":508 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":509 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":510 * Y2 = y[kp1] - yp[i] * kernel += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; { __pyx_parallel_temp0 = __pyx_v_Z2; __pyx_parallel_temp1 = __pyx_v_kp1; __pyx_parallel_temp2 = __pyx_v_Z1; __pyx_parallel_temp3 = __pyx_v_X1; __pyx_parallel_temp4 = __pyx_v_Z2_sqr; __pyx_parallel_temp5 = __pyx_v_Y2; __pyx_parallel_temp6 = __pyx_v_X2; __pyx_parallel_temp7 = __pyx_v_Z1_sqr; __pyx_parallel_temp8 = __pyx_v_Y1; __pyx_parallel_temp9 = __pyx_v_kernel; __pyx_parallel_temp10 = __pyx_v_i; __pyx_parallel_temp11 = __pyx_v_k; } __pyx_L13:; } } /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Z2 = __pyx_parallel_temp0; __pyx_v_kp1 = __pyx_parallel_temp1; __pyx_v_Z1 = __pyx_parallel_temp2; __pyx_v_X1 = __pyx_parallel_temp3; __pyx_v_Z2_sqr = __pyx_parallel_temp4; __pyx_v_Y2 = __pyx_parallel_temp5; __pyx_v_X2 = __pyx_parallel_temp6; __pyx_v_Z1_sqr = __pyx_parallel_temp7; __pyx_v_Y1 = __pyx_parallel_temp8; __pyx_v_kernel = __pyx_parallel_temp9; __pyx_v_i = __pyx_parallel_temp10; __pyx_v_k = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":496 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":484 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gyz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":514 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject 
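/*
 * gzz: vertical gravity-gradient component of a polygonal prism. The
 * wrapper declared here unpacks the nine arguments (xp, yp, zp, x, y, z1,
 * z2, density, res) under the METH_VARARGS|METH_KEYWORDS calling
 * convention, type-checks each ndarray argument with __Pyx_ArgTypeTest,
 * and dispatches to the implementation, which is structurally identical
 * to gyz above except that it accumulates kernelzz instead of kernelyz.
 */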
*__pyx_pw_8fatiando_7gravmag_10_polyprism_13gzz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_12gzz[] = "gzz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_13gzz = {__Pyx_NAMESTR("gzz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_13gzz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_12gzz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_13gzz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gzz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_density,&__pyx_n_s_res,0}; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 6); 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gzz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 519; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 519; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 519; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gzz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 515; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 518; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 520; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_12gzz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_12gzz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gzz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = 
&__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":524 * unsigned int nverts, size, i, k, kp1 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = 
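/*
 * The `with nogil: for i in prange(size):` block from the .pyx source is
 * expanded below: Cython releases the GIL (Py_UNBLOCK_THREADS), hoists the
 * loop bound into __pyx_t_2, and initializes every loop-private variable
 * to a sentinel (0xbad0bad0 for integers, __PYX_NAN() for doubles) so a
 * stale read is conspicuous. When the module is built with OpenMP enabled
 * (see the #ifndef _OPENMP guard further down), prange lowers to roughly:
 *
 *     #pragma omp parallel for private(...)   // simplified sketch only
 *     for (i = 0; i < size; i++) { ... }
 *
 * This is a sketch, not the literal expansion: the generated version also
 * threads abnormal-exit state through __pyx_parallel_why and snapshots the
 * private variables into __pyx_parallel_temp* so the values from the
 * iteration that broke out survive into the serial region.
 */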
PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 524; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":525 * DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":526 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":527 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { unsigned int __pyx_parallel_temp0 = 0xbad0bad0; unsigned int __pyx_parallel_temp1 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); unsigned int __pyx_parallel_temp3 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { { for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":528 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - 
(*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":529 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":530 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":531 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * kernel = 0 * for k in range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":532 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * kernel = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* "fatiando/gravmag/_polyprism.pyx":533 * Z2_sqr = Z2**2 * kernel = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":534 * kernel = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":535 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":536 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 536; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":537 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * kernel += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, 
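/*
 * Note the ZeroDivisionError guard above: raising a Python exception from
 * nogil (and possibly OpenMP-parallel) code requires re-acquiring the GIL
 * first, hence the PyGILState_Ensure()/PyGILState_Release() bracket around
 * PyErr_SetString(). Control then jumps to __pyx_L8_error, where the first
 * failing thread stashes its exception in __pyx_parallel_exc_* and sets
 * __pyx_parallel_why to 4; the remaining iterations drain without doing
 * further work.
 */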
__pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":538 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * kernel += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":539 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * kernel += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":540 * Y2 = y[kp1] - yp[i] * kernel += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; { __pyx_parallel_temp0 = __pyx_v_kp1; __pyx_parallel_temp1 = __pyx_v_k; __pyx_parallel_temp2 = __pyx_v_Z2_sqr; __pyx_parallel_temp3 = __pyx_v_i; __pyx_parallel_temp4 = __pyx_v_X1; __pyx_parallel_temp5 = __pyx_v_Y2; __pyx_parallel_temp6 = __pyx_v_Z1; __pyx_parallel_temp7 = __pyx_v_Z1_sqr; __pyx_parallel_temp8 = __pyx_v_kernel; __pyx_parallel_temp9 = __pyx_v_Z2; __pyx_parallel_temp10 = __pyx_v_Y1; __pyx_parallel_temp11 = __pyx_v_X2; } __pyx_L13:; } } /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_kp1 = __pyx_parallel_temp0; __pyx_v_k = __pyx_parallel_temp1; __pyx_v_Z2_sqr = __pyx_parallel_temp2; __pyx_v_i = __pyx_parallel_temp3; __pyx_v_X1 = __pyx_parallel_temp4; __pyx_v_Y2 = __pyx_parallel_temp5; __pyx_v_Z1 = __pyx_parallel_temp6; __pyx_v_Z1_sqr = __pyx_parallel_temp7; __pyx_v_kernel = __pyx_parallel_temp8; __pyx_v_Z2 = __pyx_parallel_temp9; __pyx_v_Y1 = __pyx_parallel_temp10; __pyx_v_X2 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":526 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":514 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.gzz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":544 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject 
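/*
 * tf: total-field magnetic anomaly of the prism. Per the .pyx source
 * quoted in the comments below, the six independent components of the
 * symmetric second-derivative tensor are accumulated into v1..v6 and then
 * projected onto the magnetization direction (mx, my, mz) and the
 * regional-field direction (fx, fy, fz):
 *
 *     res[i] += fx*(v1*mx + v2*my + v3*mz)
 *             + fy*(v2*mx + v4*my + v5*mz)
 *             + fz*(v3*mx + v5*my + v6*mz);
 *
 * v2, v3, and v5 are reused across rows because the tensor is symmetric
 * (xy = yx, xz = zx, yz = zy), so only six kernels are evaluated per edge.
 */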
*__pyx_pw_8fatiando_7gravmag_10_polyprism_15tf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_14tf[] = "tf(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double mx, double my, double mz, double fx, double fy, double fz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_15tf = {__Pyx_NAMESTR("tf"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_15tf, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_14tf)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_15tf(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; double __pyx_v_fx; double __pyx_v_fy; double __pyx_v_fz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("tf (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_fx,&__pyx_n_s_fy,&__pyx_n_s_fz,&__pyx_n_s_res,0}; PyObject* values[14] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13); case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 4); {__pyx_filename 
= __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 11: if (likely((values[11] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fy)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 11); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 12: if (likely((values[12] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 12); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 13: if (likely((values[13] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 13); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "tf") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 14) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); values[11] = PyTuple_GET_ITEM(__pyx_args, 11); values[12] = PyTuple_GET_ITEM(__pyx_args, 12); values[13] = PyTuple_GET_ITEM(__pyx_args, 13); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if 
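/*
 * Scalar unpacking idiom: __pyx_PyFloat_AsDouble() returns -1.0 on
 * failure, but -1.0 is also a legal argument value, so each conversion
 * here is checked with the standard CPython pattern
 * `(value == (double)-1) && PyErr_Occurred()` before jumping to the
 * error label.
 */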
(unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 549; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 549; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_fx = __pyx_PyFloat_AsDouble(values[10]); if (unlikely((__pyx_v_fx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_fy = __pyx_PyFloat_AsDouble(values[11]); if (unlikely((__pyx_v_fy == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_fz = __pyx_PyFloat_AsDouble(values[12]); if (unlikely((__pyx_v_fz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[13]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.tf", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 545; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 547; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 548; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 551; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_14tf(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_fx, __pyx_v_fy, __pyx_v_fz, __pyx_v_res); /* function exit code */ goto 
__pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_14tf(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, double __pyx_v_fx, double __pyx_v_fy, double __pyx_v_fz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("tf", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if 
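/*
 * Buffer acquisition for tf: the read-only inputs are requested with
 * PyBUF_FORMAT|PyBUF_STRIDES, while res additionally demands
 * PyBUF_WRITABLE since it is updated in place. __Pyx_GetBufferAndValidate
 * also validates the element format against DTYPE_T, and the stride and
 * shape of dimension 0 are cached in the diminfo arrays for the fast
 * indexing macros used in the loop.
 */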
(unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":556 * DTYPE_T v1, v2, v3, v4, v5, v6 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename 
= __pyx_f[0]; __pyx_lineno = 556; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":557 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":558 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":559 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); unsigned int __pyx_parallel_temp8 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp12 = __PYX_NAN(); unsigned int __pyx_parallel_temp13 = 0xbad0bad0; unsigned int __pyx_parallel_temp14 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp15 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp16 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { { for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_X2 = 
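/*
 * Element access in the hot loop below goes through __Pyx_BufPtrStrided1d,
 * which is essentially pointer arithmetic over the raw buffer. A sketch of
 * what an expression like `x[kp1] - xp[i]` lowers to, using BUF_AT as a
 * hypothetical stand-in for the Cython macro (shown only to make the
 * strided indexing explicit):
 *
 *     #define BUF_AT(T, buf, idx, stride) \
 *         (*(T *)((char *)(buf) + (idx) * (stride)))
 *     X2 = BUF_AT(DTYPE_T, x_buf, kp1, x_stride)
 *        - BUF_AT(DTYPE_T, xp_buf, i, xp_stride);
 */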
((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":560 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":561 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":562 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * v1 = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":563 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * v1 = 0 * v2 = 0 */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":564 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * v1 = 0 # <<<<<<<<<<<<<< * v2 = 0 * v3 = 0 */ __pyx_v_v1 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":565 * Z2_sqr = Z2**2 * v1 = 0 * v2 = 0 # <<<<<<<<<<<<<< * v3 = 0 * v4 = 0 */ __pyx_v_v2 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":566 * v1 = 0 * v2 = 0 * v3 = 0 # <<<<<<<<<<<<<< * v4 = 0 * v5 = 0 */ __pyx_v_v3 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":567 * v2 = 0 * v3 = 0 * v4 = 0 # <<<<<<<<<<<<<< * v5 = 0 * v6 = 0 */ __pyx_v_v4 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":568 * v3 = 0 * v4 = 0 * v5 = 0 # <<<<<<<<<<<<<< * v6 = 0 * for k in range(nverts): */ __pyx_v_v5 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":569 * v4 = 0 * v5 = 0 * v6 = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_v6 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":570 * v5 = 0 * v6 = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":571 * v6 = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":572 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":573 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k 
+ 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":574 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":575 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":576 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v1 = (__pyx_v_v1 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":577 * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v2 = (__pyx_v_v2 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":578 * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v3 = (__pyx_v_v3 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":579 * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v5 += kernelyz(X1, Y1, X2, Y2, Z1, 
Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v4 = (__pyx_v_v4 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":580 * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (fx*(v1*mx + v2*my + v3*mz) */ __pyx_v_v5 = (__pyx_v_v5 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":581 * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += (fx*(v1*mx + v2*my + v3*mz) * + fy*(v2*mx + v4*my + v5*mz) */ __pyx_v_v6 = (__pyx_v_v6 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":582 * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (fx*(v1*mx + v2*my + v3*mz) # <<<<<<<<<<<<<< * + fy*(v2*mx + v4*my + v5*mz) * + fz*(v3*mx + v5*my + v6*mz)) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (((__pyx_v_fx * (((__pyx_v_v1 * __pyx_v_mx) + (__pyx_v_v2 * __pyx_v_my)) + (__pyx_v_v3 * __pyx_v_mz))) + (__pyx_v_fy * (((__pyx_v_v2 * __pyx_v_mx) + (__pyx_v_v4 * __pyx_v_my)) + (__pyx_v_v5 * __pyx_v_mz)))) + (__pyx_v_fz * (((__pyx_v_v3 * __pyx_v_mx) + (__pyx_v_v5 * __pyx_v_my)) + (__pyx_v_v6 * __pyx_v_mz)))); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; { __pyx_parallel_temp0 = __pyx_v_Z2; __pyx_parallel_temp1 = __pyx_v_v5; __pyx_parallel_temp2 = __pyx_v_v6; __pyx_parallel_temp3 = __pyx_v_Y1; __pyx_parallel_temp4 = __pyx_v_v1; __pyx_parallel_temp5 = __pyx_v_v4; __pyx_parallel_temp6 = __pyx_v_v3; __pyx_parallel_temp7 = __pyx_v_Z2_sqr; __pyx_parallel_temp8 = __pyx_v_i; __pyx_parallel_temp9 = __pyx_v_v2; __pyx_parallel_temp10 = __pyx_v_Z1_sqr; __pyx_parallel_temp11 = __pyx_v_X1; __pyx_parallel_temp12 = __pyx_v_Y2; __pyx_parallel_temp13 = __pyx_v_kp1; __pyx_parallel_temp14 = __pyx_v_k; __pyx_parallel_temp15 = __pyx_v_X2; __pyx_parallel_temp16 = __pyx_v_Z1; } __pyx_L13:; } } /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
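The __pyx_parallel_why flag records how the parallel block ended: 0 is a normal exit, values below 2 let iteration continue (hence the __pyx_parallel_why < 2 guard around each iteration), 3 is an early return, and 4 is a pending exception; the case 3 and case 4 arms of the switch below dispatch to the matching return and error labels. Before leaving, each thread parks its last private values in the __pyx_parallel_temp slots, which are copied back into the __pyx_v_ variables once the region is over.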
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Z2 = __pyx_parallel_temp0; __pyx_v_v5 = __pyx_parallel_temp1; __pyx_v_v6 = __pyx_parallel_temp2; __pyx_v_Y1 = __pyx_parallel_temp3; __pyx_v_v1 = __pyx_parallel_temp4; __pyx_v_v4 = __pyx_parallel_temp5; __pyx_v_v3 = __pyx_parallel_temp6; __pyx_v_Z2_sqr = __pyx_parallel_temp7; __pyx_v_i = __pyx_parallel_temp8; __pyx_v_v2 = __pyx_parallel_temp9; __pyx_v_Z1_sqr = __pyx_parallel_temp10; __pyx_v_X1 = __pyx_parallel_temp11; __pyx_v_Y2 = __pyx_parallel_temp12; __pyx_v_kp1 = __pyx_parallel_temp13; __pyx_v_k = __pyx_parallel_temp14; __pyx_v_X2 = __pyx_parallel_temp15; __pyx_v_Z1 = __pyx_parallel_temp16; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":558 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":544 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.tf", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":588 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # 
<<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_17bx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_16bx[] = "bx(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double mx, double my, double mz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_17bx = {__Pyx_NAMESTR("bx"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_17bx, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_16bx)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_17bx(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("bx (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) 
!= 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bx") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto 
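/* For readability: the Cython loop that the bx() machinery below compiles,
   reconstructed from the "_polyprism.pyx" line comments Cython embeds in
   this file (signature as given in the docstring above):

       def bx(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y,
              double z1, double z2, double mx, double my, double mz,
              ndarray res):
           nverts = len(x)
           size = len(res)
           with nogil:
               for i in prange(size):
                   Z1 = z1 - zp[i]
                   Z2 = z2 - zp[i]
                   Z1_sqr = Z1**2
                   Z2_sqr = Z2**2
                   v1 = 0
                   v2 = 0
                   v3 = 0
                   for k in range(nverts):
                       X1 = x[k] - xp[i]
                       Y1 = y[k] - yp[i]
                       kp1 = (k + 1) % nverts
                       X2 = x[kp1] - xp[i]
                       Y2 = y[kp1] - yp[i]
                       v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
                       v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
                       v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
                   res[i] += (v1*mx + v2*my + v3*mz)

   v1, v2 and v3 are the first row of the same symmetric 3x3 kernel tensor
   whose six entries tf() above accumulates; bx() dots that row with the
   magnetization (mx, my, mz), while tf() additionally projects onto the
   field direction (fx, fy, fz). */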
__pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.bx", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 589; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 590; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_16bx(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_16bx(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject 
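/* Every ndarray argument is paired with a __Pyx_Buffer and __Pyx_LocalBuf_ND
   view: __Pyx_GetBufferAndValidate() acquires it with PyBUF_FORMAT and
   PyBUF_STRIDES (plus PyBUF_WRITABLE for the output array res), the stride
   and shape are cached in diminfo[0] to feed the strided element loads in
   the loop, and both exit paths release the views again with
   __Pyx_SafeReleaseBuffer(). */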
*__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("bx", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = 
__pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":600 * DTYPE_T v1, v2, v3 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 600; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":601 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 601; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":602 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":603 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); unsigned int __pyx_parallel_temp4 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); unsigned int __pyx_parallel_temp6 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); unsigned int __pyx_parallel_temp11 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp12 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp13 = __PYX_NAN(); const 
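/* These per-thread temporaries shadow the prange private variables; they
   start out as __PYX_NAN() for the floating point values and 0xbad0bad0
   for the indices, so a slot that was never written back stands out, and
   on an abnormal exit they carry each thread's last values out of the
   parallel region (see the __pyx_L12 block below). */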
char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { { for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":604 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":605 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":606 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * v1 = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":607 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * v1 = 0 * v2 = 0 */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":608 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * v1 = 0 # <<<<<<<<<<<<<< * v2 = 0 * v3 = 0 */ __pyx_v_v1 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":609 * Z2_sqr = Z2**2 * v1 = 0 * v2 = 0 # <<<<<<<<<<<<<< * v3 = 0 * for k in range(nverts): */ __pyx_v_v2 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":610 * v1 = 0 * v2 = 0 * v3 = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_v3 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":611 * v2 = 0 * v3 = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":612 * v3 = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, 
__pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":613 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":614 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":615 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":616 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":617 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v1 = (__pyx_v_v1 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":618 * Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v1*mx + v2*my + v3*mz) */ __pyx_v_v2 = (__pyx_v_v2 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(__pyx_v_X1, __pyx_v_Y1, 
__pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":619 * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += (v1*mx + v2*my + v3*mz) * */ __pyx_v_v3 = (__pyx_v_v3 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":620 * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v1*mx + v2*my + v3*mz) # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (((__pyx_v_v1 * __pyx_v_mx) + (__pyx_v_v2 * __pyx_v_my)) + (__pyx_v_v3 * __pyx_v_mz)); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; { __pyx_parallel_temp0 = __pyx_v_Z2_sqr; __pyx_parallel_temp1 = __pyx_v_v1; __pyx_parallel_temp2 = __pyx_v_Y2; __pyx_parallel_temp3 = __pyx_v_Z1_sqr; __pyx_parallel_temp4 = __pyx_v_k; __pyx_parallel_temp5 = __pyx_v_X1; __pyx_parallel_temp6 = __pyx_v_i; __pyx_parallel_temp7 = __pyx_v_v2; __pyx_parallel_temp8 = __pyx_v_Z2; __pyx_parallel_temp9 = __pyx_v_v3; __pyx_parallel_temp10 = __pyx_v_Y1; __pyx_parallel_temp11 = __pyx_v_kp1; __pyx_parallel_temp12 = __pyx_v_Z1; __pyx_parallel_temp13 = __pyx_v_X2; } __pyx_L13:; } } /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Z2_sqr = __pyx_parallel_temp0; __pyx_v_v1 = __pyx_parallel_temp1; __pyx_v_Y2 = __pyx_parallel_temp2; __pyx_v_Z1_sqr = __pyx_parallel_temp3; __pyx_v_k = __pyx_parallel_temp4; __pyx_v_X1 = __pyx_parallel_temp5; __pyx_v_i = __pyx_parallel_temp6; __pyx_v_v2 = __pyx_parallel_temp7; __pyx_v_Z2 = __pyx_parallel_temp8; __pyx_v_v3 = __pyx_parallel_temp9; __pyx_v_Y1 = __pyx_parallel_temp10; __pyx_v_kp1 = __pyx_parallel_temp11; __pyx_v_Z1 = __pyx_parallel_temp12; __pyx_v_X2 = __pyx_parallel_temp13; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":602 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":588 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.bx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":624 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* 
Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_19by(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_18by[] = "by(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double mx, double my, double mz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_19by = {__Pyx_NAMESTR("by"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_19by, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_18by)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_19by(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("by (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; 
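/* As in bx() and tf() above, the scalar arguments z1, z2, mx, my and mz
   are converted a few lines below with __pyx_PyFloat_AsDouble(); the
   (value == (double)-1 && PyErr_Occurred()) test tells a real conversion
   failure apart from a legitimate value of -1.0, so only genuine errors
   jump to __pyx_L3_error. */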
__pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "by") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 629; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 629; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 630; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 630; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 630; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 
PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.by", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 625; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 626; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 627; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 628; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 631; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_18by(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_18by(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int 
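/* by() follows the same template as bx() immediately above; the only
   differences are which kernels are accumulated (kernelxy, kernelyy and
   kernelyz, into v2, v4 and v5, the middle row of the symmetric tensor)
   and the final update res[i] += (v2*mx + v4*my + v5*mz). */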
__pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("by", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = 
__pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":636 * DTYPE_T v2, v4, v5 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 636; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":637 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":638 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":639 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); unsigned int __pyx_parallel_temp7 = 0xbad0bad0; unsigned int __pyx_parallel_temp8 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp12 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp13 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, 
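/* The (k + 1) % nverts guard further below raises ZeroDivisionError from
   inside a nogil section, so the generated code brackets PyErr_SetString()
   with PyGILState_Ensure() and PyGILState_Release(); the GIL has to be
   held whenever the CPython error indicator is written. */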
__pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { { for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":640 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":641 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":642 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * v2 = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":643 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * v2 = 0 * v4 = 0 */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":644 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * v2 = 0 # <<<<<<<<<<<<<< * v4 = 0 * v5 = 0 */ __pyx_v_v2 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":645 * Z2_sqr = Z2**2 * v2 = 0 * v4 = 0 # <<<<<<<<<<<<<< * v5 = 0 * for k in range(nverts): */ __pyx_v_v4 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":646 * v2 = 0 * v4 = 0 * v5 = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_v5 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":647 * v4 = 0 * v5 = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":648 * v5 = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - 
(*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":649 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":650 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 650; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":651 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":652 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":653 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v2 = (__pyx_v_v2 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":654 * Y2 = y[kp1] - yp[i] * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v2*mx + v4*my + v5*mz) */ __pyx_v_v4 = (__pyx_v_v4 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, 
__pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":655 * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += (v2*mx + v4*my + v5*mz) * */ __pyx_v_v5 = (__pyx_v_v5 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":656 * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v2*mx + v4*my + v5*mz) # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (((__pyx_v_v2 * __pyx_v_mx) + (__pyx_v_v4 * __pyx_v_my)) + (__pyx_v_v5 * __pyx_v_mz)); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; { __pyx_parallel_temp0 = __pyx_v_Z1; __pyx_parallel_temp1 = __pyx_v_Z2; __pyx_parallel_temp2 = __pyx_v_v4; __pyx_parallel_temp3 = __pyx_v_v5; __pyx_parallel_temp4 = __pyx_v_v2; __pyx_parallel_temp5 = __pyx_v_X2; __pyx_parallel_temp6 = __pyx_v_Z1_sqr; __pyx_parallel_temp7 = __pyx_v_kp1; __pyx_parallel_temp8 = __pyx_v_k; __pyx_parallel_temp9 = __pyx_v_Y1; __pyx_parallel_temp10 = __pyx_v_i; __pyx_parallel_temp11 = __pyx_v_Z2_sqr; __pyx_parallel_temp12 = __pyx_v_Y2; __pyx_parallel_temp13 = __pyx_v_X1; } __pyx_L13:; } } /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Z1 = __pyx_parallel_temp0; __pyx_v_Z2 = __pyx_parallel_temp1; __pyx_v_v4 = __pyx_parallel_temp2; __pyx_v_v5 = __pyx_parallel_temp3; __pyx_v_v2 = __pyx_parallel_temp4; __pyx_v_X2 = __pyx_parallel_temp5; __pyx_v_Z1_sqr = __pyx_parallel_temp6; __pyx_v_kp1 = __pyx_parallel_temp7; __pyx_v_k = __pyx_parallel_temp8; __pyx_v_Y1 = __pyx_parallel_temp9; __pyx_v_i = __pyx_parallel_temp10; __pyx_v_Z2_sqr = __pyx_parallel_temp11; __pyx_v_Y2 = __pyx_parallel_temp12; __pyx_v_X1 = __pyx_parallel_temp13; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":638 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":624 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.by", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "fatiando/gravmag/_polyprism.pyx":660 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* 
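*/

/* A sketch, not part of the generated file: the Cython loop that by() above
 * implements, reassembled verbatim from the "fatiando/gravmag/_polyprism.pyx"
 * pragma comments embedded in the generated code (.pyx lines 636-656). The
 * outer prange() loop over observation points is what becomes the
 * OpenMP-guarded parallel region with the per-thread __pyx_parallel_temp*
 * save/restore slots seen above.
 *
 *     nverts = len(x)
 *     size = len(res)
 *     with nogil:
 *         for i in prange(size):
 *             Z1 = z1 - zp[i]
 *             Z2 = z2 - zp[i]
 *             Z1_sqr = Z1**2
 *             Z2_sqr = Z2**2
 *             v2 = 0
 *             v4 = 0
 *             v5 = 0
 *             for k in range(nverts):
 *                 X1 = x[k] - xp[i]
 *                 Y1 = y[k] - yp[i]
 *                 kp1 = (k + 1) % nverts
 *                 X2 = x[kp1] - xp[i]
 *                 Y2 = y[kp1] - yp[i]
 *                 v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
 *                 v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
 *                 v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
 *             res[i] += (v2*mx + v4*my + v5*mz)
 */

/*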
Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_21bz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_20bz[] = "bz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double mx, double my, double mz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_21bz = {__Pyx_NAMESTR("bz"), (PyCFunction)__pyx_pw_8fatiando_7gravmag_10_polyprism_21bz, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_20bz)}; static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_21bz(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("bz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp,&__pyx_n_s_yp,&__pyx_n_s_zp,&__pyx_n_s_x,&__pyx_n_s_y,&__pyx_n_s_z1,&__pyx_n_s_z2,&__pyx_n_s_mx,&__pyx_n_s_my,&__pyx_n_s_mz,&__pyx_n_s_res,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; 
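/* The exception is already set by __Pyx_RaiseArgtupleInvalid() above; the
   assignments around this point only record the .pyx source location for the
   traceback before jumping to the shared argument-error handler
   __pyx_L3_error. */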
__pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bz") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *)values[0]); __pyx_v_yp = ((PyArrayObject *)values[1]); __pyx_v_zp = ((PyArrayObject *)values[2]); __pyx_v_x = ((PyArrayObject *)values[3]); __pyx_v_y = ((PyArrayObject *)values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 665; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 665; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 666; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 666; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 666; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_res = ((PyArrayObject *)values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 
PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.bz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 663; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 664; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 667; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_20bz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_20bz(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_xp, PyArrayObject *__pyx_v_yp, PyArrayObject *__pyx_v_zp, PyArrayObject *__pyx_v_x, PyArrayObject *__pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject *__pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int 
__pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("bz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject*)__pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject*)__pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject*)__pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject*)__pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = 
__pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject*)__pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject*)__pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* "fatiando/gravmag/_polyprism.pyx":672 * DTYPE_T v3, v5, v6 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) # <<<<<<<<<<<<<< * size = len(res) * with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 672; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nverts = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":673 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr * nverts = len(x) * size = len(res) # <<<<<<<<<<<<<< * with nogil: * for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *)__pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 673; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_size = __pyx_t_1; /* "fatiando/gravmag/_polyprism.pyx":674 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "fatiando/gravmag/_polyprism.pyx":675 * size = len(res) * with nogil: * for i in prange(size): # <<<<<<<<<<<<<< * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); unsigned int __pyx_parallel_temp3 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); unsigned int __pyx_parallel_temp8 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp12 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp13 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, 
__pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { { for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++){ if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* Initialize private variables to invalid values */ __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T)__PYX_NAN()); /* "fatiando/gravmag/_polyprism.pyx":676 * with nogil: * for i in prange(size): * Z1 = z1 - zp[i] # <<<<<<<<<<<<<< * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":677 * for i in prange(size): * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":678 * Z1 = z1 - zp[i] * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 # <<<<<<<<<<<<<< * Z2_sqr = Z2**2 * v3 = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* "fatiando/gravmag/_polyprism.pyx":679 * Z2 = z2 - zp[i] * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 # <<<<<<<<<<<<<< * v3 = 0 * v5 = 0 */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* "fatiando/gravmag/_polyprism.pyx":680 * Z1_sqr = Z1**2 * Z2_sqr = Z2**2 * v3 = 0 # <<<<<<<<<<<<<< * v5 = 0 * v6 = 0 */ __pyx_v_v3 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":681 * Z2_sqr = Z2**2 * v3 = 0 * v5 = 0 # <<<<<<<<<<<<<< * v6 = 0 * for k in range(nverts): */ __pyx_v_v5 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":682 * v3 = 0 * v5 = 0 * v6 = 0 # <<<<<<<<<<<<<< * for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_v6 = 0.0; /* "fatiando/gravmag/_polyprism.pyx":683 * v5 = 0 * v6 = 0 * for k in range(nverts): # <<<<<<<<<<<<<< * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_k = __pyx_t_8; /* "fatiando/gravmag/_polyprism.pyx":684 * v6 = 0 * for k in range(nverts): * X1 = x[k] - xp[i] # <<<<<<<<<<<<<< * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - 
(*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":685 * for k in range(nverts): * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] # <<<<<<<<<<<<<< * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":686 * X1 = x[k] - xp[i] * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # <<<<<<<<<<<<<< * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[0]; __pyx_lineno = 686; __pyx_clineno = __LINE__; goto __pyx_L8_error;} } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* "fatiando/gravmag/_polyprism.pyx":687 * Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] # <<<<<<<<<<<<<< * Y2 = y[kp1] - yp[i] * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":688 * kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] # <<<<<<<<<<<<<< * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* "fatiando/gravmag/_polyprism.pyx":689 * X2 = x[kp1] - xp[i] * Y2 = y[kp1] - yp[i] * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) */ __pyx_v_v3 = (__pyx_v_v3 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":690 * Y2 = y[kp1] - yp[i] * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v3*mx + v5*my + v6*mz) */ __pyx_v_v5 = (__pyx_v_v5 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, 
__pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* "fatiando/gravmag/_polyprism.pyx":691 * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) # <<<<<<<<<<<<<< * res[i] += (v3*mx + v5*my + v6*mz) */ __pyx_v_v6 = (__pyx_v_v6 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* "fatiando/gravmag/_polyprism.pyx":692 * v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += (v3*mx + v5*my + v6*mz) # <<<<<<<<<<<<<< */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (((__pyx_v_v3 * __pyx_v_mx) + (__pyx_v_v5 * __pyx_v_my)) + (__pyx_v_v6 * __pyx_v_mz)); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; { __pyx_parallel_temp0 = __pyx_v_v3; __pyx_parallel_temp1 = __pyx_v_X2; __pyx_parallel_temp2 = __pyx_v_Z1_sqr; __pyx_parallel_temp3 = __pyx_v_kp1; __pyx_parallel_temp4 = __pyx_v_X1; __pyx_parallel_temp5 = __pyx_v_Z1; __pyx_parallel_temp6 = __pyx_v_Z2_sqr; __pyx_parallel_temp7 = __pyx_v_v6; __pyx_parallel_temp8 = __pyx_v_k; __pyx_parallel_temp9 = __pyx_v_Z2; __pyx_parallel_temp10 = __pyx_v_i; __pyx_parallel_temp11 = __pyx_v_v5; __pyx_parallel_temp12 = __pyx_v_Y2; __pyx_parallel_temp13 = __pyx_v_Y1; } __pyx_L13:; } } /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_v3 = __pyx_parallel_temp0; __pyx_v_X2 = __pyx_parallel_temp1; __pyx_v_Z1_sqr = __pyx_parallel_temp2; __pyx_v_kp1 = __pyx_parallel_temp3; __pyx_v_X1 = __pyx_parallel_temp4; __pyx_v_Z1 = __pyx_parallel_temp5; __pyx_v_Z2_sqr = __pyx_parallel_temp6; __pyx_v_v6 = __pyx_parallel_temp7; __pyx_v_k = __pyx_parallel_temp8; __pyx_v_Z2 = __pyx_parallel_temp9; __pyx_v_i = __pyx_parallel_temp10; __pyx_v_v5 = __pyx_parallel_temp11; __pyx_v_Y2 = __pyx_parallel_temp12; __pyx_v_Y1 = __pyx_parallel_temp13; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "fatiando/gravmag/_polyprism.pyx":674 * nverts = len(x) * size = len(res) * with nogil: # <<<<<<<<<<<<<< * for i in prange(size): * Z1 = z1 - zp[i] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "fatiando/gravmag/_polyprism.pyx":660 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("fatiando.gravmag._polyprism.bz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. 
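*/

/* A sketch, not part of the generated file: the Cython loop that bz() above
 * implements, reassembled verbatim from the "fatiando/gravmag/_polyprism.pyx"
 * pragma comments embedded in the generated code (.pyx lines 672-692). It
 * differs from by() only in the kernels summed (kernelxz, kernelyz, kernelzz)
 * and the accumulators (v3, v5, v6):
 *
 *     nverts = len(x)
 *     size = len(res)
 *     with nogil:
 *         for i in prange(size):
 *             Z1 = z1 - zp[i]
 *             Z2 = z2 - zp[i]
 *             Z1_sqr = Z1**2
 *             Z2_sqr = Z2**2
 *             v3 = 0
 *             v5 = 0
 *             v6 = 0
 *             for k in range(nverts):
 *                 X1 = x[k] - xp[i]
 *                 Y1 = y[k] - yp[i]
 *                 kp1 = (k + 1) % nverts
 *                 X2 = x[kp1] - xp[i]
 *                 Y2 = y[kp1] - yp[i]
 *                 v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
 *                 v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
 *                 v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
 *             res[i] += (v3*mx + v5*my + v6*mz)
 */

/*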
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":200 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = ((__pyx_v_info == NULL) != 0); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":204 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":208 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == 
pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":213 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_3) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_1 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ 
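/* Fill in the exported Py_buffer fields. In the copy_shape branch (taken when
   sizeof(npy_intp) != sizeof(Py_ssize_t)) the strides and shape are copied
   into a single freshly malloc'd block, strides first; otherwise both
   pointers alias the ndarray's own stride/dimension arrays directly. */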
__pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":223 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_2 = (__pyx_v_copy_shape != 0); if (__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":227 * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":228 * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":234 * 
info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef list stack */ __pyx_v_f = NULL; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":240 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef list stack * cdef int offset */ __pyx_t_4 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_4); __pyx_t_4 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":244 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":246 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { __pyx_t_3 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L10:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":253 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder 
== c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = ((__pyx_v_descr->byteorder == '>') != 0); if (__pyx_t_1) { __pyx_t_2 = (__pyx_v_little_endian != 0); } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_1 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_1) { __pyx_t_3 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ switch (__pyx_v_t) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ case NPY_BYTE: __pyx_v_f = __pyx_k_b; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = __pyx_k_B; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = __pyx_k_h; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = __pyx_k_H; break; /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ case NPY_INT: __pyx_v_f = __pyx_k_i; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = __pyx_k_I; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: __pyx_v_f = __pyx_k_l; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = __pyx_k_L; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = __pyx_k_q; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ case NPY_ULONGLONG: __pyx_v_f = __pyx_k_Q; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = __pyx_k_f; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = __pyx_k_d; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = __pyx_k_g; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = __pyx_k_Zf; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = 
__pyx_k_Zd; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ case NPY_CLONGDOUBLE: __pyx_v_f = __pyx_k_Zg; break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = __pyx_k_O; break; default: /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_4); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_8); __Pyx_GIVEREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} break; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":278 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280 * return * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281 * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":282 * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment 
* offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_9; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290 * def __releasebuffer__(ndarray self, 
Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; 
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* 
_util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; long __pyx_t_10; char *__pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":790 * cdef int delta_offset * cdef tuple i * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":791 * cdef tuple i * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - <int>(new_offset - offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_6 = ((__pyx_v_child->byteorder == '>') != 0); if (__pyx_t_6) { __pyx_t_7 = (__pyx_v_little_endian != 0); } else { __pyx_t_7 = __pyx_t_6; } if (!__pyx_t_7) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_6 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_6) { __pyx_t_8 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_9 = __pyx_t_8; } else { __pyx_t_9 = __pyx_t_6; } __pyx_t_6 = __pyx_t_9; } else { __pyx_t_6 = __pyx_t_7; } if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_10 = 
0; (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_10 = 0; (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = 
PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 104; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 105; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 108; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 
#"f" */ __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 113; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 102; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); 
__pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 100; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 103; goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) 
{ (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L11; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L11:; /* 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L9; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_f = __pyx_t_11; } __pyx_L9:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = <PyObject*>base * Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972 * else: * Py_INCREF(base) # important to do this before decref below! 
* baseptr = <PyObject*>base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":973 * Py_INCREF(base) # important to do this before decref below! * baseptr = <PyObject*>base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974 * baseptr = <PyObject*>base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":978 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return <object>arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; } /*else*/ { /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980 * return None * else: * return <object>arr.base # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif __Pyx_NAMESTR("_polyprism"), __Pyx_DOCSTR(__pyx_k_This_is_a_Cython_implementation), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 
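/* [editor's note] assumption: per the __Pyx_StringTabEntry layout Cython
   emits in modules of this vintage, the four trailing fields of each entry
   are encoding, is_unicode, is_str, intern; so 0, 1, 0, 0 tags a unicode
   literal and 0, 0, 1, 1 an interned identifier. */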
0, 1, 0, 0}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_X1, __pyx_k_X1, sizeof(__pyx_k_X1), 0, 0, 1, 1}, {&__pyx_n_s_X2, __pyx_k_X2, sizeof(__pyx_k_X2), 0, 0, 1, 1}, {&__pyx_n_s_Y1, __pyx_k_Y1, sizeof(__pyx_k_Y1), 0, 0, 1, 1}, {&__pyx_n_s_Y2, __pyx_k_Y2, sizeof(__pyx_k_Y2), 0, 0, 1, 1}, {&__pyx_n_s_Z1, __pyx_k_Z1, sizeof(__pyx_k_Z1), 0, 0, 1, 1}, {&__pyx_n_s_Z1_sqr, __pyx_k_Z1_sqr, sizeof(__pyx_k_Z1_sqr), 0, 0, 1, 1}, {&__pyx_n_s_Z2, __pyx_k_Z2, sizeof(__pyx_k_Z2), 0, 0, 1, 1}, {&__pyx_n_s_Z2_sqr, __pyx_k_Z2_sqr, sizeof(__pyx_k_Z2_sqr), 0, 0, 1, 1}, {&__pyx_n_s_bx, __pyx_k_bx, sizeof(__pyx_k_bx), 0, 0, 1, 1}, {&__pyx_n_s_by, __pyx_k_by, sizeof(__pyx_k_by), 0, 0, 1, 1}, {&__pyx_n_s_bz, __pyx_k_bz, sizeof(__pyx_k_bz), 0, 0, 1, 1}, {&__pyx_n_s_density, __pyx_k_density, sizeof(__pyx_k_density), 0, 0, 1, 1}, {&__pyx_n_s_fatiando_gravmag__polyprism, __pyx_k_fatiando_gravmag__polyprism, sizeof(__pyx_k_fatiando_gravmag__polyprism), 0, 0, 1, 1}, {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1}, {&__pyx_n_s_fx, __pyx_k_fx, sizeof(__pyx_k_fx), 0, 0, 1, 1}, {&__pyx_n_s_fy, __pyx_k_fy, sizeof(__pyx_k_fy), 0, 0, 1, 1}, {&__pyx_n_s_fz, __pyx_k_fz, sizeof(__pyx_k_fz), 0, 0, 1, 1}, {&__pyx_n_s_gxx, __pyx_k_gxx, sizeof(__pyx_k_gxx), 0, 0, 1, 1}, {&__pyx_n_s_gxy, __pyx_k_gxy, sizeof(__pyx_k_gxy), 0, 0, 1, 1}, {&__pyx_n_s_gxz, __pyx_k_gxz, sizeof(__pyx_k_gxz), 0, 0, 1, 1}, {&__pyx_n_s_gyy, __pyx_k_gyy, sizeof(__pyx_k_gyy), 0, 0, 1, 1}, {&__pyx_n_s_gyz, __pyx_k_gyz, sizeof(__pyx_k_gyz), 0, 0, 1, 1}, {&__pyx_n_s_gz, __pyx_k_gz, sizeof(__pyx_k_gz), 0, 0, 1, 1}, {&__pyx_n_s_gzz, __pyx_k_gzz, sizeof(__pyx_k_gzz), 0, 0, 1, 1}, {&__pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_k_home_leo_src_fatiando_fatiando, sizeof(__pyx_k_home_leo_src_fatiando_fatiando), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, {&__pyx_n_s_kernel, __pyx_k_kernel, sizeof(__pyx_k_kernel), 0, 0, 1, 1}, {&__pyx_n_s_kp1, __pyx_k_kp1, sizeof(__pyx_k_kp1), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_mx, __pyx_k_mx, sizeof(__pyx_k_mx), 0, 0, 1, 1}, {&__pyx_n_s_my, __pyx_k_my, sizeof(__pyx_k_my), 0, 0, 1, 1}, {&__pyx_n_s_mz, __pyx_k_mz, sizeof(__pyx_k_mz), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_nverts, __pyx_k_nverts, sizeof(__pyx_k_nverts), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_releasebuffer, __pyx_k_pyx_releasebuffer, sizeof(__pyx_k_pyx_releasebuffer), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_res, __pyx_k_res, sizeof(__pyx_k_res), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 
0, 0, 1, 1}, {&__pyx_n_s_tf, __pyx_k_tf, sizeof(__pyx_k_tf), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_v1, __pyx_k_v1, sizeof(__pyx_k_v1), 0, 0, 1, 1}, {&__pyx_n_s_v2, __pyx_k_v2, sizeof(__pyx_k_v2), 0, 0, 1, 1}, {&__pyx_n_s_v3, __pyx_k_v3, sizeof(__pyx_k_v3), 0, 0, 1, 1}, {&__pyx_n_s_v4, __pyx_k_v4, sizeof(__pyx_k_v4), 0, 0, 1, 1}, {&__pyx_n_s_v5, __pyx_k_v5, sizeof(__pyx_k_v5), 0, 0, 1, 1}, {&__pyx_n_s_v6, __pyx_k_v6, sizeof(__pyx_k_v6), 0, 0, 1, 1}, {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, {&__pyx_n_s_xp, __pyx_k_xp, sizeof(__pyx_k_xp), 0, 0, 1, 1}, {&__pyx_n_s_y, __pyx_k_y, sizeof(__pyx_k_y), 0, 0, 1, 1}, {&__pyx_n_s_yp, __pyx_k_yp, sizeof(__pyx_k_yp), 0, 0, 1, 1}, {&__pyx_n_s_z1, __pyx_k_z1, sizeof(__pyx_k_z1), 0, 0, 1, 1}, {&__pyx_n_s_z2, __pyx_k_z2, sizeof(__pyx_k_z2), 0, 0, 1, 1}, {&__pyx_n_s_zp, __pyx_k_zp, sizeof(__pyx_k_zp), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 353; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; 
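/* Cython's error idiom, repeated throughout this file: on failure, store where it happened (__pyx_filename and __pyx_lineno point into the original .pyx/.pxd source, while __pyx_clineno, set from __LINE__, points into the generated C) and jump to the shared error label; __Pyx_AddTraceback() later uses these three values to fabricate a Python traceback frame for generated code that never had a real Python frame. */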
__pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "fatiando/gravmag/_polyprism.pyx":334 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__7 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gz, 334, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":364 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__9 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, 
__pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); __pyx_codeobj__10 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__9, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxx, 364, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__10)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":394 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__11 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); __pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxy, 394, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":424 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__13 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxz, 424, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":454 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__15 = 
PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gyy, 454, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":484 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__17 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__17)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); __pyx_codeobj__18 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gyz, 484, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":514 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__19 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__19)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gzz, 514, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":544 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # 
<<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__21 = PyTuple_Pack(33, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_fx, __pyx_n_s_fy, __pyx_n_s_fz, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_v1, __pyx_n_s_v2, __pyx_n_s_v3, __pyx_n_s_v4, __pyx_n_s_v5, __pyx_n_s_v6, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__21)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); __pyx_codeobj__22 = (PyObject*)__Pyx_PyCode_New(14, 0, 33, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_tf, 544, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":588 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__23 = PyTuple_Pack(27, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_v1, __pyx_n_s_v2, __pyx_n_s_v3, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__23)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(11, 0, 27, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_bx, 588, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":624 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__25 = PyTuple_Pack(27, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_v2, __pyx_n_s_v4, __pyx_n_s_v5, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__25)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(11, 0, 27, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, 
__pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_by, 624, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "fatiando/gravmag/_polyprism.pyx":660 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__27 = PyTuple_Pack(27, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_v3, __pyx_n_s_v5, __pyx_n_s_v6, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__27)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__27); __Pyx_GIVEREF(__pyx_tuple__27); __pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(11, 0, 27, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_bz, 660, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC init_polyprism(void); /*proto*/ PyMODINIT_FUNC init_polyprism(void) #else PyMODINIT_FUNC PyInit__polyprism(void); /*proto*/ PyMODINIT_FUNC PyInit__polyprism(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__polyprism(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef 
__Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_polyprism"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_This_is_a_Cython_implementation), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. ---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif if (__pyx_module_is_main_fatiando__gravmag___polyprism) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "fatiando.gravmag._polyprism")) { if (unlikely(PyDict_SetItemString(modules, "fatiando.gravmag._polyprism", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "fatiando/gravmag/_polyprism.pyx":6 * A pure python implementation is in _polyprism_numpy.py * """ * import numpy # <<<<<<<<<<<<<< * * from libc.math cimport log, atan2, sqrt */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_numpy, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "fatiando/gravmag/_polyprism.pyx":15 * from cython.parallel cimport prange, parallel * * DTYPE = numpy.float # <<<<<<<<<<<<<< * ctypedef numpy.float_t DTYPE_T * */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":334 * @cython.wraparound(False) * @cython.boundscheck(False) * def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_1gz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":364 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_3gxx, 
NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxx, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":394 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_5gxy, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxy, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":424 * @cython.wraparound(False) * @cython.boundscheck(False) * def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_7gxz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":454 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_9gyy, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gyy, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":484 * @cython.wraparound(False) * @cython.boundscheck(False) * def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_11gyz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gyz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":514 * @cython.wraparound(False) * @cython.boundscheck(False) * def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * 
numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_13gzz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gzz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":544 * @cython.wraparound(False) * @cython.boundscheck(False) * def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_15tf, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_tf, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":588 * @cython.wraparound(False) * @cython.boundscheck(False) * def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_17bx, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_bx, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":624 * @cython.wraparound(False) * @cython.boundscheck(False) * def by(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_19by, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_by, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":660 * @cython.wraparound(False) * @cython.boundscheck(False) * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None, # <<<<<<<<<<<<<< * numpy.ndarray[DTYPE_T, ndim=1] yp not None, * numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_21bz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_bz, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "fatiando/gravmag/_polyprism.pyx":1 * #cython: embedsignature=True # <<<<<<<<<<<<<< * """ * 
This is a Cython implementation of the potential fields of a polygonal prism. */ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { __Pyx_AddTraceback("init fatiando.gravmag._polyprism", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init fatiando.gravmag._polyprism"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, 
Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) /* First char was not a digit */ PyErr_Format(PyExc_ValueError, "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ?
"'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; /* Consume from buffer string */ while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; /* breaks both loops as ctx->enc_count == 0 */ } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; /* empty struct */ field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue; /* skip whitespace between dimensions */ default: break; /* not a 'break' in the loop */ } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) {
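/* a type chunk is still pending but the dtype's field stack is already exhausted: the format string describes more data than the dtype holds */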
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case 10: case 13: ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': /* substruct */ { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': /* end of substruct; either repeat or move on */ { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } /* fall through */ case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 's': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; } else { if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; } ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context 
ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static CYTHON_INLINE long __Pyx_mod_long(long a, long b) { long r = a % b; r += ((r != 0) & ((r ^ b) < 0)) * b; return r; } #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); #if PY_VERSION_HEX >= 0x02060000 if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; #endif result = (*call)(func, arg, kw); #if PY_VERSION_HEX >= 0x02060000 Py_LeaveRecursiveCall(); #endif if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject 
*instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { if (PyObject_IsSubclass(instance_class, type)) { type = instance_class; } else { instance_class = NULL; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON result = PyDict_GetItem(__pyx_d, name); if (result) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict) { PyObject *getbuffer_cobj = PyObject_GetItem( obj->ob_type->tp_dict, __pyx_n_s_pyx_getbuffer); if (getbuffer_cobj) { getbufferproc func = (getbufferproc) PyCObject_AsVoidPtr(getbuffer_cobj); Py_DECREF(getbuffer_cobj); if (!func) goto fail; return func(obj, view, flags); } else { PyErr_Clear(); } } #endif PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); #if PY_VERSION_HEX < 0x02060000 fail: #endif return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; } #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict) { PyObject *releasebuffer_cobj = PyObject_GetItem( obj->ob_type->tp_dict, __pyx_n_s_pyx_releasebuffer); if (releasebuffer_cobj) { releasebufferproc func = (releasebufferproc) PyCObject_AsVoidPtr(releasebuffer_cobj); Py_DECREF(releasebuffer_cobj); if (!func) goto fail; func(obj, view); return; } else { PyErr_Clear(); } } #endif goto nofail; #if PY_VERSION_HEX < 0x02060000 fail: #endif PyErr_WriteUnraisable(obj); nofail: Py_DECREF(obj); view->obj = NULL; } #endif /* PY_MAJOR_VERSION < 3 */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = 
PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value) { const unsigned int neg_one = (unsigned int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(unsigned int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(unsigned int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(unsigned int) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long) value); } } else { if (sizeof(unsigned int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(unsigned int) <= sizeof(long long)) { return PyLong_FromLongLong((long long) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(unsigned int), little, !is_unsigned); } } #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func) \ { \ func_type value = func(x); \ if (sizeof(target_type) < sizeof(func_type)) { \ if (unlikely(value != (func_type) (target_type) value)) { \ func_type zero = 0; \ PyErr_SetString(PyExc_OverflowError, \ (is_unsigned && unlikely(value < zero)) ? 
\ "can't convert negative value to " #target_type : \ "value too large to convert to " #target_type); \ return (target_type) -1; \ } \ } \ return (target_type) value; \ } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject *x) { const unsigned int neg_one = (unsigned int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(unsigned int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned int"); return (unsigned int) -1; } return (unsigned int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(unsigned int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (unsigned int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned int"); return (unsigned int) -1; } if (sizeof(unsigned int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(unsigned int) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(unsigned int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(unsigned int) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(unsigned int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(unsigned int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long, PyLong_AsLong) } else if (sizeof(unsigned int) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else unsigned int val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (unsigned int) -1; } } else { unsigned int val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned int) -1; val = __Pyx_PyInt_As_unsigned_int(tmp); Py_DECREF(tmp); return val; } } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static 
CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE 
__pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(int) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long) value); } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(long long)) { return PyLong_FromLongLong((long long) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG) } else 
{ long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(int) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(int) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong) } else if (sizeof(int) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(long) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long) value); } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(long long)) { return PyLong_FromLongLong((long long) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert 
negative value to long"); return (long) -1; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(long)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (long) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(long) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(long)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(long) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(long) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong) } else if (sizeof(long) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); 
Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include 
"frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif /*__PYX_DEFAULT_STRING_ENCODING_IS_ASCII*/ *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else /* PY_VERSION_HEX < 0x03030000 */ if (PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_DATA_SIZE(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ return PyUnicode_AsUTF8AndSize(o, length); #endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ #endif /* PY_VERSION_HEX < 0x03030000 */ } else #endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */ #if !CYTHON_COMPILING_IN_PYPY #if PY_VERSION_HEX >= 0x02060000 if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if 
PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) return PyInt_AS_LONG(b); #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS switch (Py_SIZE(b)) { case -1: return -(sdigit)((PyLongObject*)b)->ob_digit[0]; case 0: return 0; case 1: return ((PyLongObject*)b)->ob_digit[0]; } #endif #endif #if PY_VERSION_HEX < 0x02060000 return PyInt_AsSsize_t(b); #else return PyLong_AsSsize_t(b); #endif } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } #endif /* Py_PYTHON_H */
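/*
 * Editor's note -- illustrative sketch only, not part of the generated
 * module above.  Two low-level tricks recur throughout these helpers:
 * (1) the alignment of a type T is measured as
 *     sizeof(struct { T x; char c; }) - sizeof(T)
 *     (the tail padding the compiler adds so array elements of the struct
 *     keep 'x' aligned equals T's alignment on common ABIs), and
 * (2) byte order is detected by storing 1 in an int and reading its first
 *     byte.  The names pad_double_t and the main() harness are hypothetical,
 *     introduced here purely for demonstration.
 */
#include <stdio.h>

typedef struct { double x; char c; } pad_double_t;

int main(void) {
    /* (1) alignment probe: e.g. 16 - 8 == 8 on typical x86-64 ABIs */
    printf("alignment of double: %zu\n",
           sizeof(pad_double_t) - sizeof(double));

    /* (2) endianness probe: on a little-endian machine the least
       significant byte of 'one' sits at the lowest address, so the
       dereference reads back 1 */
    int one = 1;
    int little_endian = (int)*(unsigned char *)&one;
    printf("little-endian: %d\n", little_endian);
    return 0;
}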
/* Generated by Cython 0.20.1 on Thu Jul 3 12:41:06 2014 */ #define PY_SSIZE_T_CLEAN #ifndef CYTHON_USE_PYLONG_INTERNALS #ifdef PYLONG_BITS_IN_DIGIT #define CYTHON_USE_PYLONG_INTERNALS 0 #else #include "pyconfig.h" #ifdef PYLONG_BITS_IN_DIGIT #define CYTHON_USE_PYLONG_INTERNALS 1 #else #define CYTHON_USE_PYLONG_INTERNALS 0 #endif #endif #endif #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #define CYTHON_ABI "0_20_1" #include <stddef.h> /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if CYTHON_COMPILING_IN_PYPY #define Py_OptimizeFlag 0 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_As_int(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc) (PyObject *, Py_buffer *, int); typedef void (*releasebufferproc) (PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, 
lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #if PY_VERSION_HEX < 0x02060000 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX < 0x02060000 #define Py_TPFLAGS_HAVE_VERSION_TAG 0 #endif #if PY_VERSION_HEX < 0x02060000 && !defined(Py_TPFLAGS_IS_ABSTRACT) #define Py_TPFLAGS_IS_ABSTRACT 0 #endif #if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE) #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj) || \ PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? 
(PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { /* * Initialize NaN. The sign is irrelevant, an exponent with all bits 1 * and a nonzero mantissa means NaN. If the first bit in the mantissa is * 1, it is a quiet NaN. 
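* For a 32-bit IEEE-754 float, memset with 0xFF produces the bit pattern
* 0xFFFFFFFF: sign 1, exponent 0xFF (all ones) and mantissa 0x7FFFFF
* (nonzero, with its leading bit set) -- i.e. a quiet NaN.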
*/ float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #define __PYX_HAVE__fatiando__gravmag___polyprism #define __PYX_HAVE_API__fatiando__gravmag___polyprism #include "math.h" #include "string.h" #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "omp.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED #if defined(__GNUC__) #if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) #define CYTHON_UNUSED __attribute__ ((__unused__)) #else #define CYTHON_UNUSED #endif #elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) #define CYTHON_UNUSED __attribute__ ((__unused__)) #else #define CYTHON_UNUSED #endif #endif typedef struct { PyObject **p; char *s; const Py_ssize_t n; const char *encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /* proto */ #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \ (sizeof(type) < sizeof(Py_ssize_t)) || \ (sizeof(type) > sizeof(Py_ssize_t) && \ likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX) && \ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN || \ v == (type)PY_SSIZE_T_MIN))) || \ (sizeof(type) == sizeof(Py_ssize_t) && \ (is_signed || likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE char *__Pyx_PyObject_AsString(PyObject *); static CYTHON_INLINE char *__Pyx_PyObject_AsStringAndSize(PyObject *, Py_ssize_t * length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject *__Pyx_PyUnicode_FromString(char *); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromUString(s) __Pyx_PyObject_FromString((char*)s) #define __Pyx_PyBytes_FromUString(s) __Pyx_PyBytes_FromString((char*)s) #define __Pyx_PyByteArray_FromUString(s) __Pyx_PyByteArray_FromString((char*)s) #define __Pyx_PyStr_FromUString(s) __Pyx_PyStr_FromString((char*)s) #define 
__Pyx_PyUnicode_FromUString(s) __Pyx_PyUnicode_FromString((char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE * u) { const Py_UNICODE *u_end = u; while (*u_end++); return u_end - u - 1; } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject *); static CYTHON_INLINE PyObject *__Pyx_PyNumber_Int(PyObject * x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject *); static CYTHON_INLINE PyObject *__Pyx_PyInt_FromSize_t(size_t); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject *sys = NULL; PyObject *default_encoding = NULL; PyObject *ascii_chars_u = NULL; PyObject *ascii_chars_b = NULL; sys = PyImport_ImportModule("sys"); if (sys == NULL) goto bad; default_encoding = PyObject_CallMethod(sys, (char *)(const char *)"getdefaultencoding", NULL); if (default_encoding == NULL) goto bad; if (strcmp(PyBytes_AsString(default_encoding), "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { const char *default_encoding_c = PyBytes_AS_STRING(default_encoding); char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (ascii_chars_u == NULL) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (ascii_chars_b == NULL || strncmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } } Py_XDECREF(sys); Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return 0; bad: Py_XDECREF(sys); Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char *__PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject *sys = NULL; PyObject *default_encoding = NULL; char *default_encoding_c; sys = PyImport_ImportModule("sys"); if (sys == NULL) goto bad; default_encoding = PyObject_CallMethod(sys, (char *)(const char *)"getdefaultencoding", NULL); if (default_encoding == NULL) goto bad; default_encoding_c = PyBytes_AS_STRING(default_encoding); __PYX_DEFAULT_STRING_ENCODING = (char 
*)malloc(strlen(default_encoding_c) + 1); /* +1 for the NUL terminator copied by strcpy() */ strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(sys); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(sys); Py_XDECREF(default_encoding); return -1; } #endif #endif #ifdef __GNUC__ /* Test for GCC > 2.95 */ #if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* __GNUC__ > 2 ... */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ > 2 ... */ #else /* __GNUC__ */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char *__pyx_cfilenm = __FILE__; static const char *__pyx_filename; #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "_polyprism.pyx", "__init__.pxd", "type.pxd", }; #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char *name; /* for error messages only */ struct __Pyx_StructField_ *fields; size_t size; /* sizeof(type) */ size_t arraysize[8]; /* length of array in each dimension */ int ndim; char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, * _U_nsigned int, _S_truct, _P_ointer, * _O_bject, c_H_ar */ char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo *type; const char *name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField *field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem *head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":723 # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< ctypedef * npy_int16 int16_t ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":724 * * ctypedef npy_int8 int8_t ctypedef npy_int16 int16_t # * <<<<<<<<<<<<<< ctypedef npy_int32 int32_t ctypedef npy_int64 * int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":725 ctypedef npy_int8 int8_t ctypedef npy_int16 * int16_t ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":726 ctypedef npy_int16 int16_t ctypedef npy_int32 * int32_t ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":730 #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< ctypedef * npy_uint16 uint16_t ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":731 * * ctypedef npy_uint8 uint8_t ctypedef npy_uint16 uint16_t * # <<<<<<<<<<<<<< ctypedef npy_uint32 uint32_t ctypedef npy_uint64 * uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":732 ctypedef npy_uint8 uint8_t ctypedef npy_uint16 * uint16_t ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":733 ctypedef npy_uint16 uint16_t ctypedef npy_uint32 * uint32_t ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":737 #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< ctypedef * npy_float64 float64_t #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":738 * * ctypedef npy_float32 float32_t ctypedef npy_float64 float64_t * # <<<<<<<<<<<<<< #ctypedef npy_float80 float80_t #ctypedef npy_float128 * float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":747 # The int types are mapped a bit surprising -- # * numpy.int corresponds to 'l' and numpy.long to 'q' ctypedef npy_long * int_t # <<<<<<<<<<<<<< ctypedef npy_longlong long_t ctypedef * npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":748 # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t ctypedef npy_longlong long_t # * <<<<<<<<<<<<<< ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* * 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":749 ctypedef npy_long int_t ctypedef npy_longlong * long_t ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":751 ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< ctypedef * npy_ulonglong ulong_t ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":752 * * ctypedef npy_ulong uint_t ctypedef npy_ulonglong ulong_t # * <<<<<<<<<<<<<< ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":753 ctypedef npy_ulong uint_t ctypedef npy_ulonglong * ulong_t ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":755 ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< ctypedef * npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":756 * * ctypedef npy_intp intp_t ctypedef npy_uintp uintp_t # * <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":758 ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< ctypedef * npy_double double_t ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":759 * * ctypedef npy_double float_t ctypedef npy_double double_t * # <<<<<<<<<<<<<< ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":760 ctypedef npy_double float_t ctypedef npy_double * double_t ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* * "fatiando/gravmag/_polyprism.pyx":16 * * DTYPE = numpy.float ctypedef numpy.float_t DTYPE_T # * <<<<<<<<<<<<<< * * cdef inline double kernelz(double X1, double Y1, double X2, double Y2, */ typedef __pyx_t_5numpy_float_t __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T; #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef: : std: :complex < float >__pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef: : std: :complex < double >__pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":762 ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< ctypedef * npy_cdouble cdouble_t ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat 
__pyx_t_5numpy_cfloat_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":763 * * ctypedef npy_cfloat cfloat_t ctypedef npy_cdouble cdouble_t * # <<<<<<<<<<<<<< ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":764 ctypedef npy_cfloat cfloat_t ctypedef npy_cdouble * cdouble_t ctypedef npy_clongdouble clongdouble_t # * <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":766 ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF) (void *, PyObject *, int); void (*DECREF) (void *, PyObject *, int); void (*GOTREF) (void *, PyObject *, int); void (*GIVEREF) (void *, PyObject *, int); void *(*SetupContext) (const char *, int, const char *); void (*FinishContext) (void **); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /* proto */ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_XDECREF_SET(r, v) do { \ PyObject *tmp = (PyObject *) r; \ r = v; __Pyx_XDECREF(tmp); \ } while (0) #define __Pyx_DECREF_SET(r, v) do { \ PyObject *tmp = (PyObject *) r; \ r = v; __Pyx_DECREF(tmp); \ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp 
= ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject * __Pyx_PyObject_GetAttrStr(PyObject * obj, PyObject * attr_name) { PyTypeObject *tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif static PyObject *__Pyx_GetBuiltinName(PyObject * name); /* proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif static CYTHON_INLINE void __Pyx_ErrRestore(PyObject * type, PyObject * value, PyObject * tb); /* proto */ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject ** type, PyObject ** value, PyObject ** tb); /* proto */ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback); /* proto */ static void __Pyx_RaiseArgtupleInvalid(const char *func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* proto */ static void __Pyx_RaiseDoubleKeywordsError(const char *func_name, PyObject * kw_name); /* proto */ static int __Pyx_ParseOptionalKeywords(PyObject * kwds, PyObject ** argnames[], \ PyObject * kwds2, PyObject * values[], Py_ssize_t num_pos_args, \ const char *function_name); /* proto */ static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject * obj, PyTypeObject * type, int none_allowed, const char *name, int exact); /* proto */ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer * buf, PyObject * obj, __Pyx_TypeInfo * dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem * stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer * info); #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) static CYTHON_INLINE long __Pyx_mod_long(long, long); /* proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject *__Pyx_PyObject_Call(PyObject * func, PyObject * arg, PyObject * kw); /* proto */ #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif static void __Pyx_Raise(PyObject * type, PyObject * value, PyObject * tb, PyObject * cause); /* proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_TypeTest(PyObject * obj, PyTypeObject * type); /* proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject * name); /* proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject * obj, Py_buffer * view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer * view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; static PyObject *__Pyx_Import(PyObject * name, PyObject * from_list, int level); /* proto */ static CYTHON_INLINE PyObject *__Pyx_PyInt_From_unsigned_int(unsigned int value); static CYTHON_INLINE unsigned int 
__Pyx_PyInt_As_unsigned_int(PyObject *); #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if (defined(_WIN32) || defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int 
__Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif static CYTHON_INLINE PyObject *__Pyx_PyInt_From_int(int value); static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); static CYTHON_INLINE PyObject *__Pyx_PyInt_From_long(long value); static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); static int __Pyx_check_binary_version(void); #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif static PyObject *__Pyx_ImportModule(const char *name); /* proto */ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /* proto */ typedef struct { int code_line; PyCodeObject *code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry *entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0, 0, NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry * entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject * code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry * t); /* proto */ /* Module declarations from 'libc.math' */ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from 'cpython.object' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /* proto */ /* Module declarations from 'cython' */ /* Module declarations from 'openmp' */ /* Module declarations from 'fatiando.gravmag._polyprism' */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelz(double, double, double, double, double, double, double, double); /* proto */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(double, double, double, double, double, double, double, double); /* proto */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(double, double, double, double, double, double, double, double); /* proto */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(double, double, double, double, double, double, double, double); /* proto */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(double, double, double, double, double, double, double, double); /* proto */ static CYTHON_INLINE double 
__pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(double, double, double, double, double, double, double, double); /* proto */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(double, double, double, double, double, double, double, double); /* proto */ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T = {"DTYPE_T", NULL, sizeof(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T), {0}, 0, 'R', 0, 0}; #define __Pyx_MODULE_NAME "fatiando.gravmag._polyprism" int __pyx_module_is_main_fatiando__gravmag___polyprism = 0; /* Implementation of 'fatiando.gravmag._polyprism' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_gz(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject * __pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_2gxx(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject * __pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_4gxy(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject * __pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_6gxz(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject * __pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_8gyy(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject * __pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_10gyz(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject * __pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_12gzz(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject * __pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_14tf(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, double __pyx_v_fx, double __pyx_v_fy, double __pyx_v_fz, 
PyArrayObject * __pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_16bx(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject * __pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_18by(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject * __pyx_v_res); /* proto */ static PyObject *__pyx_pf_8fatiando_7gravmag_10_polyprism_20bz(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject * __pyx_v_res); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject * __pyx_v_self, Py_buffer * __pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject * __pyx_v_self, Py_buffer * __pyx_v_info); /* proto */ static char __pyx_k_B[] = "B"; static char __pyx_k_H[] = "H"; static char __pyx_k_I[] = "I"; static char __pyx_k_L[] = "L"; static char __pyx_k_O[] = "O"; static char __pyx_k_Q[] = "Q"; static char __pyx_k_b[] = "b"; static char __pyx_k_d[] = "d"; static char __pyx_k_f[] = "f"; static char __pyx_k_g[] = "g"; static char __pyx_k_h[] = "h"; static char __pyx_k_i[] = "i"; static char __pyx_k_k[] = "k"; static char __pyx_k_l[] = "l"; static char __pyx_k_q[] = "q"; static char __pyx_k_x[] = "x"; static char __pyx_k_y[] = "y"; static char __pyx_k_X1[] = "X1"; static char __pyx_k_X2[] = "X2"; static char __pyx_k_Y1[] = "Y1"; static char __pyx_k_Y2[] = "Y2"; static char __pyx_k_Z1[] = "Z1"; static char __pyx_k_Z2[] = "Z2"; static char __pyx_k_Zd[] = "Zd"; static char __pyx_k_Zf[] = "Zf"; static char __pyx_k_Zg[] = "Zg"; static char __pyx_k_bx[] = "bx"; static char __pyx_k_by[] = "by"; static char __pyx_k_bz[] = "bz"; static char __pyx_k_fx[] = "fx"; static char __pyx_k_fy[] = "fy"; static char __pyx_k_fz[] = "fz"; static char __pyx_k_gz[] = "gz"; static char __pyx_k_mx[] = "mx"; static char __pyx_k_my[] = "my"; static char __pyx_k_mz[] = "mz"; static char __pyx_k_tf[] = "tf"; static char __pyx_k_v1[] = "v1"; static char __pyx_k_v2[] = "v2"; static char __pyx_k_v3[] = "v3"; static char __pyx_k_v4[] = "v4"; static char __pyx_k_v5[] = "v5"; static char __pyx_k_v6[] = "v6"; static char __pyx_k_xp[] = "xp"; static char __pyx_k_yp[] = "yp"; static char __pyx_k_z1[] = "z1"; static char __pyx_k_z2[] = "z2"; static char __pyx_k_zp[] = "zp"; static char __pyx_k_gxx[] = "gxx"; static char __pyx_k_gxy[] = "gxy"; static char __pyx_k_gxz[] = "gxz"; static char __pyx_k_gyy[] = "gyy"; static char __pyx_k_gyz[] = "gyz"; static char __pyx_k_gzz[] = "gzz"; static char __pyx_k_kp1[] = "kp1"; static char __pyx_k_res[] = "res"; static char __pyx_k_main[] = "__main__"; static char __pyx_k_size[] = "size"; static char __pyx_k_test[] = "__test__"; static char __pyx_k_DTYPE[] = "DTYPE"; static char __pyx_k_float[] = "float"; static char __pyx_k_numpy[] = "numpy"; static char __pyx_k_range[] = "range"; static char 
__pyx_k_Z1_sqr[] = "Z1_sqr"; static char __pyx_k_Z2_sqr[] = "Z2_sqr"; static char __pyx_k_import[] = "__import__"; static char __pyx_k_kernel[] = "kernel"; static char __pyx_k_nverts[] = "nverts"; static char __pyx_k_density[] = "density"; static char __pyx_k_ValueError[] = "ValueError"; static char __pyx_k_RuntimeError[] = "RuntimeError"; static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static char __pyx_k_pyx_releasebuffer[] = "__pyx_releasebuffer"; static char __pyx_k_fatiando_gravmag__polyprism[] = "fatiando.gravmag._polyprism"; static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static char __pyx_k_home_leo_src_fatiando_fatiando[] = "/home/leo/src/fatiando/fatiando/gravmag/_polyprism.pyx"; static char __pyx_k_This_is_a_Cython_implementation[] = "\nThis is a Cython implementation of the potential fields of a polygonal prism.\nA pure python implementation is in _polyprism_numpy.py\n"; static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_n_s_DTYPE; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_X1; static PyObject *__pyx_n_s_X2; static PyObject *__pyx_n_s_Y1; static PyObject *__pyx_n_s_Y2; static PyObject *__pyx_n_s_Z1; static PyObject *__pyx_n_s_Z1_sqr; static PyObject *__pyx_n_s_Z2; static PyObject *__pyx_n_s_Z2_sqr; static PyObject *__pyx_n_s_bx; static PyObject *__pyx_n_s_by; static PyObject *__pyx_n_s_bz; static PyObject *__pyx_n_s_density; static PyObject *__pyx_n_s_fatiando_gravmag__polyprism; static PyObject *__pyx_n_s_float; static PyObject *__pyx_n_s_fx; static PyObject *__pyx_n_s_fy; static PyObject *__pyx_n_s_fz; static PyObject *__pyx_n_s_gxx; static PyObject *__pyx_n_s_gxy; static PyObject *__pyx_n_s_gxz; static PyObject *__pyx_n_s_gyy; static PyObject *__pyx_n_s_gyz; static PyObject *__pyx_n_s_gz; static PyObject *__pyx_n_s_gzz; static PyObject *__pyx_kp_s_home_leo_src_fatiando_fatiando; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_k; static PyObject *__pyx_n_s_kernel; static PyObject *__pyx_n_s_kp1; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_mx; static PyObject *__pyx_n_s_my; static PyObject *__pyx_n_s_mz; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_nverts; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_releasebuffer; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_res; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_tf; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_v1; static PyObject *__pyx_n_s_v2; static PyObject *__pyx_n_s_v3; static PyObject *__pyx_n_s_v4; static PyObject *__pyx_n_s_v5; static PyObject 
*__pyx_n_s_v6; static PyObject *__pyx_n_s_x; static PyObject *__pyx_n_s_xp; static PyObject *__pyx_n_s_y; static PyObject *__pyx_n_s_yp; static PyObject *__pyx_n_s_z1; static PyObject *__pyx_n_s_z2; static PyObject *__pyx_n_s_zp; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__27; static PyObject *__pyx_codeobj__8; static PyObject *__pyx_codeobj__10; static PyObject *__pyx_codeobj__12; static PyObject *__pyx_codeobj__14; static PyObject *__pyx_codeobj__16; static PyObject *__pyx_codeobj__18; static PyObject *__pyx_codeobj__20; static PyObject *__pyx_codeobj__22; static PyObject *__pyx_codeobj__24; static PyObject *__pyx_codeobj__26; static PyObject *__pyx_codeobj__28; /* * "fatiando/gravmag/_polyprism.pyx":18 ctypedef numpy.float_t DTYPE_T * * cdef inline double kernelz(double X1, double Y1, double X2, double Y2, * # <<<<<<<<<<<<<< double Z1, double Z2, double Z1_sqr, double Z2_sqr) * nogil: cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelz(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, double __pyx_v_Z1_sqr, double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Qk1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Qk2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Ak1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Ak2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R1k1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R1k2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R2k1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R2k2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Bk1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Bk2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_E1k1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_E1k2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_E2k1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_E2k2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Ck1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Ck2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* * "fatiando/gravmag/_polyprism.pyx":24 Qk1, Qk2, Ak1, Ak2, R1k1, R1k2, * R2k1, R2k2, Bk1, Bk2, E1k1, \ E1k2, E2k1, E2k2, Ck1, Ck2 DTYPE_T dummy * = 1e-10 # Used to avoid singularities # <<<<<<<<<<<<<< * kernel = 0 p = X1*Y2 - X2*Y1 */ __pyx_v_dummy = 1e-10; 
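/*
 * Note on the numerical guard used throughout this kernel: `dummy` (1e-10)
 * is folded into the denominators and into the arguments of log() below,
 * so an observation point lying exactly on a prism edge does not produce
 * a division by zero or log(0). The generated ZeroDivisionError checks
 * further down remain as a second line of defense for denominators that
 * are still exactly zero.
 */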
/* * "fatiando/gravmag/_polyprism.pyx":25 E1k2, E2k1, E2k2, Ck1, Ck2 * DTYPE_T dummy = 1e-10 # Used to avoid singularities kernel = 0 * # <<<<<<<<<<<<<< p = X1*Y2 - X2*Y1 p_sqr = p**2 */ __pyx_v_kernel = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":26 DTYPE_T dummy = 1e-10 # Used to * avoid singularities kernel = 0 p = X1*Y2 - X2*Y1 # * <<<<<<<<<<<<<< p_sqr = p**2 Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 */ __pyx_v_p = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":27 kernel = 0 p = X1*Y2 - X2*Y1 * p_sqr = p**2 # <<<<<<<<<<<<<< Qk1 = (Y2 - Y1)*Y1 + (X2 - * X1)*X1 Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 */ __pyx_v_p_sqr = pow(__pyx_v_p, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":28 p = X1*Y2 - X2*Y1 p_sqr = p**2 * Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 # <<<<<<<<<<<<<< Qk2 = * (Y2 - Y1)*Y2 + (X2 - X1)*X2 Ak1 = X1**2 + Y1**2 */ __pyx_v_Qk1 = (((__pyx_v_Y2 - __pyx_v_Y1) * __pyx_v_Y1) + ((__pyx_v_X2 - __pyx_v_X1) * __pyx_v_X1)); /* * "fatiando/gravmag/_polyprism.pyx":29 p_sqr = p**2 Qk1 = (Y2 - Y1)*Y1 + * (X2 - X1)*X1 Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 # * <<<<<<<<<<<<<< Ak1 = X1**2 + Y1**2 Ak2 = X2**2 + Y2**2 */ __pyx_v_Qk2 = (((__pyx_v_Y2 - __pyx_v_Y1) * __pyx_v_Y2) + ((__pyx_v_X2 - __pyx_v_X1) * __pyx_v_X2)); /* * "fatiando/gravmag/_polyprism.pyx":30 Qk1 = (Y2 - Y1)*Y1 + (X2 - X1)*X1 * Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 Ak1 = X1**2 + Y1**2 # * <<<<<<<<<<<<<< Ak2 = X2**2 + Y2**2 R1k1 = sqrt(Ak1 + Z1_sqr) */ __pyx_v_Ak1 = (pow(__pyx_v_X1, 2.0) + pow(__pyx_v_Y1, 2.0)); /* * "fatiando/gravmag/_polyprism.pyx":31 Qk2 = (Y2 - Y1)*Y2 + (X2 - X1)*X2 * Ak1 = X1**2 + Y1**2 Ak2 = X2**2 + Y2**2 # <<<<<<<<<<<<<< * R1k1 = sqrt(Ak1 + Z1_sqr) R1k2 = sqrt(Ak2 + Z1_sqr) */ __pyx_v_Ak2 = (pow(__pyx_v_X2, 2.0) + pow(__pyx_v_Y2, 2.0)); /* * "fatiando/gravmag/_polyprism.pyx":32 Ak1 = X1**2 + Y1**2 Ak2 = X2**2 + * Y2**2 R1k1 = sqrt(Ak1 + Z1_sqr) # <<<<<<<<<<<<<< R1k2 = * sqrt(Ak2 + Z1_sqr) R2k1 = sqrt(Ak1 + Z2_sqr) */ __pyx_v_R1k1 = sqrt((__pyx_v_Ak1 + __pyx_v_Z1_sqr)); /* * "fatiando/gravmag/_polyprism.pyx":33 Ak2 = X2**2 + Y2**2 R1k1 = * sqrt(Ak1 + Z1_sqr) R1k2 = sqrt(Ak2 + Z1_sqr) # * <<<<<<<<<<<<<< R2k1 = sqrt(Ak1 + Z2_sqr) R2k2 = sqrt(Ak2 + Z2_sqr) */ __pyx_v_R1k2 = sqrt((__pyx_v_Ak2 + __pyx_v_Z1_sqr)); /* * "fatiando/gravmag/_polyprism.pyx":34 R1k1 = sqrt(Ak1 + Z1_sqr) R1k2 = * sqrt(Ak2 + Z1_sqr) R2k1 = sqrt(Ak1 + Z2_sqr) # * <<<<<<<<<<<<<< R2k2 = sqrt(Ak2 + Z2_sqr) Ak1 = sqrt(Ak1) */ __pyx_v_R2k1 = sqrt((__pyx_v_Ak1 + __pyx_v_Z2_sqr)); /* * "fatiando/gravmag/_polyprism.pyx":35 R1k2 = sqrt(Ak2 + Z1_sqr) R2k1 = * sqrt(Ak1 + Z2_sqr) R2k2 = sqrt(Ak2 + Z2_sqr) # * <<<<<<<<<<<<<< Ak1 = sqrt(Ak1) Ak2 = sqrt(Ak2) */ __pyx_v_R2k2 = sqrt((__pyx_v_Ak2 + __pyx_v_Z2_sqr)); /* * "fatiando/gravmag/_polyprism.pyx":36 R2k1 = sqrt(Ak1 + Z2_sqr) R2k2 = * sqrt(Ak2 + Z2_sqr) Ak1 = sqrt(Ak1) # <<<<<<<<<<<<<< Ak2 = * sqrt(Ak2) Bk1 = sqrt(Qk1**2 + p_sqr) */ __pyx_v_Ak1 = sqrt(__pyx_v_Ak1); /* * "fatiando/gravmag/_polyprism.pyx":37 R2k2 = sqrt(Ak2 + Z2_sqr) Ak1 = * sqrt(Ak1) Ak2 = sqrt(Ak2) # <<<<<<<<<<<<<< Bk1 = * sqrt(Qk1**2 + p_sqr) Bk2 = sqrt(Qk2**2 + p_sqr) */ __pyx_v_Ak2 = sqrt(__pyx_v_Ak2); /* * "fatiando/gravmag/_polyprism.pyx":38 Ak1 = sqrt(Ak1) Ak2 = sqrt(Ak2) * Bk1 = sqrt(Qk1**2 + p_sqr) # <<<<<<<<<<<<<< Bk2 = * sqrt(Qk2**2 + p_sqr) E1k1 = R1k1*Bk1 */ __pyx_v_Bk1 = sqrt((pow(__pyx_v_Qk1, 2.0) + __pyx_v_p_sqr)); /* * "fatiando/gravmag/_polyprism.pyx":39 Ak2 = sqrt(Ak2) Bk1 = sqrt(Qk1**2 * + p_sqr) Bk2 = sqrt(Qk2**2 + p_sqr) # <<<<<<<<<<<<<< E1k1 * = R1k1*Bk1 E1k2 = R1k2*Bk2 */ __pyx_v_Bk2 = 
sqrt((pow(__pyx_v_Qk2, 2.0) + __pyx_v_p_sqr)); /* * "fatiando/gravmag/_polyprism.pyx":40 Bk1 = sqrt(Qk1**2 + p_sqr) Bk2 = * sqrt(Qk2**2 + p_sqr) E1k1 = R1k1*Bk1 # <<<<<<<<<<<<<< E1k2 * = R1k2*Bk2 E2k1 = R2k1*Bk1 */ __pyx_v_E1k1 = (__pyx_v_R1k1 * __pyx_v_Bk1); /* * "fatiando/gravmag/_polyprism.pyx":41 Bk2 = sqrt(Qk2**2 + p_sqr) E1k1 = * R1k1*Bk1 E1k2 = R1k2*Bk2 # <<<<<<<<<<<<<< E2k1 = R2k1*Bk1 * E2k2 = R2k2*Bk2 */ __pyx_v_E1k2 = (__pyx_v_R1k2 * __pyx_v_Bk2); /* * "fatiando/gravmag/_polyprism.pyx":42 E1k1 = R1k1*Bk1 E1k2 = R1k2*Bk2 * E2k1 = R2k1*Bk1 # <<<<<<<<<<<<<< E2k2 = R2k2*Bk2 kernel += * (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) */ __pyx_v_E2k1 = (__pyx_v_R2k1 * __pyx_v_Bk1); /* * "fatiando/gravmag/_polyprism.pyx":43 E1k2 = R1k2*Bk2 E2k1 = R2k1*Bk1 * E2k2 = R2k2*Bk2 # <<<<<<<<<<<<<< kernel += (Z2 - * Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) kernel += Z2*(atan2(Z2*Qk1, * R2k1*p) - atan2(Z2*Qk2, R2k2*p)) */ __pyx_v_E2k2 = (__pyx_v_R2k2 * __pyx_v_Bk2); /* * "fatiando/gravmag/_polyprism.pyx":44 E2k1 = R2k1*Bk1 E2k2 = R2k2*Bk2 * kernel += (Z2 - Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) # * <<<<<<<<<<<<<< kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - atan2(Z2*Qk2, * R2k2*p)) kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) */ __pyx_v_kernel = (__pyx_v_kernel + ((__pyx_v_Z2 - __pyx_v_Z1) * (atan2(__pyx_v_Qk2, __pyx_v_p) - atan2(__pyx_v_Qk1, __pyx_v_p)))); /* * "fatiando/gravmag/_polyprism.pyx":45 E2k2 = R2k2*Bk2 kernel += (Z2 - * Z1)*(atan2(Qk2, p) - atan2(Qk1, p)) kernel += Z2*(atan2(Z2*Qk1, * R2k1*p) - atan2(Z2*Qk2, R2k2*p)) # <<<<<<<<<<<<<< kernel * += Z1*(atan2(Z1*Qk2, R1k2*p) - atan2(Z1*Qk1, R1k1*p)) Ck1 = Qk1*Ak1 */ __pyx_v_kernel = (__pyx_v_kernel + (__pyx_v_Z2 * (atan2((__pyx_v_Z2 * __pyx_v_Qk1), (__pyx_v_R2k1 * __pyx_v_p)) - atan2((__pyx_v_Z2 * __pyx_v_Qk2), (__pyx_v_R2k2 * __pyx_v_p))))); /* * "fatiando/gravmag/_polyprism.pyx":46 kernel += (Z2 - Z1)*(atan2(Qk2, * p) - atan2(Qk1, p)) kernel += Z2*(atan2(Z2*Qk1, R2k1*p) - * atan2(Z2*Qk2, R2k2*p)) kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - * atan2(Z1*Qk1, R1k1*p)) # <<<<<<<<<<<<<< Ck1 = Qk1*Ak1 Ck2 * = Qk2*Ak2 */ __pyx_v_kernel = (__pyx_v_kernel + (__pyx_v_Z1 * (atan2((__pyx_v_Z1 * __pyx_v_Qk2), (__pyx_v_R1k2 * __pyx_v_p)) - atan2((__pyx_v_Z1 * __pyx_v_Qk1), (__pyx_v_R1k1 * __pyx_v_p))))); /* * "fatiando/gravmag/_polyprism.pyx":47 kernel += Z2*(atan2(Z2*Qk1, * R2k1*p) - atan2(Z2*Qk2, R2k2*p)) kernel += Z1*(atan2(Z1*Qk2, R1k2*p) - * atan2(Z1*Qk1, R1k1*p)) Ck1 = Qk1*Ak1 # <<<<<<<<<<<<<< Ck2 * = Qk2*Ak2 # dummy helps prevent zero division errors */ __pyx_v_Ck1 = (__pyx_v_Qk1 * __pyx_v_Ak1); /* * "fatiando/gravmag/_polyprism.pyx":48 kernel += Z1*(atan2(Z1*Qk2, * R1k2*p) - atan2(Z1*Qk1, R1k1*p)) Ck1 = Qk1*Ak1 Ck2 = Qk2*Ak2 * # <<<<<<<<<<<<<< # dummy helps prevent zero division errors kernel += * 0.5*p*(Ak1/(Bk1 + dummy))*( */ __pyx_v_Ck2 = (__pyx_v_Qk2 * __pyx_v_Ak2); /* * "fatiando/gravmag/_polyprism.pyx":50 Ck2 = Qk2*Ak2 # dummy helps * prevent zero division errors kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( * # <<<<<<<<<<<<<< log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) */ __pyx_t_1 = (__pyx_v_Bk1 + __pyx_v_dummy); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } /* * "fatiando/gravmag/_polyprism.pyx":51 # 
dummy helps prevent zero * division errors kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( log((E1k1 - * Ck1)/(E1k1 + Ck1 + dummy) + dummy) - # <<<<<<<<<<<<<< * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) kernel += * 0.5*p*(Ak2/(Bk2 + dummy))*( */ __pyx_t_2 = (__pyx_v_E1k1 - __pyx_v_Ck1); __pyx_t_3 = ((__pyx_v_E1k1 + __pyx_v_Ck1) + __pyx_v_dummy); if (unlikely(__pyx_t_3 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } /* * "fatiando/gravmag/_polyprism.pyx":52 kernel += 0.5*p*(Ak1/(Bk1 + * dummy))*( log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - log((E2k1 - * Ck1)/(E2k1 + Ck1 + dummy) + dummy)) # <<<<<<<<<<<<<< * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( log((E2k2 - Ck2)/(E2k2 + Ck2 + * dummy) + dummy) - */ __pyx_t_4 = (__pyx_v_E2k1 - __pyx_v_Ck1); __pyx_t_5 = ((__pyx_v_E2k1 + __pyx_v_Ck1) + __pyx_v_dummy); if (unlikely(__pyx_t_5 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } /* * "fatiando/gravmag/_polyprism.pyx":50 Ck2 = Qk2*Ak2 # dummy helps * prevent zero division errors kernel += 0.5*p*(Ak1/(Bk1 + dummy))*( * # <<<<<<<<<<<<<< log((E1k1 - Ck1)/(E1k1 + Ck1 + dummy) + dummy) - * log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) */ __pyx_v_kernel = (__pyx_v_kernel + (((0.5 * __pyx_v_p) * (__pyx_v_Ak1 / __pyx_t_1)) * (log(((__pyx_t_2 / __pyx_t_3) + __pyx_v_dummy)) - log(((__pyx_t_4 / __pyx_t_5) + __pyx_v_dummy))))); /* * "fatiando/gravmag/_polyprism.pyx":53 log((E1k1 - Ck1)/(E1k1 + Ck1 + * dummy) + dummy) - log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( # <<<<<<<<<<<<<< * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - log((E1k2 - * Ck2)/(E1k2 + Ck2 + dummy) + dummy)) */ __pyx_t_5 = (__pyx_v_Bk2 + __pyx_v_dummy); if (unlikely(__pyx_t_5 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } /* * "fatiando/gravmag/_polyprism.pyx":54 log((E2k1 - Ck1)/(E2k1 + Ck1 + * dummy) + dummy)) kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( log((E2k2 - * Ck2)/(E2k2 + Ck2 + dummy) + dummy) - # <<<<<<<<<<<<<< * log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) return kernel */ __pyx_t_4 = (__pyx_v_E2k2 - __pyx_v_Ck2); __pyx_t_3 = ((__pyx_v_E2k2 + __pyx_v_Ck2) + __pyx_v_dummy); if (unlikely(__pyx_t_3 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } /* * "fatiando/gravmag/_polyprism.pyx":55 kernel += 0.5*p*(Ak2/(Bk2 + * dummy))*( log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - log((E1k2 - * Ck2)/(E1k2 + Ck2 + dummy) + dummy)) # <<<<<<<<<<<<<< * return kernel * */ __pyx_t_2 = (__pyx_v_E1k2 - __pyx_v_Ck2); __pyx_t_1 = 
((__pyx_v_E1k2 + __pyx_v_Ck2) + __pyx_v_dummy); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 55; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } /* * "fatiando/gravmag/_polyprism.pyx":53 log((E1k1 - Ck1)/(E1k1 + Ck1 + * dummy) + dummy) - log((E2k1 - Ck1)/(E2k1 + Ck1 + dummy) + dummy)) * kernel += 0.5*p*(Ak2/(Bk2 + dummy))*( # <<<<<<<<<<<<<< * log((E2k2 - Ck2)/(E2k2 + Ck2 + dummy) + dummy) - log((E1k2 - * Ck2)/(E1k2 + Ck2 + dummy) + dummy)) */ __pyx_v_kernel = (__pyx_v_kernel + (((0.5 * __pyx_v_p) * (__pyx_v_Ak2 / __pyx_t_5)) * (log(((__pyx_t_4 / __pyx_t_3) + __pyx_v_dummy)) - log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy))))); /* * "fatiando/gravmag/_polyprism.pyx":56 log((E2k2 - Ck2)/(E2k2 + Ck2 + * dummy) + dummy) - log((E1k2 - Ck2)/(E1k2 + Ck2 + dummy) + dummy)) * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelxx(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* * "fatiando/gravmag/_polyprism.pyx":18 ctypedef numpy.float_t DTYPE_T * * cdef inline double kernelz(double X1, double Y1, double X2, double Y2, * # <<<<<<<<<<<<<< double Z1, double Z2, double Z1_sqr, double Z2_sqr) * nogil: cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":58 return kernel * * cdef inline double kernelxx(double X1, double Y1, double X2, double Y2, * # <<<<<<<<<<<<<< double Z1, double Z2, double Z1_sqr, double Z2_sqr) * nogil: cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_n; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_g; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; 
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* * "fatiando/gravmag/_polyprism.pyx":65 aux14, aux15, aux16, n, g, p, d1, * d2, \ R11, R12, R21, R22, res DTYPE_T dummy = 1e-10 # Used to avoid * singularities # <<<<<<<<<<<<<< kernel = 0 aux0 = X2 - X1 + * dummy */ __pyx_v_dummy = 1e-10; /* * "fatiando/gravmag/_polyprism.pyx":66 R11, R12, R21, R22, res DTYPE_T * dummy = 1e-10 # Used to avoid singularities kernel = 0 # * <<<<<<<<<<<<<< aux0 = X2 - X1 + dummy aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":67 DTYPE_T dummy = 1e-10 # Used to * avoid singularities kernel = 0 aux0 = X2 - X1 + dummy # * <<<<<<<<<<<<<< aux1 = Y2 - Y1 + dummy n = (aux0/aux1) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":68 kernel = 0 aux0 = X2 - X1 + dummy * aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< n = (aux0/aux1) g * = X1 - (Y1*n) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":69 aux0 = X2 - X1 + dummy aux1 = Y2 * - Y1 + dummy n = (aux0/aux1) # <<<<<<<<<<<<<< g = X1 - * (Y1*n) aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_n = (__pyx_v_aux0 / __pyx_v_aux1); /* * "fatiando/gravmag/_polyprism.pyx":70 aux1 = Y2 - Y1 + dummy n = * (aux0/aux1) g = X1 - (Y1*n) # <<<<<<<<<<<<<< aux2 = * sqrt((aux0*aux0) + (aux1*aux1)) aux3 = (X1*Y2) - (X2*Y1) */ __pyx_v_g = (__pyx_v_X1 - (__pyx_v_Y1 * __pyx_v_n)); /* * "fatiando/gravmag/_polyprism.pyx":71 n = (aux0/aux1) g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux3 = (X1*Y2) - (X2*Y1) p = ((aux3/aux2)) + dummy */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* * "fatiando/gravmag/_polyprism.pyx":72 g = X1 - (Y1*n) aux2 = * sqrt((aux0*aux0) + (aux1*aux1)) aux3 = (X1*Y2) - (X2*Y1) # * <<<<<<<<<<<<<< p = ((aux3/aux2)) + dummy aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_aux3 = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":73 aux2 = sqrt((aux0*aux0) + * (aux1*aux1)) aux3 = (X1*Y2) - (X2*Y1) p = ((aux3/aux2)) + dummy * # <<<<<<<<<<<<<< aux4 = (aux0*X1) + (aux1*Y1) aux5 = (aux0*X2) + * (aux1*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_p = ((__pyx_v_aux3 / __pyx_v_aux2) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":74 aux3 = (X1*Y2) - (X2*Y1) p = * ((aux3/aux2)) + dummy aux4 = (aux0*X1) + (aux1*Y1) # * <<<<<<<<<<<<<< aux5 = (aux0*X2) + (aux1*Y2) d1 = 
((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":75 p = ((aux3/aux2)) + dummy aux4 = * (aux0*X1) + (aux1*Y1) aux5 = (aux0*X2) + (aux1*Y2) # * <<<<<<<<<<<<<< d1 = ((aux4/aux2)) + dummy d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* * "fatiando/gravmag/_polyprism.pyx":76 aux4 = (aux0*X1) + (aux1*Y1) aux5 * = (aux0*X2) + (aux1*Y2) d1 = ((aux4/aux2)) + dummy # * <<<<<<<<<<<<<< d2 = ((aux5/aux2)) + dummy aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 76; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":77 aux5 = (aux0*X2) + (aux1*Y2) d1 = * ((aux4/aux2)) + dummy d2 = ((aux5/aux2)) + dummy # * <<<<<<<<<<<<<< aux6 = (X1*X1) + (Y1*Y1) aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":78 d1 = ((aux4/aux2)) + dummy d2 = * ((aux5/aux2)) + dummy aux6 = (X1*X1) + (Y1*Y1) # * <<<<<<<<<<<<<< aux7 = (X2*X2) + (Y2*Y2) aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":79 d2 = ((aux5/aux2)) + dummy aux6 = * (X1*X1) + (Y1*Y1) aux7 = (X2*X2) + (Y2*Y2) # * <<<<<<<<<<<<<< aux8 = Z1*Z1 aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* * "fatiando/gravmag/_polyprism.pyx":80 aux6 = (X1*X1) + (Y1*Y1) aux7 = * (X2*X2) + (Y2*Y2) aux8 = Z1*Z1 # <<<<<<<<<<<<<< aux9 = * Z2*Z2 R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* * "fatiando/gravmag/_polyprism.pyx":81 aux7 = (X2*X2) + (Y2*Y2) aux8 = * Z1*Z1 aux9 = Z2*Z2 # <<<<<<<<<<<<<< R11 = sqrt(aux6 + * aux8) R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* * "fatiando/gravmag/_polyprism.pyx":82 aux8 = Z1*Z1 aux9 = Z2*Z2 R11 = * sqrt(aux6 + aux8) # <<<<<<<<<<<<<< R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* * "fatiando/gravmag/_polyprism.pyx":83 aux9 = Z2*Z2 R11 = sqrt(aux6 + * aux8) R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< R21 = * sqrt(aux7 + aux8) R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* * "fatiando/gravmag/_polyprism.pyx":84 R11 = sqrt(aux6 + aux8) R12 = * sqrt(aux6 + aux9) R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) aux10 = atan2((Z2*d2), (p*R22)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* * "fatiando/gravmag/_polyprism.pyx":85 R12 = sqrt(aux6 + aux9) R21 = * sqrt(aux7 + aux8) R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d2), (p*R22)) aux11 = atan2((Z1*d2), (p*R21)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* * "fatiando/gravmag/_polyprism.pyx":86 R21 = sqrt(aux7 + aux8) R22 = * sqrt(aux7 + aux9) aux10 = 
atan2((Z2*d2), (p*R22)) # * <<<<<<<<<<<<<< aux11 = atan2((Z1*d2), (p*R21)) aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R22)); /* * "fatiando/gravmag/_polyprism.pyx":87 R22 = sqrt(aux7 + aux9) aux10 = * atan2((Z2*d2), (p*R22)) aux11 = atan2((Z1*d2), (p*R21)) # * <<<<<<<<<<<<<< aux12 = aux10 - aux11 aux13 = (aux12/(p*d2)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R21)); /* * "fatiando/gravmag/_polyprism.pyx":88 aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) aux12 = aux10 - aux11 # * <<<<<<<<<<<<<< aux13 = (aux12/(p*d2)) aux14 = ((p*aux12)/d2) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* * "fatiando/gravmag/_polyprism.pyx":89 aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 aux13 = (aux12/(p*d2)) # * <<<<<<<<<<<<<< aux14 = ((p*aux12)/d2) res = (g*Y2*aux13) + (n*aux14) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* * "fatiando/gravmag/_polyprism.pyx":90 aux12 = aux10 - aux11 aux13 = * (aux12/(p*d2)) aux14 = ((p*aux12)/d2) # <<<<<<<<<<<<<< res * = (g*Y2*aux13) + (n*aux14) aux10 = atan2((Z2*d1), (p*R12)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d2); /* * "fatiando/gravmag/_polyprism.pyx":91 aux13 = (aux12/(p*d2)) aux14 = * ((p*aux12)/d2) res = (g*Y2*aux13) + (n*aux14) # * <<<<<<<<<<<<<< aux10 = atan2((Z2*d1), (p*R12)) aux11 = atan2((Z1*d1), * (p*R11)) */ __pyx_v_res = (((__pyx_v_g * __pyx_v_Y2) * __pyx_v_aux13) + (__pyx_v_n * __pyx_v_aux14)); /* * "fatiando/gravmag/_polyprism.pyx":92 aux14 = ((p*aux12)/d2) res = * (g*Y2*aux13) + (n*aux14) aux10 = atan2((Z2*d1), (p*R12)) # * <<<<<<<<<<<<<< aux11 = atan2((Z1*d1), (p*R11)) aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R12)); /* * "fatiando/gravmag/_polyprism.pyx":93 res = (g*Y2*aux13) + (n*aux14) * aux10 = atan2((Z2*d1), (p*R12)) aux11 = atan2((Z1*d1), (p*R11)) * # <<<<<<<<<<<<<< aux12 = aux10 - aux11 aux13 = (aux12/(p*d1)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R11)); /* * "fatiando/gravmag/_polyprism.pyx":94 aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) aux12 = aux10 - aux11 # * <<<<<<<<<<<<<< aux13 = (aux12/(p*d1)) aux14 = ((p*aux12)/d1) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* * "fatiando/gravmag/_polyprism.pyx":95 aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 aux13 = (aux12/(p*d1)) # * <<<<<<<<<<<<<< aux14 = ((p*aux12)/d1) res -= (g*Y1*aux13) + (n*aux14) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { 
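/*
 * Recurring guard, worth a note: these kernels are declared `nogil`, so
 * before raising ZeroDivisionError the generated code re-acquires the GIL
 * (PyGILState_Ensure/Release, compiled only WITH_THREAD) and then jumps to
 * __pyx_L1_error, where __Pyx_WriteUnraisable reports the exception, since
 * a bare `double` return cannot propagate it. In practice the `dummy`
 * offsets folded into p, d1 and d2 should keep divisors such as p*d1
 * nonzero, so this branch is defensive.
 */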
__pyx_filename = __pyx_f[0]; __pyx_lineno = 95; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* * "fatiando/gravmag/_polyprism.pyx":96 aux12 = aux10 - aux11 aux13 = * (aux12/(p*d1)) aux14 = ((p*aux12)/d1) # <<<<<<<<<<<<<< res * -= (g*Y1*aux13) + (n*aux14) aux10 = log(((Z2 + R22) + dummy)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d1); /* * "fatiando/gravmag/_polyprism.pyx":97 aux13 = (aux12/(p*d1)) aux14 = * ((p*aux12)/d1) res -= (g*Y1*aux13) + (n*aux14) # * <<<<<<<<<<<<<< aux10 = log(((Z2 + R22) + dummy)) aux11 = log(((Z1 + * R21) + dummy)) */ __pyx_v_res = (__pyx_v_res - (((__pyx_v_g * __pyx_v_Y1) * __pyx_v_aux13) + (__pyx_v_n * __pyx_v_aux14))); /* * "fatiando/gravmag/_polyprism.pyx":98 aux14 = ((p*aux12)/d1) res -= * (g*Y1*aux13) + (n*aux14) aux10 = log(((Z2 + R22) + dummy)) * # <<<<<<<<<<<<<< aux11 = log(((Z1 + R21) + dummy)) aux12 = log(((Z2 + * R12) + dummy)) */ __pyx_v_aux10 = log(((__pyx_v_Z2 + __pyx_v_R22) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":99 res -= (g*Y1*aux13) + (n*aux14) * aux10 = log(((Z2 + R22) + dummy)) aux11 = log(((Z1 + R21) + dummy)) * # <<<<<<<<<<<<<< aux12 = log(((Z2 + R12) + dummy)) aux13 = log(((Z1 + * R11) + dummy)) */ __pyx_v_aux11 = log(((__pyx_v_Z1 + __pyx_v_R21) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":100 aux10 = log(((Z2 + R22) + * dummy)) aux11 = log(((Z1 + R21) + dummy)) aux12 = log(((Z2 + R12) + * dummy)) # <<<<<<<<<<<<<< aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 */ __pyx_v_aux12 = log(((__pyx_v_Z2 + __pyx_v_R12) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":101 aux11 = log(((Z1 + R21) + * dummy)) aux12 = log(((Z2 + R12) + dummy)) aux13 = log(((Z1 + R11) + * dummy)) # <<<<<<<<<<<<<< aux14 = aux10 - aux11 aux15 = * aux12 - aux13 */ __pyx_v_aux13 = log(((__pyx_v_Z1 + __pyx_v_R11) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":102 aux12 = log(((Z2 + R12) + * dummy)) aux13 = log(((Z1 + R11) + dummy)) aux14 = aux10 - aux11 * # <<<<<<<<<<<<<< aux15 = aux12 - aux13 res += (n*(aux15 - aux14)) */ __pyx_v_aux14 = (__pyx_v_aux10 - __pyx_v_aux11); /* * "fatiando/gravmag/_polyprism.pyx":103 aux13 = log(((Z1 + R11) + * dummy)) aux14 = aux10 - aux11 aux15 = aux12 - aux13 # * <<<<<<<<<<<<<< res += (n*(aux15 - aux14)) aux0 = (1.0/(1.0 + (n*n))) */ __pyx_v_aux15 = (__pyx_v_aux12 - __pyx_v_aux13); /* * "fatiando/gravmag/_polyprism.pyx":104 aux14 = aux10 - aux11 aux15 = * aux12 - aux13 res += (n*(aux15 - aux14)) # <<<<<<<<<<<<<< * aux0 = (1.0/(1.0 + (n*n))) res *= -aux0 */ __pyx_v_res = (__pyx_v_res + (__pyx_v_n * (__pyx_v_aux15 - __pyx_v_aux14))); /* * "fatiando/gravmag/_polyprism.pyx":105 aux15 = aux12 - aux13 res += * (n*(aux15 - aux14)) aux0 = (1.0/(1.0 + (n*n))) # * <<<<<<<<<<<<<< res *= -aux0 kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_n * __pyx_v_n)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = 
__LINE__; goto __pyx_L1_error; } } __pyx_v_aux0 = (1.0 / __pyx_t_1); /* * "fatiando/gravmag/_polyprism.pyx":106 res += (n*(aux15 - aux14)) aux0 * = (1.0/(1.0 + (n*n))) res *= -aux0 # <<<<<<<<<<<<<< kernel * += res return kernel */ __pyx_v_res = (__pyx_v_res * (-__pyx_v_aux0)); /* * "fatiando/gravmag/_polyprism.pyx":107 aux0 = (1.0/(1.0 + (n*n))) res * *= -aux0 kernel += res # <<<<<<<<<<<<<< return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* * "fatiando/gravmag/_polyprism.pyx":108 res *= -aux0 kernel += res * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelxy(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* * "fatiando/gravmag/_polyprism.pyx":58 return kernel * * cdef inline double kernelxx(double X1, double Y1, double X2, double Y2, * # <<<<<<<<<<<<<< double Z1, double Z2, double Z1_sqr, double Z2_sqr) * nogil: cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelxx", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":110 return kernel * * cdef inline double kernelxy(double X1, double Y1, double X2, double Y2, * # <<<<<<<<<<<<<< double Z1, double Z2, double Z1_sqr, double Z2_sqr) * nogil: cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_n; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_g; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* * "fatiando/gravmag/_polyprism.pyx":117 aux14, aux15, aux16, n, g, p, * d1, d2, 
\ R11, R12, R21, R22, res DTYPE_T dummy = 1e-10 # Used to * avoid singularities # <<<<<<<<<<<<<< kernel = 0 aux0 = X2 * - X1 + dummy */ __pyx_v_dummy = 1e-10; /* * "fatiando/gravmag/_polyprism.pyx":118 R11, R12, R21, R22, res DTYPE_T * dummy = 1e-10 # Used to avoid singularities kernel = 0 # * <<<<<<<<<<<<<< aux0 = X2 - X1 + dummy aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":119 DTYPE_T dummy = 1e-10 # Used to * avoid singularities kernel = 0 aux0 = X2 - X1 + dummy # * <<<<<<<<<<<<<< aux1 = Y2 - Y1 + dummy n = (aux0/aux1) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":120 kernel = 0 aux0 = X2 - X1 + * dummy aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< n = * (aux0/aux1) g = X1 - (Y1*n) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":121 aux0 = X2 - X1 + dummy aux1 = Y2 * - Y1 + dummy n = (aux0/aux1) # <<<<<<<<<<<<<< g = X1 - * (Y1*n) aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_n = (__pyx_v_aux0 / __pyx_v_aux1); /* * "fatiando/gravmag/_polyprism.pyx":122 aux1 = Y2 - Y1 + dummy n = * (aux0/aux1) g = X1 - (Y1*n) # <<<<<<<<<<<<<< aux2 = * sqrt((aux0*aux0) + (aux1*aux1)) aux3 = (X1*Y2) - (X2*Y1) */ __pyx_v_g = (__pyx_v_X1 - (__pyx_v_Y1 * __pyx_v_n)); /* * "fatiando/gravmag/_polyprism.pyx":123 n = (aux0/aux1) g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux3 = (X1*Y2) - (X2*Y1) p = ((aux3/aux2)) + dummy */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* * "fatiando/gravmag/_polyprism.pyx":124 g = X1 - (Y1*n) aux2 = * sqrt((aux0*aux0) + (aux1*aux1)) aux3 = (X1*Y2) - (X2*Y1) # * <<<<<<<<<<<<<< p = ((aux3/aux2)) + dummy aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_aux3 = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":125 aux2 = sqrt((aux0*aux0) + * (aux1*aux1)) aux3 = (X1*Y2) - (X2*Y1) p = ((aux3/aux2)) + dummy * # <<<<<<<<<<<<<< aux4 = (aux0*X1) + (aux1*Y1) aux5 = (aux0*X2) + * (aux1*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_p = ((__pyx_v_aux3 / __pyx_v_aux2) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":126 aux3 = (X1*Y2) - (X2*Y1) p = * ((aux3/aux2)) + dummy aux4 = (aux0*X1) + (aux1*Y1) # * <<<<<<<<<<<<<< aux5 = (aux0*X2) + (aux1*Y2) d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":127 p = ((aux3/aux2)) + dummy aux4 = * (aux0*X1) + (aux1*Y1) aux5 = (aux0*X2) + (aux1*Y2) # * <<<<<<<<<<<<<< d1 = ((aux4/aux2)) + dummy d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* * "fatiando/gravmag/_polyprism.pyx":128 aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) d1 = ((aux4/aux2)) + dummy # * <<<<<<<<<<<<<< d2 
= ((aux5/aux2)) + dummy aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":129 aux5 = (aux0*X2) + (aux1*Y2) d1 * = ((aux4/aux2)) + dummy d2 = ((aux5/aux2)) + dummy # * <<<<<<<<<<<<<< aux6 = (X1*X1) + (Y1*Y1) aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":130 d1 = ((aux4/aux2)) + dummy d2 = * ((aux5/aux2)) + dummy aux6 = (X1*X1) + (Y1*Y1) # * <<<<<<<<<<<<<< aux7 = (X2*X2) + (Y2*Y2) aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":131 d2 = ((aux5/aux2)) + dummy aux6 * = (X1*X1) + (Y1*Y1) aux7 = (X2*X2) + (Y2*Y2) # * <<<<<<<<<<<<<< aux8 = Z1*Z1 aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* * "fatiando/gravmag/_polyprism.pyx":132 aux6 = (X1*X1) + (Y1*Y1) aux7 = * (X2*X2) + (Y2*Y2) aux8 = Z1*Z1 # <<<<<<<<<<<<<< aux9 = * Z2*Z2 R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* * "fatiando/gravmag/_polyprism.pyx":133 aux7 = (X2*X2) + (Y2*Y2) aux8 = * Z1*Z1 aux9 = Z2*Z2 # <<<<<<<<<<<<<< R11 = sqrt(aux6 + * aux8) R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* * "fatiando/gravmag/_polyprism.pyx":134 aux8 = Z1*Z1 aux9 = Z2*Z2 R11 = * sqrt(aux6 + aux8) # <<<<<<<<<<<<<< R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* * "fatiando/gravmag/_polyprism.pyx":135 aux9 = Z2*Z2 R11 = sqrt(aux6 + * aux8) R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< R21 = * sqrt(aux7 + aux8) R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* * "fatiando/gravmag/_polyprism.pyx":136 R11 = sqrt(aux6 + aux8) R12 = * sqrt(aux6 + aux9) R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) aux10 = atan2((Z2*d2), (p*R22)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* * "fatiando/gravmag/_polyprism.pyx":137 R12 = sqrt(aux6 + aux9) R21 = * sqrt(aux7 + aux8) R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d2), (p*R22)) aux11 = atan2((Z1*d2), (p*R21)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* * "fatiando/gravmag/_polyprism.pyx":138 R21 = sqrt(aux7 + aux8) R22 = * sqrt(aux7 + aux9) aux10 = atan2((Z2*d2), (p*R22)) # * <<<<<<<<<<<<<< aux11 = atan2((Z1*d2), (p*R21)) aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R22)); /* * "fatiando/gravmag/_polyprism.pyx":139 R22 = sqrt(aux7 + aux9) aux10 = * atan2((Z2*d2), (p*R22)) aux11 = atan2((Z1*d2), (p*R21)) # * <<<<<<<<<<<<<< aux12 = aux10 - aux11 aux13 = (aux12/(p*d2)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R21)); /* * "fatiando/gravmag/_polyprism.pyx":140 aux10 = atan2((Z2*d2), 
(p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) aux12 = aux10 - aux11 # * <<<<<<<<<<<<<< aux13 = (aux12/(p*d2)) aux14 = ((p*aux12)/d2) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* * "fatiando/gravmag/_polyprism.pyx":141 aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 aux13 = (aux12/(p*d2)) # * <<<<<<<<<<<<<< aux14 = ((p*aux12)/d2) res = (((g*g) + (g*n*Y2))*aux13) * - aux14 */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* * "fatiando/gravmag/_polyprism.pyx":142 aux12 = aux10 - aux11 aux13 = * (aux12/(p*d2)) aux14 = ((p*aux12)/d2) # <<<<<<<<<<<<<< res * = (((g*g) + (g*n*Y2))*aux13) - aux14 aux10 = atan2((Z2*d1), (p*R12)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d2); /* * "fatiando/gravmag/_polyprism.pyx":143 aux13 = (aux12/(p*d2)) aux14 = * ((p*aux12)/d2) res = (((g*g) + (g*n*Y2))*aux13) - aux14 # * <<<<<<<<<<<<<< aux10 = atan2((Z2*d1), (p*R12)) aux11 = atan2((Z1*d1), * (p*R11)) */ __pyx_v_res = ((((__pyx_v_g * __pyx_v_g) + ((__pyx_v_g * __pyx_v_n) * __pyx_v_Y2)) * __pyx_v_aux13) - __pyx_v_aux14); /* * "fatiando/gravmag/_polyprism.pyx":144 aux14 = ((p*aux12)/d2) res = * (((g*g) + (g*n*Y2))*aux13) - aux14 aux10 = atan2((Z2*d1), (p*R12)) * # <<<<<<<<<<<<<< aux11 = atan2((Z1*d1), (p*R11)) aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R12)); /* * "fatiando/gravmag/_polyprism.pyx":145 res = (((g*g) + (g*n*Y2))*aux13) * - aux14 aux10 = atan2((Z2*d1), (p*R12)) aux11 = atan2((Z1*d1), * (p*R11)) # <<<<<<<<<<<<<< aux12 = aux10 - aux11 aux13 = * (aux12/(p*d1)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R11)); /* * "fatiando/gravmag/_polyprism.pyx":146 aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) aux12 = aux10 - aux11 # * <<<<<<<<<<<<<< aux13 = (aux12/(p*d1)) aux14 = ((p*aux12)/d1) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* * "fatiando/gravmag/_polyprism.pyx":147 aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 aux13 = (aux12/(p*d1)) # * <<<<<<<<<<<<<< aux14 = ((p*aux12)/d1) res -= (((g*g) + * (g*n*Y1))*aux13) - aux14 */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* * "fatiando/gravmag/_polyprism.pyx":148 aux12 = aux10 - aux11 aux13 = * (aux12/(p*d1)) aux14 = ((p*aux12)/d1) # <<<<<<<<<<<<<< res * -= (((g*g) + (g*n*Y1))*aux13) - aux14 aux10 = log(((Z2 + R22) + * dummy)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if 
(unlikely(__pyx_v_d1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 148; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d1); /* * "fatiando/gravmag/_polyprism.pyx":149 aux13 = (aux12/(p*d1)) aux14 = * ((p*aux12)/d1) res -= (((g*g) + (g*n*Y1))*aux13) - aux14 # * <<<<<<<<<<<<<< aux10 = log(((Z2 + R22) + dummy)) aux11 = log(((Z1 + * R21) + dummy)) */ __pyx_v_res = (__pyx_v_res - ((((__pyx_v_g * __pyx_v_g) + ((__pyx_v_g * __pyx_v_n) * __pyx_v_Y1)) * __pyx_v_aux13) - __pyx_v_aux14)); /* * "fatiando/gravmag/_polyprism.pyx":150 aux14 = ((p*aux12)/d1) res -= * (((g*g) + (g*n*Y1))*aux13) - aux14 aux10 = log(((Z2 + R22) + dummy)) * # <<<<<<<<<<<<<< aux11 = log(((Z1 + R21) + dummy)) aux12 = log(((Z2 + * R12) + dummy)) */ __pyx_v_aux10 = log(((__pyx_v_Z2 + __pyx_v_R22) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":151 res -= (((g*g) + * (g*n*Y1))*aux13) - aux14 aux10 = log(((Z2 + R22) + dummy)) aux11 = * log(((Z1 + R21) + dummy)) # <<<<<<<<<<<<<< aux12 = * log(((Z2 + R12) + dummy)) aux13 = log(((Z1 + R11) + dummy)) */ __pyx_v_aux11 = log(((__pyx_v_Z1 + __pyx_v_R21) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":152 aux10 = log(((Z2 + R22) + * dummy)) aux11 = log(((Z1 + R21) + dummy)) aux12 = log(((Z2 + R12) + * dummy)) # <<<<<<<<<<<<<< aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 */ __pyx_v_aux12 = log(((__pyx_v_Z2 + __pyx_v_R12) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":153 aux11 = log(((Z1 + R21) + * dummy)) aux12 = log(((Z2 + R12) + dummy)) aux13 = log(((Z1 + R11) + * dummy)) # <<<<<<<<<<<<<< aux14 = aux10 - aux11 aux15 = * aux12 - aux13 */ __pyx_v_aux13 = log(((__pyx_v_Z1 + __pyx_v_R11) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":154 aux12 = log(((Z2 + R12) + * dummy)) aux13 = log(((Z1 + R11) + dummy)) aux14 = aux10 - aux11 * # <<<<<<<<<<<<<< aux15 = aux12 - aux13 res += (aux14 - aux15) */ __pyx_v_aux14 = (__pyx_v_aux10 - __pyx_v_aux11); /* * "fatiando/gravmag/_polyprism.pyx":155 aux13 = log(((Z1 + R11) + * dummy)) aux14 = aux10 - aux11 aux15 = aux12 - aux13 # * <<<<<<<<<<<<<< res += (aux14 - aux15) aux0 = (1.0/(1.0 + (n*n))) */ __pyx_v_aux15 = (__pyx_v_aux12 - __pyx_v_aux13); /* * "fatiando/gravmag/_polyprism.pyx":156 aux14 = aux10 - aux11 aux15 = * aux12 - aux13 res += (aux14 - aux15) # <<<<<<<<<<<<<< aux0 * = (1.0/(1.0 + (n*n))) res *= aux0 */ __pyx_v_res = (__pyx_v_res + (__pyx_v_aux14 - __pyx_v_aux15)); /* * "fatiando/gravmag/_polyprism.pyx":157 aux15 = aux12 - aux13 res += * (aux14 - aux15) aux0 = (1.0/(1.0 + (n*n))) # * <<<<<<<<<<<<<< res *= aux0 kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_n * __pyx_v_n)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux0 = (1.0 / __pyx_t_1); /* * "fatiando/gravmag/_polyprism.pyx":158 res += (aux14 - aux15) aux0 = * (1.0/(1.0 + (n*n))) res *= aux0 # <<<<<<<<<<<<<< kernel += * res return kernel */ __pyx_v_res = (__pyx_v_res * __pyx_v_aux0); /* * "fatiando/gravmag/_polyprism.pyx":159 aux0 = (1.0/(1.0 + (n*n))) res * *= aux0 kernel 
+= res # <<<<<<<<<<<<<< return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* * "fatiando/gravmag/_polyprism.pyx":160 res *= aux0 kernel += res return * kernel # <<<<<<<<<<<<<< * * cdef inline double kernelxz(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* * "fatiando/gravmag/_polyprism.pyx":110 return kernel * * cdef inline double kernelxy(double X1, double Y1, double X2, double Y2, * # <<<<<<<<<<<<<< double Z1, double Z2, double Z1_sqr, double Z2_sqr) * nogil: cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelxy", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":162 return kernel * * cdef inline double kernelxz(double X1, double Y1, double X2, double Y2, * # <<<<<<<<<<<<<< double Z1, double Z2, double Z1_sqr, double Z2_sqr) * nogil: cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux16; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_n; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_g; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* * "fatiando/gravmag/_polyprism.pyx":169 aux14, aux15, aux16, n, g, d1, * d2, \ R11, R12, R21, R22, res DTYPE_T dummy = 1e-10 # Used to avoid * singularities # <<<<<<<<<<<<<< kernel = 0 aux0 = X2 - X1 + * dummy */ __pyx_v_dummy = 1e-10; /* * "fatiando/gravmag/_polyprism.pyx":170 R11, R12, R21, R22, res DTYPE_T * dummy = 1e-10 # Used to avoid singularities kernel = 0 # * <<<<<<<<<<<<<< aux0 = X2 - X1 + dummy aux1 = Y2 - Y1 + dummy */ 
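/*
 * kernelxz below differs from kernelxx/kernelxy only in its radial terms:
 * after the same edge setup (n, g, d1, d2, R11..R22; no p is needed), it
 * forms, judging from the statements that follow,
 *
 *   F(d, Ra, Rb) = ( log((Rb - d)/(Rb + d) + eps)
 *                  - log((Ra - d)/(Ra + d) + eps) ) / (2*d)
 *
 * and accumulates per edge
 *
 *   kernel -= [ (Y2*(1 + n^2) + g*n) * F(d2, R21, R22)
 *             - (Y1*(1 + n^2) + g*n) * F(d1, R11, R12) ] / (1 + n^2)
 */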
__pyx_v_kernel = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":171 DTYPE_T dummy = 1e-10 # Used to * avoid singularities kernel = 0 aux0 = X2 - X1 + dummy # * <<<<<<<<<<<<<< aux1 = Y2 - Y1 + dummy n = (aux0/aux1) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":172 kernel = 0 aux0 = X2 - X1 + * dummy aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< n = * (aux0/aux1) g = X1 - (Y1*n) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":173 aux0 = X2 - X1 + dummy aux1 = Y2 * - Y1 + dummy n = (aux0/aux1) # <<<<<<<<<<<<<< g = X1 - * (Y1*n) aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 173; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_n = (__pyx_v_aux0 / __pyx_v_aux1); /* * "fatiando/gravmag/_polyprism.pyx":174 aux1 = Y2 - Y1 + dummy n = * (aux0/aux1) g = X1 - (Y1*n) # <<<<<<<<<<<<<< aux2 = * sqrt((aux0*aux0) + (aux1*aux1)) aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_g = (__pyx_v_X1 - (__pyx_v_Y1 * __pyx_v_n)); /* * "fatiando/gravmag/_polyprism.pyx":175 n = (aux0/aux1) g = X1 - (Y1*n) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) aux5 = (aux0*X2) + (aux1*Y2) */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* * "fatiando/gravmag/_polyprism.pyx":176 g = X1 - (Y1*n) aux2 = * sqrt((aux0*aux0) + (aux1*aux1)) aux4 = (aux0*X1) + (aux1*Y1) * # <<<<<<<<<<<<<< aux5 = (aux0*X2) + (aux1*Y2) d1 = ((aux4/aux2)) + * dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":177 aux2 = sqrt((aux0*aux0) + * (aux1*aux1)) aux4 = (aux0*X1) + (aux1*Y1) aux5 = (aux0*X2) + (aux1*Y2) * # <<<<<<<<<<<<<< d1 = ((aux4/aux2)) + dummy d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* * "fatiando/gravmag/_polyprism.pyx":178 aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) d1 = ((aux4/aux2)) + dummy # * <<<<<<<<<<<<<< d2 = ((aux5/aux2)) + dummy aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":179 aux5 = (aux0*X2) + (aux1*Y2) d1 * = ((aux4/aux2)) + dummy d2 = ((aux5/aux2)) + dummy # * <<<<<<<<<<<<<< aux6 = (X1*X1) + (Y1*Y1) aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 179; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":180 d1 = ((aux4/aux2)) + dummy d2 = * ((aux5/aux2)) + dummy aux6 = (X1*X1) + (Y1*Y1) # * <<<<<<<<<<<<<< aux7 = 
(X2*X2) + (Y2*Y2) aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":181 d2 = ((aux5/aux2)) + dummy aux6 * = (X1*X1) + (Y1*Y1) aux7 = (X2*X2) + (Y2*Y2) # * <<<<<<<<<<<<<< aux8 = Z1*Z1 aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* * "fatiando/gravmag/_polyprism.pyx":182 aux6 = (X1*X1) + (Y1*Y1) aux7 = * (X2*X2) + (Y2*Y2) aux8 = Z1*Z1 # <<<<<<<<<<<<<< aux9 = * Z2*Z2 R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* * "fatiando/gravmag/_polyprism.pyx":183 aux7 = (X2*X2) + (Y2*Y2) aux8 = * Z1*Z1 aux9 = Z2*Z2 # <<<<<<<<<<<<<< R11 = sqrt(aux6 + * aux8) R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* * "fatiando/gravmag/_polyprism.pyx":184 aux8 = Z1*Z1 aux9 = Z2*Z2 R11 = * sqrt(aux6 + aux8) # <<<<<<<<<<<<<< R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* * "fatiando/gravmag/_polyprism.pyx":185 aux9 = Z2*Z2 R11 = sqrt(aux6 + * aux8) R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< R21 = * sqrt(aux7 + aux8) R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* * "fatiando/gravmag/_polyprism.pyx":186 R11 = sqrt(aux6 + aux8) R12 = * sqrt(aux6 + aux9) R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* * "fatiando/gravmag/_polyprism.pyx":187 R12 = sqrt(aux6 + aux9) R21 = * sqrt(aux7 + aux8) R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) aux11 = log((((R12 - * d1)/(R12 + d1)) + dummy)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* * "fatiando/gravmag/_polyprism.pyx":188 R21 = sqrt(aux7 + aux8) R22 = * sqrt(aux7 + aux9) aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) * # <<<<<<<<<<<<<< aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) aux12 * = log((((R21 - d2)/(R21 + d2)) + dummy)) */ __pyx_t_1 = (__pyx_v_R11 - __pyx_v_d1); __pyx_t_2 = (__pyx_v_R11 + __pyx_v_d1); if (unlikely(__pyx_t_2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 188; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux10 = log(((__pyx_t_1 / __pyx_t_2) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":189 R22 = sqrt(aux7 + aux9) aux10 = * log((((R11 - d1)/(R11 + d1)) + dummy)) aux11 = log((((R12 - d1)/(R12 + * d1)) + dummy)) # <<<<<<<<<<<<<< aux12 = log((((R21 - * d2)/(R21 + d2)) + dummy)) aux13 = log((((R22 - d2)/(R22 + d2)) + * dummy)) */ __pyx_t_2 = (__pyx_v_R12 - __pyx_v_d1); __pyx_t_1 = (__pyx_v_R12 + __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 189; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux11 = log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":190 aux10 = log((((R11 - d1)/(R11 + * d1)) + dummy)) aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) aux12 = * log((((R21 - d2)/(R21 + d2)) + dummy)) # <<<<<<<<<<<<<< * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) aux14 = (1.0/(2*d1)) 
*/ __pyx_t_1 = (__pyx_v_R21 - __pyx_v_d2); __pyx_t_2 = (__pyx_v_R21 + __pyx_v_d2); if (unlikely(__pyx_t_2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux12 = log(((__pyx_t_1 / __pyx_t_2) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":191 aux11 = log((((R12 - d1)/(R12 + * d1)) + dummy)) aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) aux13 = * log((((R22 - d2)/(R22 + d2)) + dummy)) # <<<<<<<<<<<<<< * aux14 = (1.0/(2*d1)) aux15 = (1.0/(2*d2)) */ __pyx_t_2 = (__pyx_v_R22 - __pyx_v_d2); __pyx_t_1 = (__pyx_v_R22 + __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 191; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux13 = log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":192 aux12 = log((((R21 - d2)/(R21 + * d2)) + dummy)) aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) aux14 = * (1.0/(2*d1)) # <<<<<<<<<<<<<< aux15 = (1.0/(2*d2)) aux16 = * aux15*(aux13 - aux12) */ __pyx_t_1 = (2.0 * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 192; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux14 = (1.0 / __pyx_t_1); /* * "fatiando/gravmag/_polyprism.pyx":193 aux13 = log((((R22 - d2)/(R22 + * d2)) + dummy)) aux14 = (1.0/(2*d1)) aux15 = (1.0/(2*d2)) # * <<<<<<<<<<<<<< aux16 = aux15*(aux13 - aux12) res = (Y2*(1.0 + (n*n)) + * g*n)*aux16 */ __pyx_t_1 = (2.0 * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 193; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux15 = (1.0 / __pyx_t_1); /* * "fatiando/gravmag/_polyprism.pyx":194 aux14 = (1.0/(2*d1)) aux15 = * (1.0/(2*d2)) aux16 = aux15*(aux13 - aux12) # * <<<<<<<<<<<<<< res = (Y2*(1.0 + (n*n)) + g*n)*aux16 aux16 = * aux14*(aux11 - aux10) */ __pyx_v_aux16 = (__pyx_v_aux15 * (__pyx_v_aux13 - __pyx_v_aux12)); /* * "fatiando/gravmag/_polyprism.pyx":195 aux15 = (1.0/(2*d2)) aux16 = * aux15*(aux13 - aux12) res = (Y2*(1.0 + (n*n)) + g*n)*aux16 * # <<<<<<<<<<<<<< aux16 = aux14*(aux11 - aux10) res -= (Y1*(1.0 + * (n*n)) + g*n)*aux16 */ __pyx_v_res = (((__pyx_v_Y2 * (1.0 + (__pyx_v_n * __pyx_v_n))) + (__pyx_v_g * __pyx_v_n)) * __pyx_v_aux16); /* * "fatiando/gravmag/_polyprism.pyx":196 aux16 = aux15*(aux13 - aux12) * res = (Y2*(1.0 + (n*n)) + g*n)*aux16 aux16 = aux14*(aux11 - aux10) * # <<<<<<<<<<<<<< res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 aux0 = * (1.0/(1.0 + (n*n))) */ __pyx_v_aux16 = (__pyx_v_aux14 * (__pyx_v_aux11 - __pyx_v_aux10)); /* * "fatiando/gravmag/_polyprism.pyx":197 res = (Y2*(1.0 + (n*n)) + * g*n)*aux16 aux16 = aux14*(aux11 - aux10) res -= 
(Y1*(1.0 + (n*n)) + * g*n)*aux16 # <<<<<<<<<<<<<< aux0 = (1.0/(1.0 + (n*n))) res * *= -aux0 */ __pyx_v_res = (__pyx_v_res - (((__pyx_v_Y1 * (1.0 + (__pyx_v_n * __pyx_v_n))) + (__pyx_v_g * __pyx_v_n)) * __pyx_v_aux16)); /* * "fatiando/gravmag/_polyprism.pyx":198 aux16 = aux14*(aux11 - aux10) * res -= (Y1*(1.0 + (n*n)) + g*n)*aux16 aux0 = (1.0/(1.0 + (n*n))) * # <<<<<<<<<<<<<< res *= -aux0 kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_n * __pyx_v_n)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 198; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux0 = (1.0 / __pyx_t_1); /* * "fatiando/gravmag/_polyprism.pyx":199 res -= (Y1*(1.0 + (n*n)) + * g*n)*aux16 aux0 = (1.0/(1.0 + (n*n))) res *= -aux0 # * <<<<<<<<<<<<<< kernel += res return kernel */ __pyx_v_res = (__pyx_v_res * (-__pyx_v_aux0)); /* * "fatiando/gravmag/_polyprism.pyx":200 aux0 = (1.0/(1.0 + (n*n))) res * *= -aux0 kernel += res # <<<<<<<<<<<<<< return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* * "fatiando/gravmag/_polyprism.pyx":201 res *= -aux0 kernel += res * return kernel # <<<<<<<<<<<<<< * * cdef inline double kernelyy(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* * "fatiando/gravmag/_polyprism.pyx":162 return kernel * * cdef inline double kernelxz(double X1, double Y1, double X2, double Y2, * # <<<<<<<<<<<<<< double Z1, double Z2, double Z1_sqr, double Z2_sqr) * nogil: cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelxz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":203 return kernel * * cdef inline double kernelyy(double X1, double Y1, double X2, double Y2, * # <<<<<<<<<<<<<< double Z1, double Z2, double Z1_sqr, double Z2_sqr) * nogil: cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_m; 
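/*
 * kernelyy mirrors kernelxx with the roles of x and y exchanged: the edge
 * is parameterized by the slope m = dy/dx and intercept c = Y1 - m*X1
 * (instead of n = dx/dy and g), the arctangent terms are weighted by c*X2
 * and c*X1 (instead of g*Y2 and g*Y1), and the per-edge result is scaled by
 * +1/(1 + m^2) rather than -1/(1 + n^2), as the statements below show.
 */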
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_c; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* * "fatiando/gravmag/_polyprism.pyx":210 aux14, aux15, p, m, c, d1, d2, \ * R11, R12, R21, R22, res DTYPE_T dummy = 1e-10 # Used to avoid * singularities # <<<<<<<<<<<<<< kernel = 0 aux0 = X2 - X1 + * dummy */ __pyx_v_dummy = 1e-10; /* * "fatiando/gravmag/_polyprism.pyx":211 R11, R12, R21, R22, res DTYPE_T * dummy = 1e-10 # Used to avoid singularities kernel = 0 # * <<<<<<<<<<<<<< aux0 = X2 - X1 + dummy aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":212 DTYPE_T dummy = 1e-10 # Used to * avoid singularities kernel = 0 aux0 = X2 - X1 + dummy # * <<<<<<<<<<<<<< aux1 = Y2 - Y1 + dummy m = (aux1/aux0) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":213 kernel = 0 aux0 = X2 - X1 + * dummy aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< m = * (aux1/aux0) c = Y1 - (X1*m) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":214 aux0 = X2 - X1 + dummy aux1 = Y2 * - Y1 + dummy m = (aux1/aux0) # <<<<<<<<<<<<<< c = Y1 - * (X1*m) aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux0 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 214; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_m = (__pyx_v_aux1 / __pyx_v_aux0); /* * "fatiando/gravmag/_polyprism.pyx":215 aux1 = Y2 - Y1 + dummy m = * (aux1/aux0) c = Y1 - (X1*m) # <<<<<<<<<<<<<< aux2 = * sqrt((aux0*aux0) + (aux1*aux1)) aux3 = (X1*Y2) - (X2*Y1) */ __pyx_v_c = (__pyx_v_Y1 - (__pyx_v_X1 * __pyx_v_m)); /* * "fatiando/gravmag/_polyprism.pyx":216 m = (aux1/aux0) c = Y1 - (X1*m) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux3 = (X1*Y2) - (X2*Y1) p = ((aux3/aux2)) + dummy */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* * "fatiando/gravmag/_polyprism.pyx":217 c = Y1 - (X1*m) aux2 = * sqrt((aux0*aux0) + (aux1*aux1)) aux3 = (X1*Y2) - (X2*Y1) # * <<<<<<<<<<<<<< p = ((aux3/aux2)) + dummy aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_aux3 = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":218 aux2 = sqrt((aux0*aux0) + * (aux1*aux1)) aux3 = (X1*Y2) - (X2*Y1) p = ((aux3/aux2)) + dummy * # <<<<<<<<<<<<<< aux4 = (aux0*X1) + (aux1*Y1) aux5 = (aux0*X2) + * (aux1*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 218; __pyx_clineno = __LINE__; goto __pyx_L1_error; 
} } __pyx_v_p = ((__pyx_v_aux3 / __pyx_v_aux2) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":219 aux3 = (X1*Y2) - (X2*Y1) p = * ((aux3/aux2)) + dummy aux4 = (aux0*X1) + (aux1*Y1) # * <<<<<<<<<<<<<< aux5 = (aux0*X2) + (aux1*Y2) d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":220 p = ((aux3/aux2)) + dummy aux4 = * (aux0*X1) + (aux1*Y1) aux5 = (aux0*X2) + (aux1*Y2) # * <<<<<<<<<<<<<< d1 = ((aux4/aux2)) + dummy d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* * "fatiando/gravmag/_polyprism.pyx":221 aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) d1 = ((aux4/aux2)) + dummy # * <<<<<<<<<<<<<< d2 = ((aux5/aux2)) + dummy aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 221; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":222 aux5 = (aux0*X2) + (aux1*Y2) d1 * = ((aux4/aux2)) + dummy d2 = ((aux5/aux2)) + dummy # * <<<<<<<<<<<<<< aux6 = (X1*X1) + (Y1*Y1) aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 222; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":223 d1 = ((aux4/aux2)) + dummy d2 = * ((aux5/aux2)) + dummy aux6 = (X1*X1) + (Y1*Y1) # * <<<<<<<<<<<<<< aux7 = (X2*X2) + (Y2*Y2) aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":224 d2 = ((aux5/aux2)) + dummy aux6 * = (X1*X1) + (Y1*Y1) aux7 = (X2*X2) + (Y2*Y2) # * <<<<<<<<<<<<<< aux8 = Z1*Z1 aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* * "fatiando/gravmag/_polyprism.pyx":225 aux6 = (X1*X1) + (Y1*Y1) aux7 = * (X2*X2) + (Y2*Y2) aux8 = Z1*Z1 # <<<<<<<<<<<<<< aux9 = * Z2*Z2 R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* * "fatiando/gravmag/_polyprism.pyx":226 aux7 = (X2*X2) + (Y2*Y2) aux8 = * Z1*Z1 aux9 = Z2*Z2 # <<<<<<<<<<<<<< R11 = sqrt(aux6 + * aux8) R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* * "fatiando/gravmag/_polyprism.pyx":227 aux8 = Z1*Z1 aux9 = Z2*Z2 R11 = * sqrt(aux6 + aux8) # <<<<<<<<<<<<<< R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* * "fatiando/gravmag/_polyprism.pyx":228 aux9 = Z2*Z2 R11 = sqrt(aux6 + * aux8) R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< R21 = * sqrt(aux7 + aux8) R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* * "fatiando/gravmag/_polyprism.pyx":229 R11 = sqrt(aux6 + aux8) R12 = * sqrt(aux6 + aux9) R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) aux10 = atan2((Z2*d2), (p*R22)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* * "fatiando/gravmag/_polyprism.pyx":230 R12 = sqrt(aux6 + aux9) R21 = * sqrt(aux7 + aux8) R22 
= sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d2), (p*R22)) aux11 = atan2((Z1*d2), (p*R21)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* * "fatiando/gravmag/_polyprism.pyx":231 R21 = sqrt(aux7 + aux8) R22 = * sqrt(aux7 + aux9) aux10 = atan2((Z2*d2), (p*R22)) # * <<<<<<<<<<<<<< aux11 = atan2((Z1*d2), (p*R21)) aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R22)); /* * "fatiando/gravmag/_polyprism.pyx":232 R22 = sqrt(aux7 + aux9) aux10 = * atan2((Z2*d2), (p*R22)) aux11 = atan2((Z1*d2), (p*R21)) # * <<<<<<<<<<<<<< aux12 = aux10 - aux11 aux13 = (aux12/(p*d2)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R21)); /* * "fatiando/gravmag/_polyprism.pyx":233 aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) aux12 = aux10 - aux11 # * <<<<<<<<<<<<<< aux13 = (aux12/(p*d2)) aux14 = ((p*aux12)/d2) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* * "fatiando/gravmag/_polyprism.pyx":234 aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 aux13 = (aux12/(p*d2)) # * <<<<<<<<<<<<<< aux14 = ((p*aux12)/d2) res = (c*X2*aux13) + (m*aux14) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 234; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* * "fatiando/gravmag/_polyprism.pyx":235 aux12 = aux10 - aux11 aux13 = * (aux12/(p*d2)) aux14 = ((p*aux12)/d2) # <<<<<<<<<<<<<< res * = (c*X2*aux13) + (m*aux14) aux10 = atan2((Z2*d1), (p*R12)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 235; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d2); /* * "fatiando/gravmag/_polyprism.pyx":236 aux13 = (aux12/(p*d2)) aux14 = * ((p*aux12)/d2) res = (c*X2*aux13) + (m*aux14) # * <<<<<<<<<<<<<< aux10 = atan2((Z2*d1), (p*R12)) aux11 = atan2((Z1*d1), * (p*R11)) */ __pyx_v_res = (((__pyx_v_c * __pyx_v_X2) * __pyx_v_aux13) + (__pyx_v_m * __pyx_v_aux14)); /* * "fatiando/gravmag/_polyprism.pyx":237 aux14 = ((p*aux12)/d2) res = * (c*X2*aux13) + (m*aux14) aux10 = atan2((Z2*d1), (p*R12)) # * <<<<<<<<<<<<<< aux11 = atan2((Z1*d1), (p*R11)) aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R12)); /* * "fatiando/gravmag/_polyprism.pyx":238 res = (c*X2*aux13) + (m*aux14) * aux10 = atan2((Z2*d1), (p*R12)) aux11 = atan2((Z1*d1), (p*R11)) * # <<<<<<<<<<<<<< aux12 = aux10 - aux11 aux13 = (aux12/(p*d1)) */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R11)); /* * "fatiando/gravmag/_polyprism.pyx":239 aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) aux12 = aux10 - aux11 # * <<<<<<<<<<<<<< aux13 = (aux12/(p*d1)) aux14 = ((p*aux12)/d1) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* * "fatiando/gravmag/_polyprism.pyx":240 aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 aux13 = (aux12/(p*d1)) # * <<<<<<<<<<<<<< aux14 = ((p*aux12)/d1) res -= (c*X1*aux13) + (m*aux14) */ __pyx_t_1 = (__pyx_v_p 
* __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 240; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux13 = (__pyx_v_aux12 / __pyx_t_1); /* * "fatiando/gravmag/_polyprism.pyx":241 aux12 = aux10 - aux11 aux13 = * (aux12/(p*d1)) aux14 = ((p*aux12)/d1) # <<<<<<<<<<<<<< res * -= (c*X1*aux13) + (m*aux14) aux10 = log(((Z2 + R22) + dummy)) */ __pyx_t_1 = (__pyx_v_p * __pyx_v_aux12); if (unlikely(__pyx_v_d1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux14 = (__pyx_t_1 / __pyx_v_d1); /* * "fatiando/gravmag/_polyprism.pyx":242 aux13 = (aux12/(p*d1)) aux14 = * ((p*aux12)/d1) res -= (c*X1*aux13) + (m*aux14) # * <<<<<<<<<<<<<< aux10 = log(((Z2 + R22) + dummy)) aux11 = log(((Z1 + * R21) + dummy)) */ __pyx_v_res = (__pyx_v_res - (((__pyx_v_c * __pyx_v_X1) * __pyx_v_aux13) + (__pyx_v_m * __pyx_v_aux14))); /* * "fatiando/gravmag/_polyprism.pyx":243 aux14 = ((p*aux12)/d1) res -= * (c*X1*aux13) + (m*aux14) aux10 = log(((Z2 + R22) + dummy)) * # <<<<<<<<<<<<<< aux11 = log(((Z1 + R21) + dummy)) aux12 = log(((Z2 + * R12) + dummy)) */ __pyx_v_aux10 = log(((__pyx_v_Z2 + __pyx_v_R22) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":244 res -= (c*X1*aux13) + (m*aux14) * aux10 = log(((Z2 + R22) + dummy)) aux11 = log(((Z1 + R21) + dummy)) * # <<<<<<<<<<<<<< aux12 = log(((Z2 + R12) + dummy)) aux13 = log(((Z1 + * R11) + dummy)) */ __pyx_v_aux11 = log(((__pyx_v_Z1 + __pyx_v_R21) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":245 aux10 = log(((Z2 + R22) + * dummy)) aux11 = log(((Z1 + R21) + dummy)) aux12 = log(((Z2 + R12) + * dummy)) # <<<<<<<<<<<<<< aux13 = log(((Z1 + R11) + dummy)) * aux14 = aux10 - aux11 */ __pyx_v_aux12 = log(((__pyx_v_Z2 + __pyx_v_R12) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":246 aux11 = log(((Z1 + R21) + * dummy)) aux12 = log(((Z2 + R12) + dummy)) aux13 = log(((Z1 + R11) + * dummy)) # <<<<<<<<<<<<<< aux14 = aux10 - aux11 aux15 = * aux12 - aux13 */ __pyx_v_aux13 = log(((__pyx_v_Z1 + __pyx_v_R11) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":247 aux12 = log(((Z2 + R12) + * dummy)) aux13 = log(((Z1 + R11) + dummy)) aux14 = aux10 - aux11 * # <<<<<<<<<<<<<< aux15 = aux12 - aux13 res += (m*(aux15 - aux14)) */ __pyx_v_aux14 = (__pyx_v_aux10 - __pyx_v_aux11); /* * "fatiando/gravmag/_polyprism.pyx":248 aux13 = log(((Z1 + R11) + * dummy)) aux14 = aux10 - aux11 aux15 = aux12 - aux13 # * <<<<<<<<<<<<<< res += (m*(aux15 - aux14)) aux1 = (1.0/(1.0 + (m*m))) */ __pyx_v_aux15 = (__pyx_v_aux12 - __pyx_v_aux13); /* * "fatiando/gravmag/_polyprism.pyx":249 aux14 = aux10 - aux11 aux15 = * aux12 - aux13 res += (m*(aux15 - aux14)) # <<<<<<<<<<<<<< * aux1 = (1.0/(1.0 + (m*m))) res *= aux1 */ __pyx_v_res = (__pyx_v_res + (__pyx_v_m * (__pyx_v_aux15 - __pyx_v_aux14))); /* * "fatiando/gravmag/_polyprism.pyx":250 aux15 = aux12 - aux13 res += * (m*(aux15 - aux14)) aux1 = (1.0/(1.0 + (m*m))) # * <<<<<<<<<<<<<< res *= aux1 kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_m * __pyx_v_m)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD 
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 250; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux1 = (1.0 / __pyx_t_1); /* * "fatiando/gravmag/_polyprism.pyx":251 res += (m*(aux15 - aux14)) aux1 * = (1.0/(1.0 + (m*m))) res *= aux1 # <<<<<<<<<<<<<< kernel * += res return kernel */ __pyx_v_res = (__pyx_v_res * __pyx_v_aux1); /* * "fatiando/gravmag/_polyprism.pyx":252 aux1 = (1.0/(1.0 + (m*m))) res * *= aux1 kernel += res # <<<<<<<<<<<<<< return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* * "fatiando/gravmag/_polyprism.pyx":253 res *= aux1 kernel += res return * kernel # <<<<<<<<<<<<<< * * cdef inline double kernelyz(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* * "fatiando/gravmag/_polyprism.pyx":203 return kernel * * cdef inline double kernelyy(double X1, double Y1, double X2, double Y2, * # <<<<<<<<<<<<<< double Z1, double Z2, double Z1_sqr, double Z2_sqr) * nogil: cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelyy", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":255 return kernel * * cdef inline double kernelyz(double X1, double Y1, double X2, double Y2, * # <<<<<<<<<<<<<< double Z1, double Z2, double Z1_sqr, double Z2_sqr) * nogil: cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux13; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux14; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux15; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux16; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_m; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_c; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; 
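/*
 * Annotation (editorial, not emitted by Cython): kernelyz, like kernelyy
 * above, evaluates a single polygon edge's contribution to one
 * gradient-tensor component of the prism, with all coordinates already
 * shifted to the observation point. Reading the generated expressions,
 * the geometric quantities appear to be:
 *
 *   m   = (Y2 - Y1) / (X2 - X1)             edge slope (aux1/aux0)
 *   c   = Y1 - m*X1                         edge intercept
 *   L   = sqrt((X2-X1)^2 + (Y2-Y1)^2)       edge length (aux2)
 *   d1  = ((X2-X1)*X1 + (Y2-Y1)*Y1) / L     vertex-1 projection on edge
 *   d2  = ((X2-X1)*X2 + (Y2-Y1)*Y2) / L     vertex-2 projection on edge
 *   Rjk = sqrt(Xj^2 + Yj^2 + Zk^2)          vertex-to-depth-corner distances
 *
 * The `dummy` constant (1e-10) is added to the coordinate differences, the
 * projections, and the log arguments so that the ZeroDivisionError guards
 * below almost never fire when a station sits on an edge or corner; it
 * trades a negligible bias for numerical robustness.
 */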
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* * "fatiando/gravmag/_polyprism.pyx":262 aux14, aux15, aux16, m, c, d1, * d2, \ R11, R12, R21, R22, res DTYPE_T dummy = 1e-10 # Used to avoid * singularities # <<<<<<<<<<<<<< kernel = 0 aux0 = X2 - X1 + * dummy */ __pyx_v_dummy = 1e-10; /* * "fatiando/gravmag/_polyprism.pyx":263 R11, R12, R21, R22, res DTYPE_T * dummy = 1e-10 # Used to avoid singularities kernel = 0 # * <<<<<<<<<<<<<< aux0 = X2 - X1 + dummy aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":264 DTYPE_T dummy = 1e-10 # Used to * avoid singularities kernel = 0 aux0 = X2 - X1 + dummy # * <<<<<<<<<<<<<< aux1 = Y2 - Y1 + dummy m = (aux1/aux0) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":265 kernel = 0 aux0 = X2 - X1 + * dummy aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< m = * (aux1/aux0) c = Y1 - (X1*m) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":266 aux0 = X2 - X1 + dummy aux1 = Y2 * - Y1 + dummy m = (aux1/aux0) # <<<<<<<<<<<<<< c = Y1 - * (X1*m) aux2 = sqrt((aux0*aux0) + (aux1*aux1)) */ if (unlikely(__pyx_v_aux0 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 266; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_m = (__pyx_v_aux1 / __pyx_v_aux0); /* * "fatiando/gravmag/_polyprism.pyx":267 aux1 = Y2 - Y1 + dummy m = * (aux1/aux0) c = Y1 - (X1*m) # <<<<<<<<<<<<<< aux2 = * sqrt((aux0*aux0) + (aux1*aux1)) aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_c = (__pyx_v_Y1 - (__pyx_v_X1 * __pyx_v_m)); /* * "fatiando/gravmag/_polyprism.pyx":268 m = (aux1/aux0) c = Y1 - (X1*m) * aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # <<<<<<<<<<<<<< * aux4 = (aux0*X1) + (aux1*Y1) aux5 = (aux0*X2) + (aux1*Y2) */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* * "fatiando/gravmag/_polyprism.pyx":269 c = Y1 - (X1*m) aux2 = * sqrt((aux0*aux0) + (aux1*aux1)) aux4 = (aux0*X1) + (aux1*Y1) * # <<<<<<<<<<<<<< aux5 = (aux0*X2) + (aux1*Y2) d1 = ((aux4/aux2)) + * dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":270 aux2 = sqrt((aux0*aux0) + * (aux1*aux1)) aux4 = (aux0*X1) + (aux1*Y1) aux5 = (aux0*X2) + (aux1*Y2) * # <<<<<<<<<<<<<< d1 = ((aux4/aux2)) + dummy d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* * "fatiando/gravmag/_polyprism.pyx":271 aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) d1 = ((aux4/aux2)) + dummy # * <<<<<<<<<<<<<< d2 = ((aux5/aux2)) + dummy aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":272 aux5 = (aux0*X2) + (aux1*Y2) d1 * = ((aux4/aux2)) + dummy d2 = ((aux5/aux2)) + dummy # 
* <<<<<<<<<<<<<< aux6 = (X1*X1) + (Y1*Y1) aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 272; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":273 d1 = ((aux4/aux2)) + dummy d2 = * ((aux5/aux2)) + dummy aux6 = (X1*X1) + (Y1*Y1) # * <<<<<<<<<<<<<< aux7 = (X2*X2) + (Y2*Y2) aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":274 d2 = ((aux5/aux2)) + dummy aux6 * = (X1*X1) + (Y1*Y1) aux7 = (X2*X2) + (Y2*Y2) # * <<<<<<<<<<<<<< aux8 = Z1*Z1 aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* * "fatiando/gravmag/_polyprism.pyx":275 aux6 = (X1*X1) + (Y1*Y1) aux7 = * (X2*X2) + (Y2*Y2) aux8 = Z1*Z1 # <<<<<<<<<<<<<< aux9 = * Z2*Z2 R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* * "fatiando/gravmag/_polyprism.pyx":276 aux7 = (X2*X2) + (Y2*Y2) aux8 = * Z1*Z1 aux9 = Z2*Z2 # <<<<<<<<<<<<<< R11 = sqrt(aux6 + * aux8) R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* * "fatiando/gravmag/_polyprism.pyx":277 aux8 = Z1*Z1 aux9 = Z2*Z2 R11 = * sqrt(aux6 + aux8) # <<<<<<<<<<<<<< R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* * "fatiando/gravmag/_polyprism.pyx":278 aux9 = Z2*Z2 R11 = sqrt(aux6 + * aux8) R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< R21 = * sqrt(aux7 + aux8) R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* * "fatiando/gravmag/_polyprism.pyx":279 R11 = sqrt(aux6 + aux8) R12 = * sqrt(aux6 + aux9) R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* * "fatiando/gravmag/_polyprism.pyx":280 R12 = sqrt(aux6 + aux9) R21 = * sqrt(aux7 + aux8) R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) aux11 = log((((R12 - * d1)/(R12 + d1)) + dummy)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* * "fatiando/gravmag/_polyprism.pyx":281 R21 = sqrt(aux7 + aux8) R22 = * sqrt(aux7 + aux9) aux10 = log((((R11 - d1)/(R11 + d1)) + dummy)) * # <<<<<<<<<<<<<< aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) aux12 * = log((((R21 - d2)/(R21 + d2)) + dummy)) */ __pyx_t_1 = (__pyx_v_R11 - __pyx_v_d1); __pyx_t_2 = (__pyx_v_R11 + __pyx_v_d1); if (unlikely(__pyx_t_2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux10 = log(((__pyx_t_1 / __pyx_t_2) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":282 R22 = sqrt(aux7 + aux9) aux10 = * log((((R11 - d1)/(R11 + d1)) + dummy)) aux11 = log((((R12 - d1)/(R12 + * d1)) + dummy)) # <<<<<<<<<<<<<< aux12 = log((((R21 - * d2)/(R21 + d2)) + dummy)) aux13 = log((((R22 - d2)/(R22 + d2)) + * dummy)) */ __pyx_t_2 = (__pyx_v_R12 - __pyx_v_d1); __pyx_t_1 = (__pyx_v_R12 + __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD 
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux11 = log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":283 aux10 = log((((R11 - d1)/(R11 + * d1)) + dummy)) aux11 = log((((R12 - d1)/(R12 + d1)) + dummy)) aux12 = * log((((R21 - d2)/(R21 + d2)) + dummy)) # <<<<<<<<<<<<<< * aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) aux14 = (1.0/(2*d1)) */ __pyx_t_1 = (__pyx_v_R21 - __pyx_v_d2); __pyx_t_2 = (__pyx_v_R21 + __pyx_v_d2); if (unlikely(__pyx_t_2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux12 = log(((__pyx_t_1 / __pyx_t_2) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":284 aux11 = log((((R12 - d1)/(R12 + * d1)) + dummy)) aux12 = log((((R21 - d2)/(R21 + d2)) + dummy)) aux13 = * log((((R22 - d2)/(R22 + d2)) + dummy)) # <<<<<<<<<<<<<< * aux14 = (1.0/(2*d1)) aux15 = (1.0/(2*d2)) */ __pyx_t_2 = (__pyx_v_R22 - __pyx_v_d2); __pyx_t_1 = (__pyx_v_R22 + __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux13 = log(((__pyx_t_2 / __pyx_t_1) + __pyx_v_dummy)); /* * "fatiando/gravmag/_polyprism.pyx":285 aux12 = log((((R21 - d2)/(R21 + * d2)) + dummy)) aux13 = log((((R22 - d2)/(R22 + d2)) + dummy)) aux14 = * (1.0/(2*d1)) # <<<<<<<<<<<<<< aux15 = (1.0/(2*d2)) aux16 = * aux15*(aux13 - aux12) */ __pyx_t_1 = (2.0 * __pyx_v_d1); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 285; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux14 = (1.0 / __pyx_t_1); /* * "fatiando/gravmag/_polyprism.pyx":286 aux13 = log((((R22 - d2)/(R22 + * d2)) + dummy)) aux14 = (1.0/(2*d1)) aux15 = (1.0/(2*d2)) # * <<<<<<<<<<<<<< aux16 = aux15*(aux13 - aux12) res = (X2*(1.0 + (m*m)) + * c*m)*aux16 */ __pyx_t_1 = (2.0 * __pyx_v_d2); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 286; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux15 = (1.0 / __pyx_t_1); /* * "fatiando/gravmag/_polyprism.pyx":287 aux14 = (1.0/(2*d1)) aux15 = * (1.0/(2*d2)) aux16 = aux15*(aux13 - aux12) # * <<<<<<<<<<<<<< res = (X2*(1.0 + (m*m)) + c*m)*aux16 aux16 = * aux14*(aux11 - aux10) */ __pyx_v_aux16 = (__pyx_v_aux15 * (__pyx_v_aux13 - __pyx_v_aux12)); /* * "fatiando/gravmag/_polyprism.pyx":288 aux15 = (1.0/(2*d2)) aux16 = * aux15*(aux13 - aux12) res = (X2*(1.0 + (m*m)) 
+ c*m)*aux16 * # <<<<<<<<<<<<<< aux16 = aux14*(aux11 - aux10) res -= (X1*(1.0 + * (m*m)) + c*m)*aux16 */ __pyx_v_res = (((__pyx_v_X2 * (1.0 + (__pyx_v_m * __pyx_v_m))) + (__pyx_v_c * __pyx_v_m)) * __pyx_v_aux16); /* * "fatiando/gravmag/_polyprism.pyx":289 aux16 = aux15*(aux13 - aux12) * res = (X2*(1.0 + (m*m)) + c*m)*aux16 aux16 = aux14*(aux11 - aux10) * # <<<<<<<<<<<<<< res -= (X1*(1.0 + (m*m)) + c*m)*aux16 aux1 = * (1.0/(1.0 + (m*m))) */ __pyx_v_aux16 = (__pyx_v_aux14 * (__pyx_v_aux11 - __pyx_v_aux10)); /* * "fatiando/gravmag/_polyprism.pyx":290 res = (X2*(1.0 + (m*m)) + * c*m)*aux16 aux16 = aux14*(aux11 - aux10) res -= (X1*(1.0 + (m*m)) + * c*m)*aux16 # <<<<<<<<<<<<<< aux1 = (1.0/(1.0 + (m*m))) res * *= aux1 */ __pyx_v_res = (__pyx_v_res - (((__pyx_v_X1 * (1.0 + (__pyx_v_m * __pyx_v_m))) + (__pyx_v_c * __pyx_v_m)) * __pyx_v_aux16)); /* * "fatiando/gravmag/_polyprism.pyx":291 aux16 = aux14*(aux11 - aux10) * res -= (X1*(1.0 + (m*m)) + c*m)*aux16 aux1 = (1.0/(1.0 + (m*m))) * # <<<<<<<<<<<<<< res *= aux1 kernel += res */ __pyx_t_1 = (1.0 + (__pyx_v_m * __pyx_v_m)); if (unlikely(__pyx_t_1 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 291; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_aux1 = (1.0 / __pyx_t_1); /* * "fatiando/gravmag/_polyprism.pyx":292 res -= (X1*(1.0 + (m*m)) + * c*m)*aux16 aux1 = (1.0/(1.0 + (m*m))) res *= aux1 # * <<<<<<<<<<<<<< kernel += res return kernel */ __pyx_v_res = (__pyx_v_res * __pyx_v_aux1); /* * "fatiando/gravmag/_polyprism.pyx":293 aux1 = (1.0/(1.0 + (m*m))) res * *= aux1 kernel += res # <<<<<<<<<<<<<< return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* * "fatiando/gravmag/_polyprism.pyx":294 res *= aux1 kernel += res return * kernel # <<<<<<<<<<<<<< * * cdef inline double kernelzz(double X1, double Y1, double X2, double Y2, */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* * "fatiando/gravmag/_polyprism.pyx":255 return kernel * * cdef inline double kernelyz(double X1, double Y1, double X2, double Y2, * # <<<<<<<<<<<<<< double Z1, double Z2, double Z1_sqr, double Z2_sqr) * nogil: cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelyz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":296 return kernel * * cdef inline double kernelzz(double X1, double Y1, double X2, double Y2, * # <<<<<<<<<<<<<< double Z1, double Z2, double Z1_sqr, double Z2_sqr) * nogil: cdef: */ static CYTHON_INLINE double __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(double __pyx_v_X1, double __pyx_v_Y1, double __pyx_v_X2, double __pyx_v_Y2, double __pyx_v_Z1, double __pyx_v_Z2, CYTHON_UNUSED double __pyx_v_Z1_sqr, CYTHON_UNUSED double __pyx_v_Z2_sqr) { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux7; 
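/*
 * Annotation (editorial): kernelzz below needs only arctangent terms. With
 * p = ((X1*Y2 - X2*Y1)/L) + dummy, the signed perpendicular distance from
 * the station to the edge's line, and d1, d2 the vertex projections as in
 * kernelyz, the generated statements compute exactly
 *
 *   res = [atan2(Z2*d2, p*R22) - atan2(Z1*d2, p*R21)]
 *       - [atan2(Z2*d1, p*R12) - atan2(Z1*d1, p*R11)]
 *
 * which can be read as the signed solid-angle contribution of this edge;
 * the callers (gz and the tensor routines) accumulate it over all edges.
 */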
__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux8; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux9; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux10; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_aux12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_p; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_d2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R11; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R12; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R21; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_R22; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_res; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_dummy; double __pyx_r; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* * "fatiando/gravmag/_polyprism.pyx":302 aux5, aux6, aux7, aux8, aux9, * aux10, aux11, aux12, p, d1, d2, \ R11, R12, R21, R22, res DTYPE_T * dummy = 1e-10 # Used to avoid singularities # * <<<<<<<<<<<<<< kernel = 0 aux0 = X2 - X1 + dummy */ __pyx_v_dummy = 1e-10; /* * "fatiando/gravmag/_polyprism.pyx":303 R11, R12, R21, R22, res DTYPE_T * dummy = 1e-10 # Used to avoid singularities kernel = 0 # * <<<<<<<<<<<<<< aux0 = X2 - X1 + dummy aux1 = Y2 - Y1 + dummy */ __pyx_v_kernel = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":304 DTYPE_T dummy = 1e-10 # Used to * avoid singularities kernel = 0 aux0 = X2 - X1 + dummy # * <<<<<<<<<<<<<< aux1 = Y2 - Y1 + dummy aux2 = sqrt((aux0*aux0) + * (aux1*aux1)) */ __pyx_v_aux0 = ((__pyx_v_X2 - __pyx_v_X1) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":305 kernel = 0 aux0 = X2 - X1 + * dummy aux1 = Y2 - Y1 + dummy # <<<<<<<<<<<<<< aux2 = * sqrt((aux0*aux0) + (aux1*aux1)) aux3 = (X1*Y2) - (X2*Y1) */ __pyx_v_aux1 = ((__pyx_v_Y2 - __pyx_v_Y1) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":306 aux0 = X2 - X1 + dummy aux1 = Y2 * - Y1 + dummy aux2 = sqrt((aux0*aux0) + (aux1*aux1)) # * <<<<<<<<<<<<<< aux3 = (X1*Y2) - (X2*Y1) p = ((aux3/aux2)) + dummy */ __pyx_v_aux2 = sqrt(((__pyx_v_aux0 * __pyx_v_aux0) + (__pyx_v_aux1 * __pyx_v_aux1))); /* * "fatiando/gravmag/_polyprism.pyx":307 aux1 = Y2 - Y1 + dummy aux2 = * sqrt((aux0*aux0) + (aux1*aux1)) aux3 = (X1*Y2) - (X2*Y1) # * <<<<<<<<<<<<<< p = ((aux3/aux2)) + dummy aux4 = (aux0*X1) + (aux1*Y1) */ __pyx_v_aux3 = ((__pyx_v_X1 * __pyx_v_Y2) - (__pyx_v_X2 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":308 aux2 = sqrt((aux0*aux0) + * (aux1*aux1)) aux3 = (X1*Y2) - (X2*Y1) p = ((aux3/aux2)) + dummy * # <<<<<<<<<<<<<< aux4 = (aux0*X1) + (aux1*Y1) aux5 = (aux0*X2) + * (aux1*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_p = ((__pyx_v_aux3 / __pyx_v_aux2) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":309 aux3 = (X1*Y2) - (X2*Y1) p = * ((aux3/aux2)) + dummy aux4 = (aux0*X1) + (aux1*Y1) # * <<<<<<<<<<<<<< aux5 = (aux0*X2) + (aux1*Y2) d1 = ((aux4/aux2)) + dummy */ __pyx_v_aux4 = ((__pyx_v_aux0 * __pyx_v_X1) + (__pyx_v_aux1 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":310 p = ((aux3/aux2)) + dummy aux4 = * (aux0*X1) + (aux1*Y1) aux5 = 
(aux0*X2) + (aux1*Y2) # * <<<<<<<<<<<<<< d1 = ((aux4/aux2)) + dummy d2 = ((aux5/aux2)) + dummy */ __pyx_v_aux5 = ((__pyx_v_aux0 * __pyx_v_X2) + (__pyx_v_aux1 * __pyx_v_Y2)); /* * "fatiando/gravmag/_polyprism.pyx":311 aux4 = (aux0*X1) + (aux1*Y1) * aux5 = (aux0*X2) + (aux1*Y2) d1 = ((aux4/aux2)) + dummy # * <<<<<<<<<<<<<< d2 = ((aux5/aux2)) + dummy aux6 = (X1*X1) + (Y1*Y1) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 311; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_d1 = ((__pyx_v_aux4 / __pyx_v_aux2) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":312 aux5 = (aux0*X2) + (aux1*Y2) d1 * = ((aux4/aux2)) + dummy d2 = ((aux5/aux2)) + dummy # * <<<<<<<<<<<<<< aux6 = (X1*X1) + (Y1*Y1) aux7 = (X2*X2) + (Y2*Y2) */ if (unlikely(__pyx_v_aux2 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 312; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_v_d2 = ((__pyx_v_aux5 / __pyx_v_aux2) + __pyx_v_dummy); /* * "fatiando/gravmag/_polyprism.pyx":313 d1 = ((aux4/aux2)) + dummy d2 = * ((aux5/aux2)) + dummy aux6 = (X1*X1) + (Y1*Y1) # * <<<<<<<<<<<<<< aux7 = (X2*X2) + (Y2*Y2) aux8 = Z1*Z1 */ __pyx_v_aux6 = ((__pyx_v_X1 * __pyx_v_X1) + (__pyx_v_Y1 * __pyx_v_Y1)); /* * "fatiando/gravmag/_polyprism.pyx":314 d2 = ((aux5/aux2)) + dummy aux6 * = (X1*X1) + (Y1*Y1) aux7 = (X2*X2) + (Y2*Y2) # * <<<<<<<<<<<<<< aux8 = Z1*Z1 aux9 = Z2*Z2 */ __pyx_v_aux7 = ((__pyx_v_X2 * __pyx_v_X2) + (__pyx_v_Y2 * __pyx_v_Y2)); /* * "fatiando/gravmag/_polyprism.pyx":315 aux6 = (X1*X1) + (Y1*Y1) aux7 = * (X2*X2) + (Y2*Y2) aux8 = Z1*Z1 # <<<<<<<<<<<<<< aux9 = * Z2*Z2 R11 = sqrt(aux6 + aux8) */ __pyx_v_aux8 = (__pyx_v_Z1 * __pyx_v_Z1); /* * "fatiando/gravmag/_polyprism.pyx":316 aux7 = (X2*X2) + (Y2*Y2) aux8 = * Z1*Z1 aux9 = Z2*Z2 # <<<<<<<<<<<<<< R11 = sqrt(aux6 + * aux8) R12 = sqrt(aux6 + aux9) */ __pyx_v_aux9 = (__pyx_v_Z2 * __pyx_v_Z2); /* * "fatiando/gravmag/_polyprism.pyx":317 aux8 = Z1*Z1 aux9 = Z2*Z2 R11 = * sqrt(aux6 + aux8) # <<<<<<<<<<<<<< R12 = sqrt(aux6 + aux9) * R21 = sqrt(aux7 + aux8) */ __pyx_v_R11 = sqrt((__pyx_v_aux6 + __pyx_v_aux8)); /* * "fatiando/gravmag/_polyprism.pyx":318 aux9 = Z2*Z2 R11 = sqrt(aux6 + * aux8) R12 = sqrt(aux6 + aux9) # <<<<<<<<<<<<<< R21 = * sqrt(aux7 + aux8) R22 = sqrt(aux7 + aux9) */ __pyx_v_R12 = sqrt((__pyx_v_aux6 + __pyx_v_aux9)); /* * "fatiando/gravmag/_polyprism.pyx":319 R11 = sqrt(aux6 + aux8) R12 = * sqrt(aux6 + aux9) R21 = sqrt(aux7 + aux8) # <<<<<<<<<<<<<< * R22 = sqrt(aux7 + aux9) aux10 = atan2((Z2*d2), (p*R22)) */ __pyx_v_R21 = sqrt((__pyx_v_aux7 + __pyx_v_aux8)); /* * "fatiando/gravmag/_polyprism.pyx":320 R12 = sqrt(aux6 + aux9) R21 = * sqrt(aux7 + aux8) R22 = sqrt(aux7 + aux9) # <<<<<<<<<<<<<< * aux10 = atan2((Z2*d2), (p*R22)) aux11 = atan2((Z1*d2), (p*R21)) */ __pyx_v_R22 = sqrt((__pyx_v_aux7 + __pyx_v_aux9)); /* * "fatiando/gravmag/_polyprism.pyx":321 R21 = sqrt(aux7 + aux8) R22 = * sqrt(aux7 + aux9) aux10 = atan2((Z2*d2), (p*R22)) # * <<<<<<<<<<<<<< aux11 = atan2((Z1*d2), (p*R21)) aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R22)); /* * 
"fatiando/gravmag/_polyprism.pyx":322 R22 = sqrt(aux7 + aux9) aux10 = * atan2((Z2*d2), (p*R22)) aux11 = atan2((Z1*d2), (p*R21)) # * <<<<<<<<<<<<<< aux12 = aux10 - aux11 res = aux12 */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d2), (__pyx_v_p * __pyx_v_R21)); /* * "fatiando/gravmag/_polyprism.pyx":323 aux10 = atan2((Z2*d2), (p*R22)) * aux11 = atan2((Z1*d2), (p*R21)) aux12 = aux10 - aux11 # * <<<<<<<<<<<<<< res = aux12 aux10 = atan2((Z2*d1), (p*R12)) */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* * "fatiando/gravmag/_polyprism.pyx":324 aux11 = atan2((Z1*d2), (p*R21)) * aux12 = aux10 - aux11 res = aux12 # <<<<<<<<<<<<<< aux10 = * atan2((Z2*d1), (p*R12)) aux11 = atan2((Z1*d1), (p*R11)) */ __pyx_v_res = __pyx_v_aux12; /* * "fatiando/gravmag/_polyprism.pyx":325 aux12 = aux10 - aux11 res = * aux12 aux10 = atan2((Z2*d1), (p*R12)) # <<<<<<<<<<<<<< * aux11 = atan2((Z1*d1), (p*R11)) aux12 = aux10 - aux11 */ __pyx_v_aux10 = atan2((__pyx_v_Z2 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R12)); /* * "fatiando/gravmag/_polyprism.pyx":326 res = aux12 aux10 = * atan2((Z2*d1), (p*R12)) aux11 = atan2((Z1*d1), (p*R11)) # * <<<<<<<<<<<<<< aux12 = aux10 - aux11 res -= aux12 */ __pyx_v_aux11 = atan2((__pyx_v_Z1 * __pyx_v_d1), (__pyx_v_p * __pyx_v_R11)); /* * "fatiando/gravmag/_polyprism.pyx":327 aux10 = atan2((Z2*d1), (p*R12)) * aux11 = atan2((Z1*d1), (p*R11)) aux12 = aux10 - aux11 # * <<<<<<<<<<<<<< res -= aux12 kernel += res */ __pyx_v_aux12 = (__pyx_v_aux10 - __pyx_v_aux11); /* * "fatiando/gravmag/_polyprism.pyx":328 aux11 = atan2((Z1*d1), (p*R11)) * aux12 = aux10 - aux11 res -= aux12 # <<<<<<<<<<<<<< kernel * += res return kernel */ __pyx_v_res = (__pyx_v_res - __pyx_v_aux12); /* * "fatiando/gravmag/_polyprism.pyx":329 aux12 = aux10 - aux11 res -= * aux12 kernel += res # <<<<<<<<<<<<<< return kernel * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_v_res); /* * "fatiando/gravmag/_polyprism.pyx":330 res -= aux12 kernel += res * return kernel # <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_r = __pyx_v_kernel; goto __pyx_L0; /* * "fatiando/gravmag/_polyprism.pyx":296 return kernel * * cdef inline double kernelzz(double X1, double Y1, double X2, double Y2, * # <<<<<<<<<<<<<< double Z1, double Z2, double Z1_sqr, double Z2_sqr) * nogil: cdef: */ /* function exit code */ __pyx_L1_error:; __Pyx_WriteUnraisable("fatiando.gravmag._polyprism.kernelzz", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":334 @cython.wraparound(False) * @cython.boundscheck(False) def gz(numpy.ndarray[DTYPE_T, ndim=1] xp not * None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] yp not * None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_1gz(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds); /* proto */ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_gz[] = "gz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_1gz = {__Pyx_NAMESTR("gz"), (PyCFunction) __pyx_pw_8fatiando_7gravmag_10_polyprism_1gz, METH_VARARGS | METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_gz)}; static PyObject * __pyx_pw_8fatiando_7gravmag_10_polyprism_1gz(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject 
*__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp, &__pyx_n_s_yp, &__pyx_n_s_zp, &__pyx_n_s_x, &__pyx_n_s_y, &__pyx_n_s_z1, &__pyx_n_s_z2, &__pyx_n_s_density, &__pyx_n_s_res, 0}; PyObject *values[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 1); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 2); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 3); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 4); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 5); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 6); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 7); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, 8); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gz") < 0)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 
334; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *) values[0]); __pyx_v_yp = ((PyArrayObject *) values[1]); __pyx_v_zp = ((PyArrayObject *) values[2]); __pyx_v_x = ((PyArrayObject *) values[3]); __pyx_v_y = ((PyArrayObject *) values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 339; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_res = ((PyArrayObject *) values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 335; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 336; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 337; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 338; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 340; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_gz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject * __pyx_pf_8fatiando_7gravmag_10_polyprism_gz(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * 
__pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject * __pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject *) __pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject *) __pyx_v_yp, 
&__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject *) __pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject *) __pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject *) __pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject *) __pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES | PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* * "fatiando/gravmag/_polyprism.pyx":344 unsigned int nverts, size, i, k, * kp1 DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr nverts = * len(x) # <<<<<<<<<<<<<< size = len(res) with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 344; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_nverts = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":345 DTYPE_T kernel, X1, Y1, X2, Y2, * Z1, Z2, Z1_sqr, Z2_sqr nverts = len(x) size = len(res) # * <<<<<<<<<<<<<< with nogil: for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 345; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_size = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":346 nverts = len(x) size = len(res) * with nogil: # 
<<<<<<<<<<<<<< for i in prange(size): Z1 = * z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /* try: */ { /* * "fatiando/gravmag/_polyprism.pyx":347 size = len(res) with * nogil: for i in prange(size): # <<<<<<<<<<<<<< Z1 * = z1 - zp[i] Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); unsigned int __pyx_parallel_temp4 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); unsigned int __pyx_parallel_temp6 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); unsigned int __pyx_parallel_temp9 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_Z2) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_X2) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++) { if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* * Initialize private variables to invalid * values */ __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); /* 
* "fatiando/gravmag/_polyprism.pyx":348 with * nogil: for i in prange(size): Z1 = z1 - * zp[i] # <<<<<<<<<<<<<< Z2 = z2 * - zp[i] Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":349 for * i in prange(size): Z1 = z1 - zp[i] Z2 = z2 * - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":350 Z1 = * z1 - zp[i] Z2 = z2 - zp[i] Z1_sqr = Z1**2 * # <<<<<<<<<<<<<< Z2_sqr = Z2**2 kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":351 Z2 = * z2 - zp[i] Z1_sqr = Z1**2 Z2_sqr = Z2**2 * # <<<<<<<<<<<<<< kernel = 0 for k in * range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":352 * Z1_sqr = Z1**2 Z2_sqr = Z2**2 kernel = 0 * # <<<<<<<<<<<<<< for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":353 * Z2_sqr = Z2**2 kernel = 0 for k in * range(nverts): # * <<<<<<<<<<<<<< X1 = x[k] - xp[i] Y1 = y[k] * - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8 += 1) { __pyx_v_k = __pyx_t_8; /* * "fatiando/gravmag/_polyprism.pyx":354 * kernel = 0 for k in range(nverts): X1 * = x[k] - xp[i] # * <<<<<<<<<<<<<< Y1 = y[k] - yp[i] kp1 = * (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":355 * for k in range(nverts): X1 = x[k] - * xp[i] Y1 = y[k] - yp[i] # * <<<<<<<<<<<<<< kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":356 * X1 = x[k] - xp[i] Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # * <<<<<<<<<<<<<< X2 = x[kp1] - xp[i] Y2 * = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L8_error; } } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* * "fatiando/gravmag/_polyprism.pyx":357 * Y1 = y[k] - yp[i] kp1 = (k + 1) % * nverts X2 = x[kp1] - xp[i] * # <<<<<<<<<<<<<< Y2 = y[kp1] - yp[i] * kernel += kernelz(X1, Y1, X2, Y2, Z1, * Z2, Z1_sqr, Z2_sqr) */ 
__pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":358 * kp1 = (k + 1) % nverts X2 = x[kp1] - * xp[i] Y2 = y[kp1] - yp[i] * # <<<<<<<<<<<<<< kernel += kernelz(X1, * Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) * res[i] += kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":359 * X2 = x[kp1] - xp[i] Y2 = y[kp1] - * yp[i] kernel += kernelz(X1, Y1, X2, * Y2, Z1, Z2, Z1_sqr, Z2_sqr) * # <<<<<<<<<<<<<< res[i] += * kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* * "fatiando/gravmag/_polyprism.pyx":360 Y2 = * y[kp1] - yp[i] kernel += kernelz(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) res[i] += * kernel*density # * <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates0) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_Y1; __pyx_parallel_temp1 = __pyx_v_Z2_sqr; __pyx_parallel_temp2 = __pyx_v_Z1_sqr; __pyx_parallel_temp3 = __pyx_v_Y2; __pyx_parallel_temp4 = __pyx_v_k; __pyx_parallel_temp5 = __pyx_v_Z1; __pyx_parallel_temp6 = __pyx_v_kp1; __pyx_parallel_temp7 = __pyx_v_Z2; __pyx_parallel_temp8 = __pyx_v_kernel; __pyx_parallel_temp9 = __pyx_v_i; __pyx_parallel_temp10 = __pyx_v_X1; __pyx_parallel_temp11 = __pyx_v_X2; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* * This may have been overridden by a continue, break or * return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Y1 = __pyx_parallel_temp0; __pyx_v_Z2_sqr = __pyx_parallel_temp1; __pyx_v_Z1_sqr = __pyx_parallel_temp2; __pyx_v_Y2 = __pyx_parallel_temp3; __pyx_v_k = __pyx_parallel_temp4; __pyx_v_Z1 = __pyx_parallel_temp5; __pyx_v_kp1 = __pyx_parallel_temp6; __pyx_v_Z2 = __pyx_parallel_temp7; __pyx_v_kernel = __pyx_parallel_temp8; __pyx_v_i = __pyx_parallel_temp9; __pyx_v_X1 = __pyx_parallel_temp10; __pyx_v_X2 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* * "fatiando/gravmag/_polyprism.pyx":346 nverts = len(x) size = * len(res) with nogil: # <<<<<<<<<<<<<< for i in * prange(size): Z1 = z1 - zp[i] */ /* finally: */ { /* normal exit: */ { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* * "fatiando/gravmag/_polyprism.pyx":334 @cython.wraparound(False) * @cython.boundscheck(False) def gz(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb); } __Pyx_AddTraceback("fatiando.gravmag._polyprism.gz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":364 @cython.wraparound(False) * @cython.boundscheck(False) def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not * None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] yp not * None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject 
*__pyx_pw_8fatiando_7gravmag_10_polyprism_3gxx(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds); /* proto */ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_2gxx[] = "gxx(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_3gxx = {__Pyx_NAMESTR("gxx"), (PyCFunction) __pyx_pw_8fatiando_7gravmag_10_polyprism_3gxx, METH_VARARGS | METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_2gxx)}; static PyObject * __pyx_pw_8fatiando_7gravmag_10_polyprism_3gxx(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxx (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp, &__pyx_n_s_yp, &__pyx_n_s_zp, &__pyx_n_s_x, &__pyx_n_s_y, &__pyx_n_s_z1, &__pyx_n_s_z2, &__pyx_n_s_density, &__pyx_n_s_res, 0}; PyObject *values[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 1); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 2); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 3); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 4); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 5); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { 
__Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 6); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 7); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, 8); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxx") < 0)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *) values[0]); __pyx_v_yp = ((PyArrayObject *) values[1]); __pyx_v_zp = ((PyArrayObject *) values[2]); __pyx_v_x = ((PyArrayObject *) values[3]); __pyx_v_y = ((PyArrayObject *) values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_res = ((PyArrayObject *) values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxx", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxx", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 365; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if 
(unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 368; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_2gxx(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject * __pyx_pf_8fatiando_7gravmag_10_polyprism_2gxx(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject * __pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxx", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; 
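/*
 * The remainder of this function implements the following Cython source,
 * pieced together from the inline "_polyprism.pyx" quotes that the
 * generator leaves in the C below.  A sketch only: the signature is partly
 * inferred from the argument-unpacking wrapper above.
 *
 *     @cython.wraparound(False)
 *     @cython.boundscheck(False)
 *     def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp not None,
 *             numpy.ndarray[DTYPE_T, ndim=1] yp not None,
 *             numpy.ndarray[DTYPE_T, ndim=1] zp not None,
 *             numpy.ndarray[DTYPE_T, ndim=1] x not None,
 *             numpy.ndarray[DTYPE_T, ndim=1] y not None,
 *             double z1, double z2, double density,
 *             numpy.ndarray[DTYPE_T, ndim=1] res not None):
 *         nverts = len(x)
 *         size = len(res)
 *         with nogil:
 *             for i in prange(size):
 *                 Z1 = z1 - zp[i]
 *                 Z2 = z2 - zp[i]
 *                 Z1_sqr = Z1**2
 *                 Z2_sqr = Z2**2
 *                 kernel = 0
 *                 for k in range(nverts):
 *                     X1 = x[k] - xp[i]
 *                     Y1 = y[k] - yp[i]
 *                     kp1 = (k + 1) % nverts
 *                     X2 = x[kp1] - xp[i]
 *                     Y2 = y[kp1] - yp[i]
 *                     kernel += kernelxx(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
 *                 res[i] += kernel*density
 */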
__pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject *) __pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject *) __pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject *) __pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject *) __pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject *) __pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject *) __pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES | PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* * "fatiando/gravmag/_polyprism.pyx":374 unsigned int nverts, size, i, k, * kp1 DTYPE_T kernel, 
X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr nverts = * len(x) # <<<<<<<<<<<<<< size = len(res) with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_nverts = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":375 DTYPE_T kernel, X1, Y1, X2, Y2, * Z1, Z2, Z1_sqr, Z2_sqr nverts = len(x) size = len(res) # * <<<<<<<<<<<<<< with nogil: for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_size = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":376 nverts = len(x) size = len(res) * with nogil: # <<<<<<<<<<<<<< for i in prange(size): Z1 = * z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /* try: */ { /* * "fatiando/gravmag/_polyprism.pyx":377 size = len(res) with * nogil: for i in prange(size): # <<<<<<<<<<<<<< Z1 * = z1 - zp[i] Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); unsigned int __pyx_parallel_temp4 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); unsigned int __pyx_parallel_temp8 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); unsigned int __pyx_parallel_temp11 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Y1) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_k) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_kp1) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++) { if (__pyx_parallel_why 
< 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* * Initialize private variables to invalid * values */ __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); /* * "fatiando/gravmag/_polyprism.pyx":378 with * nogil: for i in prange(size): Z1 = z1 - * zp[i] # <<<<<<<<<<<<<< Z2 = z2 * - zp[i] Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":379 for * i in prange(size): Z1 = z1 - zp[i] Z2 = z2 * - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":380 Z1 = * z1 - zp[i] Z2 = z2 - zp[i] Z1_sqr = Z1**2 * # <<<<<<<<<<<<<< Z2_sqr = Z2**2 kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":381 Z2 = * z2 - zp[i] Z1_sqr = Z1**2 Z2_sqr = Z2**2 * # <<<<<<<<<<<<<< kernel = 0 for k in * range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":382 * Z1_sqr = Z1**2 Z2_sqr = Z2**2 kernel = 0 * # <<<<<<<<<<<<<< for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":383 * Z2_sqr = Z2**2 kernel = 0 for k in * range(nverts): # * <<<<<<<<<<<<<< X1 = x[k] - xp[i] Y1 = y[k] * - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8 += 1) { __pyx_v_k = __pyx_t_8; /* * "fatiando/gravmag/_polyprism.pyx":384 * kernel = 0 for k in range(nverts): X1 * = x[k] - xp[i] # * <<<<<<<<<<<<<< Y1 = y[k] - yp[i] kp1 = * (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":385 * for k in range(nverts): X1 = x[k] - * xp[i] Y1 = y[k] - yp[i] # * <<<<<<<<<<<<<< kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * 
"fatiando/gravmag/_polyprism.pyx":386 * X1 = x[k] - xp[i] Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # * <<<<<<<<<<<<<< X2 = x[kp1] - xp[i] Y2 * = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L8_error; } } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* * "fatiando/gravmag/_polyprism.pyx":387 * Y1 = y[k] - yp[i] kp1 = (k + 1) % * nverts X2 = x[kp1] - xp[i] * # <<<<<<<<<<<<<< Y2 = y[kp1] - yp[i] * kernel += kernelxx(X1, Y1, X2, Y2, Z1, * Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":388 * kp1 = (k + 1) % nverts X2 = x[kp1] - * xp[i] Y2 = y[kp1] - yp[i] * # <<<<<<<<<<<<<< kernel += * kernelxx(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) res[i] += * kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":389 * X2 = x[kp1] - xp[i] Y2 = y[kp1] - * yp[i] kernel += kernelxx(X1, Y1, X2, * Y2, Z1, Z2, Z1_sqr, Z2_sqr) * # <<<<<<<<<<<<<< res[i] += * kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* * "fatiando/gravmag/_polyprism.pyx":390 Y2 = * y[kp1] - yp[i] kernel += kernelxx(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) res[i] += * kernel*density # * <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates1) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_X2; __pyx_parallel_temp1 = __pyx_v_kernel; __pyx_parallel_temp2 = __pyx_v_Z2_sqr; __pyx_parallel_temp3 = __pyx_v_Y1; 
__pyx_parallel_temp4 = __pyx_v_i; __pyx_parallel_temp5 = __pyx_v_Z1; __pyx_parallel_temp6 = __pyx_v_Z1_sqr; __pyx_parallel_temp7 = __pyx_v_Y2; __pyx_parallel_temp8 = __pyx_v_k; __pyx_parallel_temp9 = __pyx_v_X1; __pyx_parallel_temp10 = __pyx_v_Z2; __pyx_parallel_temp11 = __pyx_v_kp1; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* * This may have been overridden by a continue, break or * return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_X2 = __pyx_parallel_temp0; __pyx_v_kernel = __pyx_parallel_temp1; __pyx_v_Z2_sqr = __pyx_parallel_temp2; __pyx_v_Y1 = __pyx_parallel_temp3; __pyx_v_i = __pyx_parallel_temp4; __pyx_v_Z1 = __pyx_parallel_temp5; __pyx_v_Z1_sqr = __pyx_parallel_temp6; __pyx_v_Y2 = __pyx_parallel_temp7; __pyx_v_k = __pyx_parallel_temp8; __pyx_v_X1 = __pyx_parallel_temp9; __pyx_v_Z2 = __pyx_parallel_temp10; __pyx_v_kp1 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* * "fatiando/gravmag/_polyprism.pyx":376 nverts = len(x) size = * len(res) with nogil: # <<<<<<<<<<<<<< for i in * prange(size): Z1 = z1 - zp[i] */ /* finally: */ { /* normal exit: */ { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* * "fatiando/gravmag/_polyprism.pyx":364 @cython.wraparound(False) * @cython.boundscheck(False) def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb); } __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; 
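/*
 * The critical section above (__pyx_parallel_lastprivates1) copies the
 * thread-local loop variables into shared __pyx_parallel_temp* slots, and
 * the "if (__pyx_parallel_why)" block copies them back once the parallel
 * region ends; this is how the generator emulates lastprivate semantics
 * when a prange body can leave the loop early through an error path.
 * From here the normal-exit path releases the six array buffer views
 * acquired at entry.  The error path (__pyx_L1_error) releases the same
 * buffers but brackets the calls with __Pyx_ErrFetch/__Pyx_ErrRestore so
 * the pending exception survives any error raised while a buffer is
 * being released.
 */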
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":394 @cython.wraparound(False) * @cython.boundscheck(False) def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp not * None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] yp not * None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_5gxy(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds); /* proto */ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_4gxy[] = "gxy(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_5gxy = {__Pyx_NAMESTR("gxy"), (PyCFunction) __pyx_pw_8fatiando_7gravmag_10_polyprism_5gxy, METH_VARARGS | METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_4gxy)}; static PyObject * __pyx_pw_8fatiando_7gravmag_10_polyprism_5gxy(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxy (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp, &__pyx_n_s_yp, &__pyx_n_s_zp, &__pyx_n_s_x, &__pyx_n_s_y, &__pyx_n_s_z1, &__pyx_n_s_z2, &__pyx_n_s_density, &__pyx_n_s_res, 0}; PyObject *values[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 1); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 2); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) 
kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 3); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 4); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 5); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 6); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 7); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, 8); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxy") < 0)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *) values[0]); __pyx_v_yp = ((PyArrayObject *) values[1]); __pyx_v_zp = ((PyArrayObject *) values[2]); __pyx_v_x = ((PyArrayObject *) values[3]); __pyx_v_y = ((PyArrayObject *) values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 399; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_res = ((PyArrayObject *) values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxy", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_xp), 
__pyx_ptype_5numpy_ndarray, 0, "xp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 398; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 400; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_4gxy(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject * __pyx_pf_8fatiando_7gravmag_10_polyprism_4gxy(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject * __pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxy", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount 
= 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject *) __pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject *) __pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject *) __pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject *) __pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject *) __pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; 
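/*
 * Each successfully acquired buffer caches its stride and shape in
 * diminfo[0]; the inner loops then index through __Pyx_BufPtrStrided1d()
 * using only the cached stride, and with boundscheck(False) in the source
 * the cached shape is not consulted on access in the loops here.
 */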
__pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject *) __pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES | PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* * "fatiando/gravmag/_polyprism.pyx":404 unsigned int nverts, size, i, k, * kp1 DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr nverts = * len(x) # <<<<<<<<<<<<<< size = len(res) with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 404; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_nverts = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":405 DTYPE_T kernel, X1, Y1, X2, Y2, * Z1, Z2, Z1_sqr, Z2_sqr nverts = len(x) size = len(res) # * <<<<<<<<<<<<<< with nogil: for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 405; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_size = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":406 nverts = len(x) size = len(res) * with nogil: # <<<<<<<<<<<<<< for i in prange(size): Z1 = * z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /* try: */ { /* * "fatiando/gravmag/_polyprism.pyx":407 size = len(res) with * nogil: for i in prange(size): # <<<<<<<<<<<<<< Z1 * = z1 - zp[i] Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { unsigned int __pyx_parallel_temp0 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; unsigned int __pyx_parallel_temp11 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, 
__pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_k) lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_kp1) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++) { if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* * Initialize private variables to invalid * values */ __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); /* * "fatiando/gravmag/_polyprism.pyx":408 with * nogil: for i in prange(size): Z1 = z1 - * zp[i] # <<<<<<<<<<<<<< Z2 = z2 * - zp[i] Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":409 for * i in prange(size): Z1 = z1 - zp[i] Z2 = z2 * - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":410 Z1 = * z1 - zp[i] Z2 = z2 - zp[i] Z1_sqr = Z1**2 * # <<<<<<<<<<<<<< Z2_sqr = Z2**2 kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":411 Z2 = * z2 - zp[i] Z1_sqr = Z1**2 Z2_sqr = Z2**2 * # <<<<<<<<<<<<<< kernel = 0 for k in * range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":412 * Z1_sqr = Z1**2 Z2_sqr = Z2**2 kernel = 0 * # <<<<<<<<<<<<<< for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":413 * Z2_sqr = Z2**2 kernel = 0 for k in * range(nverts): # * <<<<<<<<<<<<<< X1 = x[k] - xp[i] Y1 = y[k] * - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8 += 1) { __pyx_v_k = __pyx_t_8; /* * "fatiando/gravmag/_polyprism.pyx":414 * kernel = 0 for k in range(nverts): X1 * = x[k] - xp[i] # * <<<<<<<<<<<<<< Y1 = y[k] - yp[i] kp1 = * (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - 
(*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":415 * for k in range(nverts): X1 = x[k] - * xp[i] Y1 = y[k] - yp[i] # * <<<<<<<<<<<<<< kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":416 * X1 = x[k] - xp[i] Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # * <<<<<<<<<<<<<< X2 = x[kp1] - xp[i] Y2 * = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 416; __pyx_clineno = __LINE__; goto __pyx_L8_error; } } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* * "fatiando/gravmag/_polyprism.pyx":417 * Y1 = y[k] - yp[i] kp1 = (k + 1) % * nverts X2 = x[kp1] - xp[i] * # <<<<<<<<<<<<<< Y2 = y[kp1] - yp[i] * kernel += kernelxy(X1, Y1, X2, Y2, Z1, * Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":418 * kp1 = (k + 1) % nverts X2 = x[kp1] - * xp[i] Y2 = y[kp1] - yp[i] * # <<<<<<<<<<<<<< kernel += * kernelxy(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) res[i] += * kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":419 * X2 = x[kp1] - xp[i] Y2 = y[kp1] - * yp[i] kernel += kernelxy(X1, Y1, X2, * Y2, Z1, Z2, Z1_sqr, Z2_sqr) * # <<<<<<<<<<<<<< res[i] += * kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* * "fatiando/gravmag/_polyprism.pyx":420 Y2 = * y[kp1] - yp[i] kernel += kernelxy(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) res[i] += * kernel*density # * <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE 
__pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates2) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_k; __pyx_parallel_temp1 = __pyx_v_Y1; __pyx_parallel_temp2 = __pyx_v_X2; __pyx_parallel_temp3 = __pyx_v_Z1; __pyx_parallel_temp4 = __pyx_v_Y2; __pyx_parallel_temp5 = __pyx_v_Z2_sqr; __pyx_parallel_temp6 = __pyx_v_Z1_sqr; __pyx_parallel_temp7 = __pyx_v_Z2; __pyx_parallel_temp8 = __pyx_v_kernel; __pyx_parallel_temp9 = __pyx_v_X1; __pyx_parallel_temp10 = __pyx_v_kp1; __pyx_parallel_temp11 = __pyx_v_i; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* * This may have been overridden by a continue, break or * return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_k = __pyx_parallel_temp0; __pyx_v_Y1 = __pyx_parallel_temp1; __pyx_v_X2 = __pyx_parallel_temp2; __pyx_v_Z1 = __pyx_parallel_temp3; __pyx_v_Y2 = __pyx_parallel_temp4; __pyx_v_Z2_sqr = __pyx_parallel_temp5; __pyx_v_Z1_sqr = __pyx_parallel_temp6; __pyx_v_Z2 = __pyx_parallel_temp7; __pyx_v_kernel = __pyx_parallel_temp8; __pyx_v_X1 = __pyx_parallel_temp9; __pyx_v_kp1 = __pyx_parallel_temp10; __pyx_v_i = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* * "fatiando/gravmag/_polyprism.pyx":406 nverts = len(x) size = * len(res) with nogil: # <<<<<<<<<<<<<< for i in * prange(size): Z1 = z1 - zp[i] */ /* finally: */ { /* normal exit: */ { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* * "fatiando/gravmag/_polyprism.pyx":394 @cython.wraparound(False) * @cython.boundscheck(False) def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = 
Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb); } __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":424 @cython.wraparound(False) * @cython.boundscheck(False) def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp not * None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] yp not * None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_7gxz(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds); /* proto */ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_6gxz[] = "gxz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_7gxz = {__Pyx_NAMESTR("gxz"), (PyCFunction) __pyx_pw_8fatiando_7gravmag_10_polyprism_7gxz, METH_VARARGS | METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_6gxz)}; static PyObject * __pyx_pw_8fatiando_7gravmag_10_polyprism_7gxz(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gxz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp, &__pyx_n_s_yp, &__pyx_n_s_zp, &__pyx_n_s_x, &__pyx_n_s_y, &__pyx_n_s_z1, &__pyx_n_s_z2, &__pyx_n_s_density, &__pyx_n_s_res, 0}; PyObject *values[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args 
= PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 1); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 2); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 3); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 4); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 5); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 6); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 7); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, 8); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gxz") < 0)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *) values[0]); __pyx_v_yp = ((PyArrayObject *) values[1]); __pyx_v_zp = ((PyArrayObject *) values[2]); __pyx_v_x = ((PyArrayObject *) values[3]); __pyx_v_y = ((PyArrayObject *) values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 429; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 429; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_density = 
__pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 429; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_res = ((PyArrayObject *) values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gxz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 425; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 426; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 427; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 428; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 430; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_6gxz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject * __pyx_pf_8fatiando_7gravmag_10_polyprism_6gxz(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject * __pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; 
__Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gxz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject *) __pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject *) __pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject *) __pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject *) __pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename 
= __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject *) __pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject *) __pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES | PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* * "fatiando/gravmag/_polyprism.pyx":434 unsigned int nverts, size, i, k, * kp1 DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr nverts = * len(x) # <<<<<<<<<<<<<< size = len(res) with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 434; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_nverts = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":435 DTYPE_T kernel, X1, Y1, X2, Y2, * Z1, Z2, Z1_sqr, Z2_sqr nverts = len(x) size = len(res) # * <<<<<<<<<<<<<< with nogil: for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 435; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_size = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":436 nverts = len(x) size = len(res) * with nogil: # <<<<<<<<<<<<<< for i in prange(size): Z1 = * z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /* try: */ { /* * "fatiando/gravmag/_polyprism.pyx":437 size = len(res) with * nogil: for i in prange(size): # <<<<<<<<<<<<<< Z1 * = z1 - zp[i] Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); unsigned int __pyx_parallel_temp2 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); unsigned int __pyx_parallel_temp5 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); unsigned int __pyx_parallel_temp9 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); const char 
*__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Z2) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Y1) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++) { if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* * Initialize private variables to invalid * values */ __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); /* * "fatiando/gravmag/_polyprism.pyx":438 with * nogil: for i in prange(size): Z1 = z1 - * zp[i] # <<<<<<<<<<<<<< Z2 = z2 * - zp[i] Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":439 for * i in prange(size): Z1 = z1 - zp[i] Z2 = z2 * - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":440 Z1 = * z1 - zp[i] Z2 = z2 - zp[i] Z1_sqr = Z1**2 * # <<<<<<<<<<<<<< Z2_sqr = Z2**2 kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":441 Z2 = * z2 - zp[i] Z1_sqr = Z1**2 Z2_sqr = Z2**2 * # <<<<<<<<<<<<<< kernel = 0 for k in * range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":442 * Z1_sqr = Z1**2 Z2_sqr = Z2**2 kernel = 0 * # <<<<<<<<<<<<<< for k in range(nverts): * X1 = 
x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":443 * Z2_sqr = Z2**2 kernel = 0 for k in * range(nverts): # * <<<<<<<<<<<<<< X1 = x[k] - xp[i] Y1 = y[k] * - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8 += 1) { __pyx_v_k = __pyx_t_8; /* * "fatiando/gravmag/_polyprism.pyx":444 * kernel = 0 for k in range(nverts): X1 * = x[k] - xp[i] # * <<<<<<<<<<<<<< Y1 = y[k] - yp[i] kp1 = * (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":445 * for k in range(nverts): X1 = x[k] - * xp[i] Y1 = y[k] - yp[i] # * <<<<<<<<<<<<<< kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":446 * X1 = x[k] - xp[i] Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # * <<<<<<<<<<<<<< X2 = x[kp1] - xp[i] Y2 * = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 446; __pyx_clineno = __LINE__; goto __pyx_L8_error; } } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* * "fatiando/gravmag/_polyprism.pyx":447 * Y1 = y[k] - yp[i] kp1 = (k + 1) % * nverts X2 = x[kp1] - xp[i] * # <<<<<<<<<<<<<< Y2 = y[kp1] - yp[i] * kernel += kernelxz(X1, Y1, X2, Y2, Z1, * Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":448 * kp1 = (k + 1) % nverts X2 = x[kp1] - * xp[i] Y2 = y[kp1] - yp[i] * # <<<<<<<<<<<<<< kernel += * kernelxz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) res[i] += * kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":449 * X2 = x[kp1] - xp[i] Y2 = y[kp1] - * yp[i] kernel += kernelxz(X1, Y1, X2, * Y2, Z1, Z2, Z1_sqr, Z2_sqr) * # <<<<<<<<<<<<<< res[i] += * kernel*density * */ 
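/* Editor's note (not part of the generated module): stripped of buffer
 * bookkeeping and error handling, the loop being generated here is the
 * following plain-C computation (a sketch assuming contiguous double
 * arrays; the generated code supports arbitrary strides, and kernelxz is
 * the module's C-level kernel declared elsewhere in this file). The
 * ternary wrap-around stands in for the Python-level `(k + 1) % nverts`,
 * which is why the generated code must guard against nverts == 0 before
 * taking the modulo.
 *
 *     for (i = 0; i < size; i++) {
 *         double Z1 = z1 - zp[i], Z2 = z2 - zp[i];
 *         double Z1_sqr = Z1 * Z1, Z2_sqr = Z2 * Z2;
 *         double kernel = 0.0;
 *         for (k = 0; k < nverts; k++) {
 *             unsigned int kp1 = (k + 1 == nverts) ? 0 : k + 1;  // wrap to first vertex
 *             double X1 = x[k]   - xp[i], Y1 = y[k]   - yp[i];
 *             double X2 = x[kp1] - xp[i], Y2 = y[kp1] - yp[i];
 *             kernel += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr);
 *         }
 *         res[i] += kernel * density;           // superpose this prism's effect
 *     }
 */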
__pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* * "fatiando/gravmag/_polyprism.pyx":450 Y2 = * y[kp1] - yp[i] kernel += kernelxz(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) res[i] += * kernel*density # * <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates3) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_kernel; __pyx_parallel_temp1 = __pyx_v_Y2; __pyx_parallel_temp2 = __pyx_v_k; __pyx_parallel_temp3 = __pyx_v_Z1; __pyx_parallel_temp4 = __pyx_v_Z2_sqr; __pyx_parallel_temp5 = __pyx_v_kp1; __pyx_parallel_temp6 = __pyx_v_X2; __pyx_parallel_temp7 = __pyx_v_Z1_sqr; __pyx_parallel_temp8 = __pyx_v_Z2; __pyx_parallel_temp9 = __pyx_v_i; __pyx_parallel_temp10 = __pyx_v_X1; __pyx_parallel_temp11 = __pyx_v_Y1; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* * This may have been overridden by a continue, break or * return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_kernel = __pyx_parallel_temp0; __pyx_v_Y2 = __pyx_parallel_temp1; __pyx_v_k = __pyx_parallel_temp2; __pyx_v_Z1 = __pyx_parallel_temp3; __pyx_v_Z2_sqr = __pyx_parallel_temp4; __pyx_v_kp1 = __pyx_parallel_temp5; __pyx_v_X2 = __pyx_parallel_temp6; __pyx_v_Z1_sqr = __pyx_parallel_temp7; __pyx_v_Z2 = __pyx_parallel_temp8; __pyx_v_i = __pyx_parallel_temp9; __pyx_v_X1 = __pyx_parallel_temp10; __pyx_v_Y1 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* * "fatiando/gravmag/_polyprism.pyx":436 nverts = len(x) size = * len(res) with nogil: # <<<<<<<<<<<<<< for i in * prange(size): Z1 = z1 - zp[i] */ /* finally: */ { /* normal exit: */ { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* * "fatiando/gravmag/_polyprism.pyx":424 @cython.wraparound(False) * @cython.boundscheck(False) def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb); } __Pyx_AddTraceback("fatiando.gravmag._polyprism.gxz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":454 @cython.wraparound(False) * @cython.boundscheck(False) def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp not * None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] yp not * None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject 
*__pyx_pw_8fatiando_7gravmag_10_polyprism_9gyy(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds); /* proto */ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_8gyy[] = "gyy(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_9gyy = {__Pyx_NAMESTR("gyy"), (PyCFunction) __pyx_pw_8fatiando_7gravmag_10_polyprism_9gyy, METH_VARARGS | METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_8gyy)}; static PyObject * __pyx_pw_8fatiando_7gravmag_10_polyprism_9gyy(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gyy (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp, &__pyx_n_s_yp, &__pyx_n_s_zp, &__pyx_n_s_x, &__pyx_n_s_y, &__pyx_n_s_z1, &__pyx_n_s_z2, &__pyx_n_s_density, &__pyx_n_s_res, 0}; PyObject *values[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 1); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 2); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 3); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 4); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 5); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { 
__Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 6); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 7); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, 8); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gyy") < 0)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *) values[0]); __pyx_v_yp = ((PyArrayObject *) values[1]); __pyx_v_zp = ((PyArrayObject *) values[2]); __pyx_v_x = ((PyArrayObject *) values[3]); __pyx_v_y = ((PyArrayObject *) values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 459; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 459; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 459; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_res = ((PyArrayObject *) values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gyy", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gyy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 455; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 456; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 457; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if 
(unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 458; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_8gyy(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject * __pyx_pf_8fatiando_7gravmag_10_polyprism_8gyy(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject * __pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gyy", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; 
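/* Editor's note (not part of the generated module): every buffer record
 * is zeroed here before acquisition so that the shared error path can
 * safely release only the buffers that were actually obtained. The
 * release itself must not disturb the exception already set, which is
 * what the __Pyx_ErrFetch/__Pyx_ErrRestore pair in the exit code below
 * accomplishes. A minimal sketch with the public CPython calls
 * (view_res and view_x are hypothetical Py_buffer locals):
 *
 *     PyObject *type, *value, *tb;
 *     PyErr_Fetch(&type, &value, &tb);   // park the pending exception
 *     PyBuffer_Release(&view_res);       // cleanup may clear the error
 *     PyBuffer_Release(&view_x);         //   indicator internally
 *     PyErr_Restore(type, value, tb);    // re-arm the original exception
 */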
__pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject *) __pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject *) __pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject *) __pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject *) __pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject *) __pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject *) __pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES | PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* * "fatiando/gravmag/_polyprism.pyx":464 unsigned int nverts, size, i, k, * kp1 DTYPE_T kernel, 
X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr nverts = * len(x) # <<<<<<<<<<<<<< size = len(res) with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 464; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_nverts = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":465 DTYPE_T kernel, X1, Y1, X2, Y2, * Z1, Z2, Z1_sqr, Z2_sqr nverts = len(x) size = len(res) # * <<<<<<<<<<<<<< with nogil: for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 465; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_size = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":466 nverts = len(x) size = len(res) * with nogil: # <<<<<<<<<<<<<< for i in prange(size): Z1 = * z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /* try: */ { /* * "fatiando/gravmag/_polyprism.pyx":467 size = len(res) with * nogil: for i in prange(size): # <<<<<<<<<<<<<< Z1 * = z1 - zp[i] Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { unsigned int __pyx_parallel_temp0 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); unsigned int __pyx_parallel_temp2 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_Z1_sqr) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Y2) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++) { if (__pyx_parallel_why 
< 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* * Initialize private variables to invalid * values */ __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); /* * "fatiando/gravmag/_polyprism.pyx":468 with * nogil: for i in prange(size): Z1 = z1 - * zp[i] # <<<<<<<<<<<<<< Z2 = z2 * - zp[i] Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":469 for * i in prange(size): Z1 = z1 - zp[i] Z2 = z2 * - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":470 Z1 = * z1 - zp[i] Z2 = z2 - zp[i] Z1_sqr = Z1**2 * # <<<<<<<<<<<<<< Z2_sqr = Z2**2 kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":471 Z2 = * z2 - zp[i] Z1_sqr = Z1**2 Z2_sqr = Z2**2 * # <<<<<<<<<<<<<< kernel = 0 for k in * range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":472 * Z1_sqr = Z1**2 Z2_sqr = Z2**2 kernel = 0 * # <<<<<<<<<<<<<< for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":473 * Z2_sqr = Z2**2 kernel = 0 for k in * range(nverts): # * <<<<<<<<<<<<<< X1 = x[k] - xp[i] Y1 = y[k] * - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8 += 1) { __pyx_v_k = __pyx_t_8; /* * "fatiando/gravmag/_polyprism.pyx":474 * kernel = 0 for k in range(nverts): X1 * = x[k] - xp[i] # * <<<<<<<<<<<<<< Y1 = y[k] - yp[i] kp1 = * (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":475 * for k in range(nverts): X1 = x[k] - * xp[i] Y1 = y[k] - yp[i] # * <<<<<<<<<<<<<< kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * 
"fatiando/gravmag/_polyprism.pyx":476 * X1 = x[k] - xp[i] Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # * <<<<<<<<<<<<<< X2 = x[kp1] - xp[i] Y2 * = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 476; __pyx_clineno = __LINE__; goto __pyx_L8_error; } } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* * "fatiando/gravmag/_polyprism.pyx":477 * Y1 = y[k] - yp[i] kp1 = (k + 1) % * nverts X2 = x[kp1] - xp[i] * # <<<<<<<<<<<<<< Y2 = y[kp1] - yp[i] * kernel += kernelyy(X1, Y1, X2, Y2, Z1, * Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":478 * kp1 = (k + 1) % nverts X2 = x[kp1] - * xp[i] Y2 = y[kp1] - yp[i] * # <<<<<<<<<<<<<< kernel += * kernelyy(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) res[i] += * kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":479 * X2 = x[kp1] - xp[i] Y2 = y[kp1] - * yp[i] kernel += kernelyy(X1, Y1, X2, * Y2, Z1, Z2, Z1_sqr, Z2_sqr) * # <<<<<<<<<<<<<< res[i] += * kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* * "fatiando/gravmag/_polyprism.pyx":480 Y2 = * y[kp1] - yp[i] kernel += kernelyy(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) res[i] += * kernel*density # * <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates4) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_kp1; __pyx_parallel_temp1 = __pyx_v_Z1_sqr; __pyx_parallel_temp2 = __pyx_v_i; __pyx_parallel_temp3 = __pyx_v_kernel; 
__pyx_parallel_temp4 = __pyx_v_Z2; __pyx_parallel_temp5 = __pyx_v_Z1; __pyx_parallel_temp6 = __pyx_v_Y1; __pyx_parallel_temp7 = __pyx_v_X2; __pyx_parallel_temp8 = __pyx_v_X1; __pyx_parallel_temp9 = __pyx_v_Z2_sqr; __pyx_parallel_temp10 = __pyx_v_k; __pyx_parallel_temp11 = __pyx_v_Y2; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* * This may have been overridden by a continue, break or * return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_kp1 = __pyx_parallel_temp0; __pyx_v_Z1_sqr = __pyx_parallel_temp1; __pyx_v_i = __pyx_parallel_temp2; __pyx_v_kernel = __pyx_parallel_temp3; __pyx_v_Z2 = __pyx_parallel_temp4; __pyx_v_Z1 = __pyx_parallel_temp5; __pyx_v_Y1 = __pyx_parallel_temp6; __pyx_v_X2 = __pyx_parallel_temp7; __pyx_v_X1 = __pyx_parallel_temp8; __pyx_v_Z2_sqr = __pyx_parallel_temp9; __pyx_v_k = __pyx_parallel_temp10; __pyx_v_Y2 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* * "fatiando/gravmag/_polyprism.pyx":466 nverts = len(x) size = * len(res) with nogil: # <<<<<<<<<<<<<< for i in * prange(size): Z1 = z1 - zp[i] */ /* finally: */ { /* normal exit: */ { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* * "fatiando/gravmag/_polyprism.pyx":454 @cython.wraparound(False) * @cython.boundscheck(False) def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb); } __Pyx_AddTraceback("fatiando.gravmag._polyprism.gyy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; 
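/* Editor's note (not part of the generated module): the
 * __pyx_parallel_why machinery above exists because OpenMP forbids
 * jumping out of a parallel for. A failing iteration records the error
 * in a shared flag, the remaining iterations degrade to no-ops, and the
 * saved exception is re-raised after the threads join. A minimal sketch
 * of the pattern (do_step and handle_error are hypothetical):
 *
 *     int why = 0;
 *     #pragma omp parallel for shared(why)
 *     for (i = 0; i < n; i++) {
 *         if (why == 0 && do_step(i) != 0) {
 *             #pragma omp critical
 *             why = 4;               // first failure wins; others drain
 *         }
 *     }
 *     if (why == 4) handle_error();  // re-raise outside the region
 */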
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":484 @cython.wraparound(False) * @cython.boundscheck(False) def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp not * None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] yp not * None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_11gyz(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds); /* proto */ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_10gyz[] = "gyz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_11gyz = {__Pyx_NAMESTR("gyz"), (PyCFunction) __pyx_pw_8fatiando_7gravmag_10_polyprism_11gyz, METH_VARARGS | METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_10gyz)}; static PyObject * __pyx_pw_8fatiando_7gravmag_10_polyprism_11gyz(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gyz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp, &__pyx_n_s_yp, &__pyx_n_s_zp, &__pyx_n_s_x, &__pyx_n_s_y, &__pyx_n_s_z1, &__pyx_n_s_z2, &__pyx_n_s_density, &__pyx_n_s_res, 0}; PyObject *values[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 1); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 2); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) 
kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 3); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 4); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 5); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 6); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 7); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, 8); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gyz") < 0)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *) values[0]); __pyx_v_yp = ((PyArrayObject *) values[1]); __pyx_v_zp = ((PyArrayObject *) values[2]); __pyx_v_x = ((PyArrayObject *) values[3]); __pyx_v_y = ((PyArrayObject *) values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 489; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_res = ((PyArrayObject *) values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gyz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gyz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_xp), 
__pyx_ptype_5numpy_ndarray, 0, "xp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 485; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 486; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 487; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 488; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 490; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_10gyz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject * __pyx_pf_8fatiando_7gravmag_10_polyprism_10gyz(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject * __pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gyz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; 
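/*
 * For reference, the gyz() loop as written in
 * "fatiando/gravmag/_polyprism.pyx" (lines 494-510), reassembled from the
 * inline source comments scattered through the generated code below; the
 * variables correspond to the C locals declared above.
 *
 *     nverts = len(x)
 *     size = len(res)
 *     with nogil:
 *         for i in prange(size):
 *             Z1 = z1 - zp[i]
 *             Z2 = z2 - zp[i]
 *             Z1_sqr = Z1**2
 *             Z2_sqr = Z2**2
 *             kernel = 0
 *             for k in range(nverts):
 *                 X1 = x[k] - xp[i]
 *                 Y1 = y[k] - yp[i]
 *                 kp1 = (k + 1) % nverts
 *                 X2 = x[kp1] - xp[i]
 *                 Y2 = y[kp1] - yp[i]
 *                 kernel += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
 *             res[i] += kernel*density
 */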
__pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject *) __pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject *) __pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject *) __pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject *) __pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject *) __pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_y.diminfo[0].strides = 
__pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject *) __pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES | PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* * "fatiando/gravmag/_polyprism.pyx":494 unsigned int nverts, size, i, k, * kp1 DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr nverts = * len(x) # <<<<<<<<<<<<<< size = len(res) with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 494; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_nverts = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":495 DTYPE_T kernel, X1, Y1, X2, Y2, * Z1, Z2, Z1_sqr, Z2_sqr nverts = len(x) size = len(res) # * <<<<<<<<<<<<<< with nogil: for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 495; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_size = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":496 nverts = len(x) size = len(res) * with nogil: # <<<<<<<<<<<<<< for i in prange(size): Z1 = * z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /* try: */ { /* * "fatiando/gravmag/_polyprism.pyx":497 size = len(res) with * nogil: for i in prange(size): # <<<<<<<<<<<<<< Z1 * = z1 - zp[i] Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); unsigned int __pyx_parallel_temp1 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; unsigned int __pyx_parallel_temp11 = 0xbad0bad0; const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, 
__pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Y1) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_k) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++) { if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* * Initialize private variables to invalid * values */ __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); /* * "fatiando/gravmag/_polyprism.pyx":498 with * nogil: for i in prange(size): Z1 = z1 - * zp[i] # <<<<<<<<<<<<<< Z2 = z2 * - zp[i] Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":499 for * i in prange(size): Z1 = z1 - zp[i] Z2 = z2 * - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":500 Z1 = * z1 - zp[i] Z2 = z2 - zp[i] Z1_sqr = Z1**2 * # <<<<<<<<<<<<<< Z2_sqr = Z2**2 kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":501 Z2 = * z2 - zp[i] Z1_sqr = Z1**2 Z2_sqr = Z2**2 * # <<<<<<<<<<<<<< kernel = 0 for k in * range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":502 * Z1_sqr = Z1**2 Z2_sqr = Z2**2 kernel = 0 * # <<<<<<<<<<<<<< for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":503 * Z2_sqr = Z2**2 kernel = 0 for k in * range(nverts): # * <<<<<<<<<<<<<< X1 = x[k] - xp[i] Y1 = y[k] * - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8 += 1) { __pyx_v_k = __pyx_t_8; /* * "fatiando/gravmag/_polyprism.pyx":504 * kernel = 0 for k in range(nverts): X1 * = x[k] - xp[i] # * <<<<<<<<<<<<<< Y1 = y[k] - yp[i] kp1 = * (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, 
__pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":505 * for k in range(nverts): X1 = x[k] - * xp[i] Y1 = y[k] - yp[i] # * <<<<<<<<<<<<<< kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":506 * X1 = x[k] - xp[i] Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # * <<<<<<<<<<<<<< X2 = x[kp1] - xp[i] Y2 * = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 506; __pyx_clineno = __LINE__; goto __pyx_L8_error; } } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* * "fatiando/gravmag/_polyprism.pyx":507 * Y1 = y[k] - yp[i] kp1 = (k + 1) % * nverts X2 = x[kp1] - xp[i] * # <<<<<<<<<<<<<< Y2 = y[kp1] - yp[i] * kernel += kernelyz(X1, Y1, X2, Y2, Z1, * Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":508 * kp1 = (k + 1) % nverts X2 = x[kp1] - * xp[i] Y2 = y[kp1] - yp[i] * # <<<<<<<<<<<<<< kernel += * kernelyz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) res[i] += * kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":509 * X2 = x[kp1] - xp[i] Y2 = y[kp1] - * yp[i] kernel += kernelyz(X1, Y1, X2, * Y2, Z1, Z2, Z1_sqr, Z2_sqr) * # <<<<<<<<<<<<<< res[i] += * kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* * "fatiando/gravmag/_polyprism.pyx":510 Y2 = * y[kp1] - yp[i] kernel += kernelyz(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) res[i] += * kernel*density # * <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef 
WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates5) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_Z2; __pyx_parallel_temp1 = __pyx_v_kp1; __pyx_parallel_temp2 = __pyx_v_Z1; __pyx_parallel_temp3 = __pyx_v_X1; __pyx_parallel_temp4 = __pyx_v_Z2_sqr; __pyx_parallel_temp5 = __pyx_v_Y2; __pyx_parallel_temp6 = __pyx_v_X2; __pyx_parallel_temp7 = __pyx_v_Z1_sqr; __pyx_parallel_temp8 = __pyx_v_Y1; __pyx_parallel_temp9 = __pyx_v_kernel; __pyx_parallel_temp10 = __pyx_v_i; __pyx_parallel_temp11 = __pyx_v_k; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* * This may have been overridden by a continue, break or * return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Z2 = __pyx_parallel_temp0; __pyx_v_kp1 = __pyx_parallel_temp1; __pyx_v_Z1 = __pyx_parallel_temp2; __pyx_v_X1 = __pyx_parallel_temp3; __pyx_v_Z2_sqr = __pyx_parallel_temp4; __pyx_v_Y2 = __pyx_parallel_temp5; __pyx_v_X2 = __pyx_parallel_temp6; __pyx_v_Z1_sqr = __pyx_parallel_temp7; __pyx_v_Y1 = __pyx_parallel_temp8; __pyx_v_kernel = __pyx_parallel_temp9; __pyx_v_i = __pyx_parallel_temp10; __pyx_v_k = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* * "fatiando/gravmag/_polyprism.pyx":496 nverts = len(x) size = * len(res) with nogil: # <<<<<<<<<<<<<< for i in * prange(size): Z1 = z1 - zp[i] */ /* finally: */ { /* normal exit: */ { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* * "fatiando/gravmag/_polyprism.pyx":484 @cython.wraparound(False) * @cython.boundscheck(False) def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* 
function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb); } __Pyx_AddTraceback("fatiando.gravmag._polyprism.gyz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":514 @cython.wraparound(False) * @cython.boundscheck(False) def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp not * None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] yp not * None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_13gzz(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds); /* proto */ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_12gzz[] = "gzz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double density, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_13gzz = {__Pyx_NAMESTR("gzz"), (PyCFunction) __pyx_pw_8fatiando_7gravmag_10_polyprism_13gzz, METH_VARARGS | METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_12gzz)}; static PyObject * __pyx_pw_8fatiando_7gravmag_10_polyprism_13gzz(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_density; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gzz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp, &__pyx_n_s_yp, &__pyx_n_s_zp, &__pyx_n_s_x, &__pyx_n_s_y, &__pyx_n_s_z1, &__pyx_n_s_z2, &__pyx_n_s_density, &__pyx_n_s_res, 0}; PyObject *values[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: 
goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 1); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 2); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 3); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 4); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 5); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 6); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_density)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 7); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, 8); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gzz") < 0)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } else if (PyTuple_GET_SIZE(__pyx_args) != 9) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); } __pyx_v_xp = ((PyArrayObject *) values[0]); __pyx_v_yp = ((PyArrayObject *) values[1]); __pyx_v_zp = ((PyArrayObject *) values[2]); __pyx_v_x = ((PyArrayObject *) values[3]); __pyx_v_y = ((PyArrayObject *) values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 519; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 519; __pyx_clineno = __LINE__; goto 
__pyx_L3_error; } __pyx_v_density = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_density == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 519; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_res = ((PyArrayObject *) values[8]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gzz", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.gzz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 515; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 518; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 520; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_12gzz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_density, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject * __pyx_pf_8fatiando_7gravmag_10_polyprism_12gzz(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_density, PyArrayObject * __pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_kernel; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; 
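/*
 * The parallel section below is what Cython emits for gzz()'s
 * "for i in prange(size)". As a minimal hand-written OpenMP sketch of the
 * same loop structure (illustrative only: "npoints" and "DTYPE_T" stand in
 * for the sizes and typedefs used by the generated code, and kernelzz is
 * the module's per-edge kernel, __pyx_f_..._kernelzz here):
 *
 *     #pragma omp parallel for
 *     for (unsigned int i = 0; i < npoints; i++) {
 *         DTYPE_T Z1 = z1 - zp[i], Z2 = z2 - zp[i];
 *         DTYPE_T Z1_sqr = Z1 * Z1, Z2_sqr = Z2 * Z2;
 *         DTYPE_T kernel = 0;                  // thread-local accumulator
 *         for (unsigned int k = 0; k < nverts; k++) {
 *             unsigned int kp1 = (k + 1) % nverts;
 *             DTYPE_T X1 = x[k] - xp[i], Y1 = y[k] - yp[i];
 *             DTYPE_T X2 = x[kp1] - xp[i], Y2 = y[kp1] - yp[i];
 *             kernel += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr);
 *         }
 *         res[i] += kernel * density;          // each i writes only res[i]
 *     }
 *
 * The generated code reaches the same effect with a combined
 * "omp parallel reduction(+:kernel)" plus "omp for lastprivate(...)" pair,
 * because Cython must also preserve Python exception and loop-exit
 * semantics across threads.
 */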
__Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("gzz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject *) __pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject *) __pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject *) __pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject *) __pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, 
__pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject *) __pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject *) __pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES | PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* * "fatiando/gravmag/_polyprism.pyx":524 unsigned int nverts, size, i, k, * kp1 DTYPE_T kernel, X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr nverts = * len(x) # <<<<<<<<<<<<<< size = len(res) with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 524; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_nverts = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":525 DTYPE_T kernel, X1, Y1, X2, Y2, * Z1, Z2, Z1_sqr, Z2_sqr nverts = len(x) size = len(res) # * <<<<<<<<<<<<<< with nogil: for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_size = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":526 nverts = len(x) size = len(res) * with nogil: # <<<<<<<<<<<<<< for i in prange(size): Z1 = * z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /* try: */ { /* * "fatiando/gravmag/_polyprism.pyx":527 size = len(res) with * nogil: for i in prange(size): # <<<<<<<<<<<<<< Z1 * = z1 - zp[i] Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { unsigned int __pyx_parallel_temp0 = 0xbad0bad0; unsigned int __pyx_parallel_temp1 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); unsigned int __pyx_parallel_temp3 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T 
__pyx_parallel_temp11 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_kernel) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Z2_sqr) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_X2) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++) { if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* * Initialize private variables to invalid * values */ __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); /* * "fatiando/gravmag/_polyprism.pyx":528 with * nogil: for i in prange(size): Z1 = z1 - * zp[i] # <<<<<<<<<<<<<< Z2 = z2 * - zp[i] Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":529 for * i in prange(size): Z1 = z1 - zp[i] Z2 = z2 * - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":530 Z1 = * z1 - zp[i] Z2 = z2 - zp[i] Z1_sqr = Z1**2 * # <<<<<<<<<<<<<< Z2_sqr = Z2**2 kernel = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":531 Z2 = * z2 - zp[i] Z1_sqr = Z1**2 Z2_sqr = Z2**2 * # <<<<<<<<<<<<<< kernel = 0 for k in * range(nverts): */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":532 * Z1_sqr = Z1**2 Z2_sqr = Z2**2 kernel = 0 * # 
<<<<<<<<<<<<<< for k in range(nverts): * X1 = x[k] - xp[i] */ __pyx_v_kernel = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":533 * Z2_sqr = Z2**2 kernel = 0 for k in * range(nverts): # * <<<<<<<<<<<<<< X1 = x[k] - xp[i] Y1 = y[k] * - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8 += 1) { __pyx_v_k = __pyx_t_8; /* * "fatiando/gravmag/_polyprism.pyx":534 * kernel = 0 for k in range(nverts): X1 * = x[k] - xp[i] # * <<<<<<<<<<<<<< Y1 = y[k] - yp[i] kp1 = * (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":535 * for k in range(nverts): X1 = x[k] - * xp[i] Y1 = y[k] - yp[i] # * <<<<<<<<<<<<<< kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":536 * X1 = x[k] - xp[i] Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # * <<<<<<<<<<<<<< X2 = x[kp1] - xp[i] Y2 * = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 536; __pyx_clineno = __LINE__; goto __pyx_L8_error; } } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* * "fatiando/gravmag/_polyprism.pyx":537 * Y1 = y[k] - yp[i] kp1 = (k + 1) % * nverts X2 = x[kp1] - xp[i] * # <<<<<<<<<<<<<< Y2 = y[kp1] - yp[i] * kernel += kernelzz(X1, Y1, X2, Y2, Z1, * Z2, Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":538 * kp1 = (k + 1) % nverts X2 = x[kp1] - * xp[i] Y2 = y[kp1] - yp[i] * # <<<<<<<<<<<<<< kernel += * kernelzz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) res[i] += * kernel*density */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":539 * X2 = x[kp1] - xp[i] Y2 = y[kp1] - * yp[i] kernel += kernelzz(X1, Y1, X2, * Y2, Z1, Z2, Z1_sqr, Z2_sqr) * # 
<<<<<<<<<<<<<< res[i] += * kernel*density * */ __pyx_v_kernel = (__pyx_v_kernel + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* * "fatiando/gravmag/_polyprism.pyx":540 Y2 = * y[kp1] - yp[i] kernel += kernelzz(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) res[i] += * kernel*density # * <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (__pyx_v_kernel * __pyx_v_density); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates6) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_kp1; __pyx_parallel_temp1 = __pyx_v_k; __pyx_parallel_temp2 = __pyx_v_Z2_sqr; __pyx_parallel_temp3 = __pyx_v_i; __pyx_parallel_temp4 = __pyx_v_X1; __pyx_parallel_temp5 = __pyx_v_Y2; __pyx_parallel_temp6 = __pyx_v_Z1; __pyx_parallel_temp7 = __pyx_v_Z1_sqr; __pyx_parallel_temp8 = __pyx_v_kernel; __pyx_parallel_temp9 = __pyx_v_Z2; __pyx_parallel_temp10 = __pyx_v_Y1; __pyx_parallel_temp11 = __pyx_v_X2; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* * This may have been overridden by a continue, break or * return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_kp1 = __pyx_parallel_temp0; __pyx_v_k = __pyx_parallel_temp1; __pyx_v_Z2_sqr = __pyx_parallel_temp2; __pyx_v_i = __pyx_parallel_temp3; __pyx_v_X1 = __pyx_parallel_temp4; __pyx_v_Y2 = __pyx_parallel_temp5; __pyx_v_Z1 = __pyx_parallel_temp6; __pyx_v_Z1_sqr = __pyx_parallel_temp7; __pyx_v_kernel = __pyx_parallel_temp8; __pyx_v_Z2 = __pyx_parallel_temp9; __pyx_v_Y1 = __pyx_parallel_temp10; __pyx_v_X2 = __pyx_parallel_temp11; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* * "fatiando/gravmag/_polyprism.pyx":526 nverts = len(x) size = * len(res) with nogil: # <<<<<<<<<<<<<< for i in * prange(size): Z1 = z1 - zp[i] */ /* finally: */ { /* normal exit: */ { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* * "fatiando/gravmag/_polyprism.pyx":514 @cython.wraparound(False) * @cython.boundscheck(False) def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb); } __Pyx_AddTraceback("fatiando.gravmag._polyprism.gzz", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":544 @cython.wraparound(False) * @cython.boundscheck(False) def tf(numpy.ndarray[DTYPE_T, ndim=1] xp not * None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] yp not * None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject 
*__pyx_pw_8fatiando_7gravmag_10_polyprism_15tf(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds); /* proto */ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_14tf[] = "tf(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double mx, double my, double mz, double fx, double fy, double fz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_15tf = {__Pyx_NAMESTR("tf"), (PyCFunction) __pyx_pw_8fatiando_7gravmag_10_polyprism_15tf, METH_VARARGS | METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_14tf)}; static PyObject * __pyx_pw_8fatiando_7gravmag_10_polyprism_15tf(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; double __pyx_v_fx; double __pyx_v_fy; double __pyx_v_fz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("tf (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp, &__pyx_n_s_yp, &__pyx_n_s_zp, &__pyx_n_s_x, &__pyx_n_s_y, &__pyx_n_s_z1, &__pyx_n_s_z2, &__pyx_n_s_mx, &__pyx_n_s_my, &__pyx_n_s_mz, &__pyx_n_s_fx, &__pyx_n_s_fy, &__pyx_n_s_fz, &__pyx_n_s_res, 0}; PyObject *values[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13); case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 1); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 2); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 3); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { 
__Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 4); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 5); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 6); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 7); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 8); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 9); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 10); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 11: if (likely((values[11] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fy)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 11); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 12: if (likely((values[12] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_fz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 12); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 13: if (likely((values[13] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, 13); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "tf") < 0)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } else if (PyTuple_GET_SIZE(__pyx_args) != 14) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); values[11] = PyTuple_GET_ITEM(__pyx_args, 11); values[12] = PyTuple_GET_ITEM(__pyx_args, 12); values[13] = PyTuple_GET_ITEM(__pyx_args, 13); } __pyx_v_xp = ((PyArrayObject *) values[0]); __pyx_v_yp = ((PyArrayObject *) values[1]); __pyx_v_zp = ((PyArrayObject *) values[2]); __pyx_v_x = ((PyArrayObject *) values[3]); __pyx_v_y 
= ((PyArrayObject *) values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 549; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 549; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_fx = __pyx_PyFloat_AsDouble(values[10]); if (unlikely((__pyx_v_fx == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_fy = __pyx_PyFloat_AsDouble(values[11]); if (unlikely((__pyx_v_fy == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_fz = __pyx_PyFloat_AsDouble(values[12]); if (unlikely((__pyx_v_fz == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 550; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_res = ((PyArrayObject *) values[13]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("tf", 1, 14, 14, PyTuple_GET_SIZE(__pyx_args)); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.tf", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 545; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 547; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 548; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 551; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_14tf(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, 
__pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_fx, __pyx_v_fy, __pyx_v_fz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject * __pyx_pf_8fatiando_7gravmag_10_polyprism_14tf(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, double __pyx_v_fx, double __pyx_v_fy, double __pyx_v_fz, PyArrayObject * __pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("tf", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = 
&__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject *) __pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject *) __pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject *) __pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject *) __pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject *) __pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject *) __pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES | PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* * "fatiando/gravmag/_polyprism.pyx":556 DTYPE_T v1, v2, v3, v4, v5, v6 * DTYPE_T X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr nverts = len(x) * # <<<<<<<<<<<<<< size = len(res) with nogil: */ __pyx_t_1 
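/*
 * All six buffers (xp, yp, zp, x, y, res) are now acquired and validated;
 * only res was requested writable (PyBUF_WRITABLE). Mirroring the .pyx body
 * quoted above, the wrapper next evaluates nverts = len(x) and
 * size = len(res) before releasing the GIL for the prange block.
 */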
= PyObject_Length(((PyObject *) __pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 556; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_nverts = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":557 DTYPE_T X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr nverts = len(x) size = len(res) # * <<<<<<<<<<<<<< with nogil: for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 557; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_size = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":558 nverts = len(x) size = len(res) * with nogil: # <<<<<<<<<<<<<< for i in prange(size): Z1 = * z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /* try: */ { /* * "fatiando/gravmag/_polyprism.pyx":559 size = len(res) with * nogil: for i in prange(size): # <<<<<<<<<<<<<< Z1 * = z1 - zp[i] Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); unsigned int __pyx_parallel_temp8 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp12 = __PYX_NAN(); unsigned int __pyx_parallel_temp13 = 0xbad0bad0; unsigned int __pyx_parallel_temp14 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp15 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp16 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_v5) reduction(+:__pyx_v_v6) reduction(+:__pyx_v_v1) reduction(+:__pyx_v_v4) reduction(+:__pyx_v_v3) reduction(+:__pyx_v_v2) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef 
_OPENMP #pragma omp for lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_Z2_sqr) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_k) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++) { if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* * Initialize private variables to invalid * values */ __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); /* * "fatiando/gravmag/_polyprism.pyx":560 with * nogil: for i in prange(size): Z1 = z1 - * zp[i] # <<<<<<<<<<<<<< Z2 = z2 * - zp[i] Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":561 for * i in prange(size): Z1 = z1 - zp[i] Z2 = z2 * - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":562 Z1 = * z1 - zp[i] Z2 = z2 - zp[i] Z1_sqr = Z1**2 * # <<<<<<<<<<<<<< Z2_sqr = Z2**2 v1 = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":563 Z2 = * z2 - zp[i] Z1_sqr = Z1**2 Z2_sqr = Z2**2 * # <<<<<<<<<<<<<< v1 = 0 v2 = 0 */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":564 * Z1_sqr = Z1**2 Z2_sqr = Z2**2 v1 = 0 * # <<<<<<<<<<<<<< v2 = 0 v3 = 0 */ __pyx_v_v1 = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":565 * Z2_sqr = Z2**2 v1 = 0 v2 = 0 # * <<<<<<<<<<<<<< v3 = 0 v4 = 0 */ __pyx_v_v2 = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":566 v1 = * 0 v2 = 0 v3 = 0 # * <<<<<<<<<<<<<< v4 = 0 v5 = 0 */ __pyx_v_v3 = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":567 v2 = * 0 v3 = 0 v4 = 0 # * <<<<<<<<<<<<<< v5 = 0 v6 = 0 */ __pyx_v_v4 = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":568 v3 = * 0 v4 = 0 v5 = 0 # * <<<<<<<<<<<<<< v6 = 0 for k in * range(nverts): */ __pyx_v_v5 = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":569 v4 = * 0 v5 = 0 v6 = 0 # * <<<<<<<<<<<<<< for k in range(nverts): X1 * = x[k] - xp[i] */ __pyx_v_v6 = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":570 v5 = * 0 v6 = 0 for k in range(nverts): * # <<<<<<<<<<<<<< X1 = x[k] - xp[i] Y1 = * y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8 += 1) { __pyx_v_k = __pyx_t_8; /* * "fatiando/gravmag/_polyprism.pyx":571 * v6 = 0 for k in range(nverts): X1 = * x[k] - xp[i] # * <<<<<<<<<<<<<< Y1 = y[k] - yp[i] kp1 = * (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; 
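/*
 * Edge loop body (.pyx lines 571-581): each polygon edge runs from vertex k
 * to kp1 = (k + 1) % nverts; both endpoints are shifted to the observation
 * point (xp[i], yp[i]) and the six tensor kernels kernelxx..kernelzz are
 * accumulated into the OpenMP reduction variables v1..v6.
 */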
__pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":572 * for k in range(nverts): X1 = x[k] - * xp[i] Y1 = y[k] - yp[i] # * <<<<<<<<<<<<<< kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":573 * X1 = x[k] - xp[i] Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # * <<<<<<<<<<<<<< X2 = x[kp1] - xp[i] Y2 * = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 573; __pyx_clineno = __LINE__; goto __pyx_L8_error; } } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* * "fatiando/gravmag/_polyprism.pyx":574 * Y1 = y[k] - yp[i] kp1 = (k + 1) % * nverts X2 = x[kp1] - xp[i] * # <<<<<<<<<<<<<< Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":575 * kp1 = (k + 1) % nverts X2 = x[kp1] - * xp[i] Y2 = y[kp1] - yp[i] * # <<<<<<<<<<<<<< v1 += kernelxx(X1, * Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v2 * += kernelxy(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":576 * X2 = x[kp1] - xp[i] Y2 = y[kp1] - * yp[i] v1 += kernelxx(X1, Y1, X2, Y2, * Z1, Z2, Z1_sqr, Z2_sqr) # * <<<<<<<<<<<<<< v2 += kernelxy(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v3 += * kernelxz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) */ __pyx_v_v1 = (__pyx_v_v1 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* * "fatiando/gravmag/_polyprism.pyx":577 * Y2 = y[kp1] - yp[i] v1 += kernelxx(X1, * Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v2 * += kernelxy(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) # * <<<<<<<<<<<<<< v3 += kernelxz(X1, 
Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v4 += * kernelyy(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) */ __pyx_v_v2 = (__pyx_v_v2 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* * "fatiando/gravmag/_polyprism.pyx":578 * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) v2 += kernelxy(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v3 += * kernelxz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) # * <<<<<<<<<<<<<< v4 += kernelyy(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v5 += * kernelyz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) */ __pyx_v_v3 = (__pyx_v_v3 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* * "fatiando/gravmag/_polyprism.pyx":579 * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) v3 += kernelxz(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v4 += * kernelyy(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) # * <<<<<<<<<<<<<< v5 += kernelyz(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v6 += * kernelzz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) */ __pyx_v_v4 = (__pyx_v_v4 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* * "fatiando/gravmag/_polyprism.pyx":580 * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) v4 += kernelyy(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v5 += * kernelyz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) # * <<<<<<<<<<<<<< v6 += kernelzz(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) res[i] * += (fx*(v1*mx + v2*my + v3*mz) */ __pyx_v_v5 = (__pyx_v_v5 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* * "fatiando/gravmag/_polyprism.pyx":581 * v4 += kernelyy(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) v5 += kernelyz(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v6 += * kernelzz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) # * <<<<<<<<<<<<<< res[i] += (fx*(v1*mx + * v2*my + v3*mz) + fy*(v2*mx + v4*my + * v5*mz) */ __pyx_v_v6 = (__pyx_v_v6 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* * "fatiando/gravmag/_polyprism.pyx":582 v5 * += kernelyz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) v6 += kernelzz(X1, Y1, X2, * Y2, Z1, Z2, Z1_sqr, Z2_sqr) res[i] += * (fx*(v1*mx + v2*my + v3*mz) # * <<<<<<<<<<<<<< + fy*(v2*mx + v4*my + * v5*mz) + fz*(v3*mx + v5*my + v6*mz)) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (((__pyx_v_fx * (((__pyx_v_v1 * __pyx_v_mx) + (__pyx_v_v2 * __pyx_v_my)) + (__pyx_v_v3 * __pyx_v_mz))) + (__pyx_v_fy * (((__pyx_v_v2 * __pyx_v_mx) + (__pyx_v_v4 * __pyx_v_my)) + (__pyx_v_v5 * __pyx_v_mz)))) + (__pyx_v_fz * (((__pyx_v_v3 * __pyx_v_mx) + (__pyx_v_v5 * __pyx_v_my)) + (__pyx_v_v6 * __pyx_v_mz)))); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = 
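/*
 * First-failure capture: the omp flush and the if-guard above are meant to
 * let only the first thread that errors record its exception object and
 * source position; subsequent failures in other threads become no-ops.
 */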
__pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates7) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_Z2; __pyx_parallel_temp1 = __pyx_v_v5; __pyx_parallel_temp2 = __pyx_v_v6; __pyx_parallel_temp3 = __pyx_v_Y1; __pyx_parallel_temp4 = __pyx_v_v1; __pyx_parallel_temp5 = __pyx_v_v4; __pyx_parallel_temp6 = __pyx_v_v3; __pyx_parallel_temp7 = __pyx_v_Z2_sqr; __pyx_parallel_temp8 = __pyx_v_i; __pyx_parallel_temp9 = __pyx_v_v2; __pyx_parallel_temp10 = __pyx_v_Z1_sqr; __pyx_parallel_temp11 = __pyx_v_X1; __pyx_parallel_temp12 = __pyx_v_Y2; __pyx_parallel_temp13 = __pyx_v_kp1; __pyx_parallel_temp14 = __pyx_v_k; __pyx_parallel_temp15 = __pyx_v_X2; __pyx_parallel_temp16 = __pyx_v_Z1; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* * This may have been overridden by a continue, break or * return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Z2 = __pyx_parallel_temp0; __pyx_v_v5 = __pyx_parallel_temp1; __pyx_v_v6 = __pyx_parallel_temp2; __pyx_v_Y1 = __pyx_parallel_temp3; __pyx_v_v1 = __pyx_parallel_temp4; __pyx_v_v4 = __pyx_parallel_temp5; __pyx_v_v3 = __pyx_parallel_temp6; __pyx_v_Z2_sqr = __pyx_parallel_temp7; __pyx_v_i = __pyx_parallel_temp8; __pyx_v_v2 = __pyx_parallel_temp9; __pyx_v_Z1_sqr = __pyx_parallel_temp10; __pyx_v_X1 = __pyx_parallel_temp11; __pyx_v_Y2 = __pyx_parallel_temp12; __pyx_v_kp1 = __pyx_parallel_temp13; __pyx_v_k = __pyx_parallel_temp14; __pyx_v_X2 = __pyx_parallel_temp15; __pyx_v_Z1 = __pyx_parallel_temp16; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* * "fatiando/gravmag/_polyprism.pyx":558 nverts = len(x) size = * len(res) with nogil: # <<<<<<<<<<<<<< for i in * prange(size): Z1 = z1 - zp[i] */ /* finally: */ { /* normal exit: */ { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* * "fatiando/gravmag/_polyprism.pyx":544 @cython.wraparound(False) * @cython.boundscheck(False) def tf(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* 
function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb); } __Pyx_AddTraceback("fatiando.gravmag._polyprism.tf", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":588 @cython.wraparound(False) * @cython.boundscheck(False) def bx(numpy.ndarray[DTYPE_T, ndim=1] xp not * None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] yp not * None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_17bx(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds); /* proto */ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_16bx[] = "bx(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double mx, double my, double mz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_17bx = {__Pyx_NAMESTR("bx"), (PyCFunction) __pyx_pw_8fatiando_7gravmag_10_polyprism_17bx, METH_VARARGS | METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_16bx)}; static PyObject * __pyx_pw_8fatiando_7gravmag_10_polyprism_17bx(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("bx (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp, &__pyx_n_s_yp, &__pyx_n_s_zp, &__pyx_n_s_x, &__pyx_n_s_y, &__pyx_n_s_z1, &__pyx_n_s_z2, &__pyx_n_s_mx, &__pyx_n_s_my, &__pyx_n_s_mz, &__pyx_n_s_res, 0}; PyObject *values[11] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); 
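/*
 * The switch cases here fall through on purpose: positional arguments fill
 * values[] from the highest supplied index down to 0, the standard pattern
 * in Cython-generated argument unpacking.
 */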
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 1); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 2); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 3); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 4); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 5); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 6); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 7); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 8); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 9); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, 10); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bx") < 0)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = 
PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *) values[0]); __pyx_v_yp = ((PyArrayObject *) values[1]); __pyx_v_zp = ((PyArrayObject *) values[2]); __pyx_v_x = ((PyArrayObject *) values[3]); __pyx_v_y = ((PyArrayObject *) values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 594; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_res = ((PyArrayObject *) values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("bx", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.bx", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 589; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 590; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 591; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 595; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_16bx(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject * __pyx_pf_8fatiando_7gravmag_10_polyprism_16bx(CYTHON_UNUSED PyObject * 
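/*
 * bx: same parallel structure as tf above, but only the first tensor row is
 * needed -- v1..v3 accumulate kernelxx/kernelxy/kernelxz, and each result is
 * res[i] += v1*mx + v2*my + v3*mz, with no projection onto (fx, fy, fz).
 */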
__pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject * __pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("bx", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject *) __pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = 
__pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject *) __pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject *) __pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject *) __pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject *) __pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject *) __pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES | PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* * "fatiando/gravmag/_polyprism.pyx":600 DTYPE_T v1, v2, v3 DTYPE_T X1, * Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr nverts = len(x) # * <<<<<<<<<<<<<< size = len(res) with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 600; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_nverts = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":601 DTYPE_T X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr nverts = len(x) size = len(res) # * <<<<<<<<<<<<<< with nogil: for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 601; __pyx_clineno 
= __LINE__; goto __pyx_L1_error; } __pyx_v_size = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":602 nverts = len(x) size = len(res) * with nogil: # <<<<<<<<<<<<<< for i in prange(size): Z1 = * z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /* try: */ { /* * "fatiando/gravmag/_polyprism.pyx":603 size = len(res) with * nogil: for i in prange(size): # <<<<<<<<<<<<<< Z1 * = z1 - zp[i] Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); unsigned int __pyx_parallel_temp4 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); unsigned int __pyx_parallel_temp6 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp8 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp10 = __PYX_NAN(); unsigned int __pyx_parallel_temp11 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp12 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp13 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_v1) reduction(+:__pyx_v_v2) reduction(+:__pyx_v_v3) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_k) lastprivate(__pyx_v_X1) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_Y1) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_X2) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++) { if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* * Initialize private variables to invalid * values */ __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) 
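/*
 * As in tf, lastprivate scalars are seeded with NaN and index variables with
 * 0xbad0bad0, so a value escaping from an iteration that never executed is
 * recognizably invalid.
 */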
__PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); /* * "fatiando/gravmag/_polyprism.pyx":604 with * nogil: for i in prange(size): Z1 = z1 - * zp[i] # <<<<<<<<<<<<<< Z2 = z2 * - zp[i] Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":605 for * i in prange(size): Z1 = z1 - zp[i] Z2 = z2 * - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":606 Z1 = * z1 - zp[i] Z2 = z2 - zp[i] Z1_sqr = Z1**2 * # <<<<<<<<<<<<<< Z2_sqr = Z2**2 v1 = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":607 Z2 = * z2 - zp[i] Z1_sqr = Z1**2 Z2_sqr = Z2**2 * # <<<<<<<<<<<<<< v1 = 0 v2 = 0 */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":608 * Z1_sqr = Z1**2 Z2_sqr = Z2**2 v1 = 0 * # <<<<<<<<<<<<<< v2 = 0 v3 = 0 */ __pyx_v_v1 = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":609 * Z2_sqr = Z2**2 v1 = 0 v2 = 0 # * <<<<<<<<<<<<<< v3 = 0 for k in * range(nverts): */ __pyx_v_v2 = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":610 v1 = * 0 v2 = 0 v3 = 0 # * <<<<<<<<<<<<<< for k in range(nverts): X1 * = x[k] - xp[i] */ __pyx_v_v3 = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":611 v2 = * 0 v3 = 0 for k in range(nverts): * # <<<<<<<<<<<<<< X1 = x[k] - xp[i] Y1 = * y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8 += 1) { __pyx_v_k = __pyx_t_8; /* * "fatiando/gravmag/_polyprism.pyx":612 * v3 = 0 for k in range(nverts): X1 = * x[k] - xp[i] # * <<<<<<<<<<<<<< Y1 = y[k] - yp[i] kp1 = * (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":613 * for k in range(nverts): X1 = x[k] - * xp[i] Y1 = y[k] - yp[i] # * <<<<<<<<<<<<<< kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":614 * X1 = x[k] - xp[i] Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # * <<<<<<<<<<<<<< X2 = x[kp1] - xp[i] Y2 * = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD 
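/*
 * nverts == 0 would make (k + 1) % nverts divide by zero; the GIL, released
 * for the prange block, has to be re-acquired before PyErr_SetString, after
 * which control jumps to the per-thread error label.
 */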
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L8_error; } } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* * "fatiando/gravmag/_polyprism.pyx":615 * Y1 = y[k] - yp[i] kp1 = (k + 1) % * nverts X2 = x[kp1] - xp[i] * # <<<<<<<<<<<<<< Y2 = y[kp1] - yp[i] * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":616 * kp1 = (k + 1) % nverts X2 = x[kp1] - * xp[i] Y2 = y[kp1] - yp[i] * # <<<<<<<<<<<<<< v1 += kernelxx(X1, * Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v2 * += kernelxy(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":617 * X2 = x[kp1] - xp[i] Y2 = y[kp1] - * yp[i] v1 += kernelxx(X1, Y1, X2, Y2, * Z1, Z2, Z1_sqr, Z2_sqr) # * <<<<<<<<<<<<<< v2 += kernelxy(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v3 += * kernelxz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) */ __pyx_v_v1 = (__pyx_v_v1 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxx(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* * "fatiando/gravmag/_polyprism.pyx":618 * Y2 = y[kp1] - yp[i] v1 += kernelxx(X1, * Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v2 * += kernelxy(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) # * <<<<<<<<<<<<<< v3 += kernelxz(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) res[i] * += (v1*mx + v2*my + v3*mz) */ __pyx_v_v2 = (__pyx_v_v2 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* * "fatiando/gravmag/_polyprism.pyx":619 * v1 += kernelxx(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) v2 += kernelxy(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v3 += * kernelxz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) # * <<<<<<<<<<<<<< res[i] += (v1*mx + * v2*my + v3*mz) * */ __pyx_v_v3 = (__pyx_v_v3 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* * "fatiando/gravmag/_polyprism.pyx":620 v2 * += kernelxy(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) v3 += kernelxz(X1, Y1, X2, * Y2, Z1, Z2, Z1_sqr, Z2_sqr) res[i] += * (v1*mx + v2*my + v3*mz) # * <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (((__pyx_v_v1 * __pyx_v_mx) + (__pyx_v_v2 * 
__pyx_v_my)) + (__pyx_v_v3 * __pyx_v_mz)); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates8) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_Z2_sqr; __pyx_parallel_temp1 = __pyx_v_v1; __pyx_parallel_temp2 = __pyx_v_Y2; __pyx_parallel_temp3 = __pyx_v_Z1_sqr; __pyx_parallel_temp4 = __pyx_v_k; __pyx_parallel_temp5 = __pyx_v_X1; __pyx_parallel_temp6 = __pyx_v_i; __pyx_parallel_temp7 = __pyx_v_v2; __pyx_parallel_temp8 = __pyx_v_Z2; __pyx_parallel_temp9 = __pyx_v_v3; __pyx_parallel_temp10 = __pyx_v_Y1; __pyx_parallel_temp11 = __pyx_v_kp1; __pyx_parallel_temp12 = __pyx_v_Z1; __pyx_parallel_temp13 = __pyx_v_X2; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* * This may have been overridden by a continue, break or * return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Z2_sqr = __pyx_parallel_temp0; __pyx_v_v1 = __pyx_parallel_temp1; __pyx_v_Y2 = __pyx_parallel_temp2; __pyx_v_Z1_sqr = __pyx_parallel_temp3; __pyx_v_k = __pyx_parallel_temp4; __pyx_v_X1 = __pyx_parallel_temp5; __pyx_v_i = __pyx_parallel_temp6; __pyx_v_v2 = __pyx_parallel_temp7; __pyx_v_Z2 = __pyx_parallel_temp8; __pyx_v_v3 = __pyx_parallel_temp9; __pyx_v_Y1 = __pyx_parallel_temp10; __pyx_v_kp1 = __pyx_parallel_temp11; __pyx_v_Z1 = __pyx_parallel_temp12; __pyx_v_X2 = __pyx_parallel_temp13; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* * "fatiando/gravmag/_polyprism.pyx":602 nverts = len(x) size = * len(res) with nogil: # <<<<<<<<<<<<<< for i in * prange(size): Z1 = z1 - zp[i] */ /* finally: */ { /* normal exit: */ { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* * "fatiando/gravmag/_polyprism.pyx":588 
@cython.wraparound(False) * @cython.boundscheck(False) def bx(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb); } __Pyx_AddTraceback("fatiando.gravmag._polyprism.bx", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":624 @cython.wraparound(False) * @cython.boundscheck(False) def by(numpy.ndarray[DTYPE_T, ndim=1] xp not * None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] yp not * None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_19by(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds); /* proto */ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_18by[] = "by(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double mx, double my, double mz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_19by = {__Pyx_NAMESTR("by"), (PyCFunction) __pyx_pw_8fatiando_7gravmag_10_polyprism_19by, METH_VARARGS | METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_18by)}; static PyObject * __pyx_pw_8fatiando_7gravmag_10_polyprism_19by(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("by (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp, &__pyx_n_s_yp, &__pyx_n_s_zp, &__pyx_n_s_x, &__pyx_n_s_y, &__pyx_n_s_z1, &__pyx_n_s_z2, &__pyx_n_s_mx, &__pyx_n_s_my, &__pyx_n_s_mz, &__pyx_n_s_res, 0}; PyObject *values[11] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = 
PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 1); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 2); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 3); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 4); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 5); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 6); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 7); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 8); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 9); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, 10); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "by") < 0)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); 
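/*
 * Editorial note: the positional unpacking around this point fills
 * values[] back to front via case fall-through, then casts each slot to
 * its declared C type.  A hand-written CPython equivalent of this
 * generated parsing is sketched below for orientation only; the demo
 * name by_parse_demo is hypothetical, and the block is fenced off from
 * compilation with #if 0.
 */
#if 0
#include <Python.h>

/* Minimal sketch, assuming the same 11-argument signature as by():
 * five array objects, five doubles, and an output array object. */
static PyObject *
by_parse_demo(PyObject *self, PyObject *args, PyObject *kwds)
{
	static char *kwlist[] = {"xp", "yp", "zp", "x", "y", "z1", "z2",
		"mx", "my", "mz", "res", NULL};
	PyObject *xp, *yp, *zp, *x, *y, *res;
	double z1, z2, mx, my, mz;

	if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOOOOdddddO", kwlist,
		&xp, &yp, &zp, &x, &y, &z1, &z2, &mx, &my, &mz, &res))
		return NULL;	/* TypeError already set by CPython */
	Py_RETURN_NONE;
}
#endif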
values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *) values[0]); __pyx_v_yp = ((PyArrayObject *) values[1]); __pyx_v_zp = ((PyArrayObject *) values[2]); __pyx_v_x = ((PyArrayObject *) values[3]); __pyx_v_y = ((PyArrayObject *) values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 629; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 629; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 630; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 630; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 630; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_res = ((PyArrayObject *) values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("by", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.by", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 625; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 626; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 627; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 628; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 631; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_18by(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, 
__pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject * __pyx_pf_8fatiando_7gravmag_10_polyprism_18by(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject * __pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v4; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("by", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject *) __pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; 
__pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject *) __pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject *) __pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject *) __pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_x.diminfo[0].strides = __pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject *) __pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject *) __pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES | PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* * "fatiando/gravmag/_polyprism.pyx":636 DTYPE_T v2, v4, v5 DTYPE_T X1, * Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr nverts = len(x) # * <<<<<<<<<<<<<< size = len(res) with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 636; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_nverts = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":637 DTYPE_T X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr nverts = len(x) size = len(res) # * 
<<<<<<<<<<<<<< with nogil: for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 637; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_size = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":638 nverts = len(x) size = len(res) * with nogil: # <<<<<<<<<<<<<< for i in prange(size): Z1 = * z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /* try: */ { /* * "fatiando/gravmag/_polyprism.pyx":639 size = len(res) with * nogil: for i in prange(size): # <<<<<<<<<<<<<< Z1 * = z1 - zp[i] Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp3 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); unsigned int __pyx_parallel_temp7 = 0xbad0bad0; unsigned int __pyx_parallel_temp8 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp12 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp13 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_v4) reduction(+:__pyx_v_v5) reduction(+:__pyx_v_v2) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Z2) lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Y1) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_X1) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++) { if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* * Initialize private variables to invalid * values */ __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) 
__PYX_NAN()); __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); /* * "fatiando/gravmag/_polyprism.pyx":640 with * nogil: for i in prange(size): Z1 = z1 - * zp[i] # <<<<<<<<<<<<<< Z2 = z2 * - zp[i] Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":641 for * i in prange(size): Z1 = z1 - zp[i] Z2 = z2 * - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":642 Z1 = * z1 - zp[i] Z2 = z2 - zp[i] Z1_sqr = Z1**2 * # <<<<<<<<<<<<<< Z2_sqr = Z2**2 v2 = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":643 Z2 = * z2 - zp[i] Z1_sqr = Z1**2 Z2_sqr = Z2**2 * # <<<<<<<<<<<<<< v2 = 0 v4 = 0 */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":644 * Z1_sqr = Z1**2 Z2_sqr = Z2**2 v2 = 0 * # <<<<<<<<<<<<<< v4 = 0 v5 = 0 */ __pyx_v_v2 = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":645 * Z2_sqr = Z2**2 v2 = 0 v4 = 0 # * <<<<<<<<<<<<<< v5 = 0 for k in * range(nverts): */ __pyx_v_v4 = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":646 v2 = * 0 v4 = 0 v5 = 0 # * <<<<<<<<<<<<<< for k in range(nverts): X1 * = x[k] - xp[i] */ __pyx_v_v5 = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":647 v4 = * 0 v5 = 0 for k in range(nverts): * # <<<<<<<<<<<<<< X1 = x[k] - xp[i] Y1 = * y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8 += 1) { __pyx_v_k = __pyx_t_8; /* * "fatiando/gravmag/_polyprism.pyx":648 * v5 = 0 for k in range(nverts): X1 = * x[k] - xp[i] # * <<<<<<<<<<<<<< Y1 = y[k] - yp[i] kp1 = * (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":649 * for k in range(nverts): X1 = x[k] - * xp[i] Y1 = y[k] - yp[i] # * <<<<<<<<<<<<<< kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":650 * X1 = 
x[k] - xp[i] Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # * <<<<<<<<<<<<<< X2 = x[kp1] - xp[i] Y2 * = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 650; __pyx_clineno = __LINE__; goto __pyx_L8_error; } } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* * "fatiando/gravmag/_polyprism.pyx":651 * Y1 = y[k] - yp[i] kp1 = (k + 1) % * nverts X2 = x[kp1] - xp[i] * # <<<<<<<<<<<<<< Y2 = y[kp1] - yp[i] * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":652 * kp1 = (k + 1) % nverts X2 = x[kp1] - * xp[i] Y2 = y[kp1] - yp[i] * # <<<<<<<<<<<<<< v2 += kernelxy(X1, * Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v4 * += kernelyy(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":653 * X2 = x[kp1] - xp[i] Y2 = y[kp1] - * yp[i] v2 += kernelxy(X1, Y1, X2, Y2, * Z1, Z2, Z1_sqr, Z2_sqr) # * <<<<<<<<<<<<<< v4 += kernelyy(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v5 += * kernelyz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) */ __pyx_v_v2 = (__pyx_v_v2 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* * "fatiando/gravmag/_polyprism.pyx":654 * Y2 = y[kp1] - yp[i] v2 += kernelxy(X1, * Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v4 * += kernelyy(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) # * <<<<<<<<<<<<<< v5 += kernelyz(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) res[i] * += (v2*mx + v4*my + v5*mz) */ __pyx_v_v4 = (__pyx_v_v4 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyy(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); /* * "fatiando/gravmag/_polyprism.pyx":655 * v2 += kernelxy(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) v4 += kernelyy(X1, Y1, * X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v5 += * kernelyz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) # * <<<<<<<<<<<<<< res[i] += (v2*mx + * v4*my + v5*mz) * */ __pyx_v_v5 = (__pyx_v_v5 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr)); } /* * "fatiando/gravmag/_polyprism.pyx":656 v4 * += kernelyy(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) v5 += kernelyz(X1, Y1, X2, * Y2, Z1, Z2, Z1_sqr, Z2_sqr) res[i] += * (v2*mx + v4*my + v5*mz) # * <<<<<<<<<<<<<< * * @cython.wraparound(False) */ __pyx_t_7 = __pyx_v_i; 
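/*
 * Editorial note: the store below is the end of one prange iteration of
 * by(): the three accumulators (v2, v4, v5), summed over the polygon
 * edges k, are weighted by the magnetization components (mx, my, mz)
 * and added into res[i].  Each iteration owns a distinct i, so the
 * unsynchronized += into res[i] is race-free across OpenMP threads.
 */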
*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (((__pyx_v_v2 * __pyx_v_mx) + (__pyx_v_v4 * __pyx_v_my)) + (__pyx_v_v5 * __pyx_v_mz)); goto __pyx_L13; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L12; __pyx_L12:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates9) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_Z1; __pyx_parallel_temp1 = __pyx_v_Z2; __pyx_parallel_temp2 = __pyx_v_v4; __pyx_parallel_temp3 = __pyx_v_v5; __pyx_parallel_temp4 = __pyx_v_v2; __pyx_parallel_temp5 = __pyx_v_X2; __pyx_parallel_temp6 = __pyx_v_Z1_sqr; __pyx_parallel_temp7 = __pyx_v_kp1; __pyx_parallel_temp8 = __pyx_v_k; __pyx_parallel_temp9 = __pyx_v_Y1; __pyx_parallel_temp10 = __pyx_v_i; __pyx_parallel_temp11 = __pyx_v_Z2_sqr; __pyx_parallel_temp12 = __pyx_v_Y2; __pyx_parallel_temp13 = __pyx_v_X1; } __pyx_L13:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* * This may have been overridden by a continue, break or * return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_Z1 = __pyx_parallel_temp0; __pyx_v_Z2 = __pyx_parallel_temp1; __pyx_v_v4 = __pyx_parallel_temp2; __pyx_v_v5 = __pyx_parallel_temp3; __pyx_v_v2 = __pyx_parallel_temp4; __pyx_v_X2 = __pyx_parallel_temp5; __pyx_v_Z1_sqr = __pyx_parallel_temp6; __pyx_v_kp1 = __pyx_parallel_temp7; __pyx_v_k = __pyx_parallel_temp8; __pyx_v_Y1 = __pyx_parallel_temp9; __pyx_v_i = __pyx_parallel_temp10; __pyx_v_Z2_sqr = __pyx_parallel_temp11; __pyx_v_Y2 = __pyx_parallel_temp12; __pyx_v_X1 = __pyx_parallel_temp13; switch (__pyx_parallel_why) { case 3: goto __pyx_L3_return; case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* * "fatiando/gravmag/_polyprism.pyx":638 nverts = len(x) size = * len(res) with nogil: # <<<<<<<<<<<<<< for i in * prange(size): Z1 = z1 - zp[i] */ /* finally: */ { /* normal exit: */ { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L3_return:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L0; } __pyx_L4_error:{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* * "fatiando/gravmag/_polyprism.pyx":624 @cython.wraparound(False) * @cython.boundscheck(False) def by(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb); } __Pyx_AddTraceback("fatiando.gravmag._polyprism.by", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "fatiando/gravmag/_polyprism.pyx":660 @cython.wraparound(False) * @cython.boundscheck(False) def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not * None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] yp not * None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ /* Python 
wrapper */ static PyObject *__pyx_pw_8fatiando_7gravmag_10_polyprism_21bz(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds); /* proto */ static char __pyx_doc_8fatiando_7gravmag_10_polyprism_20bz[] = "bz(ndarray xp, ndarray yp, ndarray zp, ndarray x, ndarray y, double z1, double z2, double mx, double my, double mz, ndarray res)"; static PyMethodDef __pyx_mdef_8fatiando_7gravmag_10_polyprism_21bz = {__Pyx_NAMESTR("bz"), (PyCFunction) __pyx_pw_8fatiando_7gravmag_10_polyprism_21bz, METH_VARARGS | METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_8fatiando_7gravmag_10_polyprism_20bz)}; static PyObject * __pyx_pw_8fatiando_7gravmag_10_polyprism_21bz(PyObject * __pyx_self, PyObject * __pyx_args, PyObject * __pyx_kwds) { PyArrayObject *__pyx_v_xp = 0; PyArrayObject *__pyx_v_yp = 0; PyArrayObject *__pyx_v_zp = 0; PyArrayObject *__pyx_v_x = 0; PyArrayObject *__pyx_v_y = 0; double __pyx_v_z1; double __pyx_v_z2; double __pyx_v_mx; double __pyx_v_my; double __pyx_v_mz; PyArrayObject *__pyx_v_res = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("bz (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_xp, &__pyx_n_s_yp, &__pyx_n_s_zp, &__pyx_n_s_x, &__pyx_n_s_y, &__pyx_n_s_z1, &__pyx_n_s_z2, &__pyx_n_s_mx, &__pyx_n_s_my, &__pyx_n_s_mz, &__pyx_n_s_res, 0}; PyObject *values[11] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_xp)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_yp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 1); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_zp)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 2); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_x)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 3); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_y)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 4); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z1)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 5); { 
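/*
 * Editorial note: a required keyword argument is missing here; the
 * generated code records the failing source location (file, line)
 * before jumping to the shared __pyx_L3_error traceback path.
 */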
__pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_z2)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 6); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mx)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 7); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_my)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 8); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mz)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 9); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_res)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, 10); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bz") < 0)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error; } } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_xp = ((PyArrayObject *) values[0]); __pyx_v_yp = ((PyArrayObject *) values[1]); __pyx_v_zp = ((PyArrayObject *) values[2]); __pyx_v_x = ((PyArrayObject *) values[3]); __pyx_v_y = ((PyArrayObject *) values[4]); __pyx_v_z1 = __pyx_PyFloat_AsDouble(values[5]); if (unlikely((__pyx_v_z1 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 665; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_z2 = __pyx_PyFloat_AsDouble(values[6]); if (unlikely((__pyx_v_z2 == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 665; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_mx = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_mx == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 666; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_my = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_my == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 666; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_mz = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_mz == (double)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 666; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_v_res = ((PyArrayObject *) values[10]); } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; 
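/*
 * Editorial note: this label is reached when bz() receives the wrong
 * number of positional arguments; __Pyx_RaiseArgtupleInvalid raises the
 * usual TypeError describing the expected argument count (exactly 11).
 */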
__Pyx_RaiseArgtupleInvalid("bz", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L3_error; } __pyx_L3_error:; __Pyx_AddTraceback("fatiando.gravmag._polyprism.bz", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_xp), __pyx_ptype_5numpy_ndarray, 0, "xp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_yp), __pyx_ptype_5numpy_ndarray, 0, "yp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 661; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_zp), __pyx_ptype_5numpy_ndarray, 0, "zp", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_x), __pyx_ptype_5numpy_ndarray, 0, "x", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 663; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_y), __pyx_ptype_5numpy_ndarray, 0, "y", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 664; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *) __pyx_v_res), __pyx_ptype_5numpy_ndarray, 0, "res", 0))) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 667; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_r = __pyx_pf_8fatiando_7gravmag_10_polyprism_20bz(__pyx_self, __pyx_v_xp, __pyx_v_yp, __pyx_v_zp, __pyx_v_x, __pyx_v_y, __pyx_v_z1, __pyx_v_z2, __pyx_v_mx, __pyx_v_my, __pyx_v_mz, __pyx_v_res); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject * __pyx_pf_8fatiando_7gravmag_10_polyprism_20bz(CYTHON_UNUSED PyObject * __pyx_self, PyArrayObject * __pyx_v_xp, PyArrayObject * __pyx_v_yp, PyArrayObject * __pyx_v_zp, PyArrayObject * __pyx_v_x, PyArrayObject * __pyx_v_y, double __pyx_v_z1, double __pyx_v_z2, double __pyx_v_mx, double __pyx_v_my, double __pyx_v_mz, PyArrayObject * __pyx_v_res) { unsigned int __pyx_v_nverts; CYTHON_UNUSED unsigned int __pyx_v_size; unsigned int __pyx_v_i; unsigned int __pyx_v_k; unsigned int __pyx_v_kp1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v3; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v5; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_v6; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_X2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Y2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z1_sqr; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_v_Z2_sqr; __Pyx_LocalBuf_ND __pyx_pybuffernd_res; __Pyx_Buffer __pyx_pybuffer_res; __Pyx_LocalBuf_ND __pyx_pybuffernd_x; __Pyx_Buffer __pyx_pybuffer_x; __Pyx_LocalBuf_ND __pyx_pybuffernd_xp; __Pyx_Buffer __pyx_pybuffer_xp; __Pyx_LocalBuf_ND __pyx_pybuffernd_y; __Pyx_Buffer __pyx_pybuffer_y; __Pyx_LocalBuf_ND __pyx_pybuffernd_yp; __Pyx_Buffer __pyx_pybuffer_yp; __Pyx_LocalBuf_ND __pyx_pybuffernd_zp; __Pyx_Buffer __pyx_pybuffer_zp; PyObject *__pyx_r = NULL; 
__Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; unsigned int __pyx_t_2; unsigned int __pyx_t_3; unsigned int __pyx_t_4; unsigned int __pyx_t_5; unsigned int __pyx_t_6; unsigned int __pyx_t_7; unsigned int __pyx_t_8; unsigned int __pyx_t_9; unsigned int __pyx_t_10; unsigned int __pyx_t_11; unsigned int __pyx_t_12; long __pyx_t_13; unsigned int __pyx_t_14; unsigned int __pyx_t_15; unsigned int __pyx_t_16; unsigned int __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("bz", 0); __pyx_pybuffer_xp.pybuffer.buf = NULL; __pyx_pybuffer_xp.refcount = 0; __pyx_pybuffernd_xp.data = NULL; __pyx_pybuffernd_xp.rcbuffer = &__pyx_pybuffer_xp; __pyx_pybuffer_yp.pybuffer.buf = NULL; __pyx_pybuffer_yp.refcount = 0; __pyx_pybuffernd_yp.data = NULL; __pyx_pybuffernd_yp.rcbuffer = &__pyx_pybuffer_yp; __pyx_pybuffer_zp.pybuffer.buf = NULL; __pyx_pybuffer_zp.refcount = 0; __pyx_pybuffernd_zp.data = NULL; __pyx_pybuffernd_zp.rcbuffer = &__pyx_pybuffer_zp; __pyx_pybuffer_x.pybuffer.buf = NULL; __pyx_pybuffer_x.refcount = 0; __pyx_pybuffernd_x.data = NULL; __pyx_pybuffernd_x.rcbuffer = &__pyx_pybuffer_x; __pyx_pybuffer_y.pybuffer.buf = NULL; __pyx_pybuffer_y.refcount = 0; __pyx_pybuffernd_y.data = NULL; __pyx_pybuffernd_y.rcbuffer = &__pyx_pybuffer_y; __pyx_pybuffer_res.pybuffer.buf = NULL; __pyx_pybuffer_res.refcount = 0; __pyx_pybuffernd_res.data = NULL; __pyx_pybuffernd_res.rcbuffer = &__pyx_pybuffer_res; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_xp.rcbuffer->pybuffer, (PyObject *) __pyx_v_xp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_xp.diminfo[0].strides = __pyx_pybuffernd_xp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_xp.diminfo[0].shape = __pyx_pybuffernd_xp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_yp.rcbuffer->pybuffer, (PyObject *) __pyx_v_yp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_yp.diminfo[0].strides = __pyx_pybuffernd_yp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_yp.diminfo[0].shape = __pyx_pybuffernd_yp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_zp.rcbuffer->pybuffer, (PyObject *) __pyx_v_zp, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_zp.diminfo[0].strides = __pyx_pybuffernd_zp.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_zp.diminfo[0].shape = __pyx_pybuffernd_zp.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_x.rcbuffer->pybuffer, (PyObject *) __pyx_v_x, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_x.diminfo[0].strides = 
__pyx_pybuffernd_x.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_x.diminfo[0].shape = __pyx_pybuffernd_x.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_y.rcbuffer->pybuffer, (PyObject *) __pyx_v_y, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_y.diminfo[0].strides = __pyx_pybuffernd_y.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_y.diminfo[0].shape = __pyx_pybuffernd_y.rcbuffer->pybuffer.shape[0]; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_res.rcbuffer->pybuffer, (PyObject *) __pyx_v_res, &__Pyx_TypeInfo_nn___pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T, PyBUF_FORMAT | PyBUF_STRIDES | PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_pybuffernd_res.diminfo[0].strides = __pyx_pybuffernd_res.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_res.diminfo[0].shape = __pyx_pybuffernd_res.rcbuffer->pybuffer.shape[0]; /* * "fatiando/gravmag/_polyprism.pyx":672 DTYPE_T v3, v5, v6 DTYPE_T X1, * Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr nverts = len(x) # * <<<<<<<<<<<<<< size = len(res) with nogil: */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_x)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 672; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_nverts = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":673 DTYPE_T X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr nverts = len(x) size = len(res) # * <<<<<<<<<<<<<< with nogil: for i in prange(size): */ __pyx_t_1 = PyObject_Length(((PyObject *) __pyx_v_res)); if (unlikely(__pyx_t_1 == -1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 673; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_size = __pyx_t_1; /* * "fatiando/gravmag/_polyprism.pyx":674 nverts = len(x) size = len(res) * with nogil: # <<<<<<<<<<<<<< for i in prange(size): Z1 = * z1 - zp[i] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /* try: */ { /* * "fatiando/gravmag/_polyprism.pyx":675 size = len(res) with * nogil: for i in prange(size): # <<<<<<<<<<<<<< Z1 * = z1 - zp[i] Z2 = z2 - zp[i] */ __pyx_t_2 = __pyx_v_size; if (1 == 0) abort(); { __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp0 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp1 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp2 = __PYX_NAN(); unsigned int __pyx_parallel_temp3 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp4 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp5 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp6 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp7 = __PYX_NAN(); unsigned int __pyx_parallel_temp8 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp9 = __PYX_NAN(); unsigned int __pyx_parallel_temp10 = 0xbad0bad0; __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp11 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp12 = __PYX_NAN(); __pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T __pyx_parallel_temp13 = __PYX_NAN(); 
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_4 = (__pyx_t_2 - 0) / 1; if (__pyx_t_4 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_v3) reduction(+:__pyx_v_v6) reduction(+:__pyx_v_v5) private(__pyx_t_17, __pyx_t_6, __pyx_t_11, __pyx_t_16, __pyx_t_9, __pyx_t_7, __pyx_t_8, __pyx_t_13, __pyx_t_14, __pyx_t_10, __pyx_t_15, __pyx_t_12, __pyx_t_5) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_X2) lastprivate(__pyx_v_Z1_sqr) lastprivate(__pyx_v_kp1) lastprivate(__pyx_v_X1) lastprivate(__pyx_v_Z1) lastprivate(__pyx_v_Z2_sqr) lastprivate(__pyx_v_k) lastprivate(__pyx_v_Z2) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_Y2) lastprivate(__pyx_v_Y1) #endif /* _OPENMP */ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_4; __pyx_t_3++) { if (__pyx_parallel_why < 2) { __pyx_v_i = 0 + 1 * __pyx_t_3; /* * Initialize private variables to invalid * values */ __pyx_v_X2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z1_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_kp1 = ((unsigned int)0xbad0bad0); __pyx_v_X1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Z2_sqr = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_k = ((unsigned int)0xbad0bad0); __pyx_v_Z2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y2 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); __pyx_v_Y1 = ((__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T) __PYX_NAN()); /* * "fatiando/gravmag/_polyprism.pyx":676 with * nogil: for i in prange(size): Z1 = z1 - * zp[i] # <<<<<<<<<<<<<< Z2 = z2 * - zp[i] Z1_sqr = Z1**2 */ __pyx_t_5 = __pyx_v_i; __pyx_v_Z1 = (__pyx_v_z1 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_5, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":677 for * i in prange(size): Z1 = z1 - zp[i] Z2 = z2 * - zp[i] # <<<<<<<<<<<<<< * Z1_sqr = Z1**2 Z2_sqr = Z2**2 */ __pyx_t_6 = __pyx_v_i; __pyx_v_Z2 = (__pyx_v_z2 - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_zp.rcbuffer->pybuffer.buf, __pyx_t_6, __pyx_pybuffernd_zp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":678 Z1 = * z1 - zp[i] Z2 = z2 - zp[i] Z1_sqr = Z1**2 * # <<<<<<<<<<<<<< Z2_sqr = Z2**2 v3 = 0 */ __pyx_v_Z1_sqr = pow(__pyx_v_Z1, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":679 Z2 = * z2 - zp[i] Z1_sqr = Z1**2 Z2_sqr = Z2**2 * # <<<<<<<<<<<<<< v3 = 0 v5 = 0 */ __pyx_v_Z2_sqr = pow(__pyx_v_Z2, 2.0); /* * "fatiando/gravmag/_polyprism.pyx":680 * Z1_sqr = Z1**2 Z2_sqr = Z2**2 v3 = 0 * # <<<<<<<<<<<<<< v5 = 0 
v6 = 0 */ __pyx_v_v3 = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":681 * Z2_sqr = Z2**2 v3 = 0 v5 = 0 # * <<<<<<<<<<<<<< v6 = 0 for k in * range(nverts): */ __pyx_v_v5 = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":682 v3 = * 0 v5 = 0 v6 = 0 # * <<<<<<<<<<<<<< for k in range(nverts): X1 * = x[k] - xp[i] */ __pyx_v_v6 = 0.0; /* * "fatiando/gravmag/_polyprism.pyx":683 v5 = * 0 v6 = 0 for k in range(nverts): * # <<<<<<<<<<<<<< X1 = x[k] - xp[i] Y1 = * y[k] - yp[i] */ __pyx_t_7 = __pyx_v_nverts; for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8 += 1) { __pyx_v_k = __pyx_t_8; /* * "fatiando/gravmag/_polyprism.pyx":684 * v6 = 0 for k in range(nverts): X1 = * x[k] - xp[i] # * <<<<<<<<<<<<<< Y1 = y[k] - yp[i] kp1 = * (k + 1) % nverts */ __pyx_t_9 = __pyx_v_k; __pyx_t_10 = __pyx_v_i; __pyx_v_X1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_9, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":685 * for k in range(nverts): X1 = x[k] - * xp[i] Y1 = y[k] - yp[i] # * <<<<<<<<<<<<<< kp1 = (k + 1) % nverts * X2 = x[kp1] - xp[i] */ __pyx_t_11 = __pyx_v_k; __pyx_t_12 = __pyx_v_i; __pyx_v_Y1 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_yp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":686 * X1 = x[k] - xp[i] Y1 = y[k] - yp[i] * kp1 = (k + 1) % nverts # * <<<<<<<<<<<<<< X2 = x[kp1] - xp[i] Y2 * = y[kp1] - yp[i] */ __pyx_t_13 = (__pyx_v_k + 1); if (unlikely(__pyx_v_nverts == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif { __pyx_filename = __pyx_f[0]; __pyx_lineno = 686; __pyx_clineno = __LINE__; goto __pyx_L8_error; } } __pyx_v_kp1 = __Pyx_mod_long(__pyx_t_13, __pyx_v_nverts); /* * "fatiando/gravmag/_polyprism.pyx":687 * Y1 = y[k] - yp[i] kp1 = (k + 1) % * nverts X2 = x[kp1] - xp[i] * # <<<<<<<<<<<<<< Y2 = y[kp1] - yp[i] * v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) */ __pyx_t_14 = __pyx_v_kp1; __pyx_t_15 = __pyx_v_i; __pyx_v_X2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_x.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_x.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_xp.rcbuffer->pybuffer.buf, __pyx_t_15, __pyx_pybuffernd_xp.diminfo[0].strides))); /* * "fatiando/gravmag/_polyprism.pyx":688 * kp1 = (k + 1) % nverts X2 = x[kp1] - * xp[i] Y2 = y[kp1] - yp[i] * # <<<<<<<<<<<<<< v3 += kernelxz(X1, * Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr) v5 * += kernelyz(X1, Y1, X2, Y2, Z1, Z2, * Z1_sqr, Z2_sqr) */ __pyx_t_16 = __pyx_v_kp1; __pyx_t_17 = __pyx_v_i; __pyx_v_Y2 = ((*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_y.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_y.diminfo[0].strides)) - (*__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, 
__pyx_pybuffernd_yp.rcbuffer->pybuffer.buf, __pyx_t_17, __pyx_pybuffernd_yp.diminfo[0].strides)));

            /* "fatiando/gravmag/_polyprism.pyx":689
             *                 X2 = x[kp1] - xp[i]
             *                 Y2 = y[kp1] - yp[i]
             *                 v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)             # <<<<<<<<<<<<<<
             *                 v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
             *                 v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
             */
            __pyx_v_v3 = (__pyx_v_v3 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelxz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr));

            /* "fatiando/gravmag/_polyprism.pyx":690
             *                 Y2 = y[kp1] - yp[i]
             *                 v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
             *                 v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)             # <<<<<<<<<<<<<<
             *                 v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
             *             res[i] += (v3*mx + v5*my + v6*mz)
             */
            __pyx_v_v5 = (__pyx_v_v5 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelyz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr));

            /* "fatiando/gravmag/_polyprism.pyx":691
             *                 v3 += kernelxz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
             *                 v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
             *                 v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)             # <<<<<<<<<<<<<<
             *             res[i] += (v3*mx + v5*my + v6*mz)
             */
            __pyx_v_v6 = (__pyx_v_v6 + __pyx_f_8fatiando_7gravmag_10_polyprism_kernelzz(__pyx_v_X1, __pyx_v_Y1, __pyx_v_X2, __pyx_v_Y2, __pyx_v_Z1, __pyx_v_Z2, __pyx_v_Z1_sqr, __pyx_v_Z2_sqr));
          }

          /* "fatiando/gravmag/_polyprism.pyx":692
           *                 v5 += kernelyz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
           *                 v6 += kernelzz(X1, Y1, X2, Y2, Z1, Z2, Z1_sqr, Z2_sqr)
           *             res[i] += (v3*mx + v5*my + v6*mz)             # <<<<<<<<<<<<<<
           */
          __pyx_t_7 = __pyx_v_i;
          *__Pyx_BufPtrStrided1d(__pyx_t_8fatiando_7gravmag_10_polyprism_DTYPE_T *, __pyx_pybuffernd_res.rcbuffer->pybuffer.buf, __pyx_t_7, __pyx_pybuffernd_res.diminfo[0].strides) += (((__pyx_v_v3 * __pyx_v_mx) + (__pyx_v_v5 * __pyx_v_my)) + (__pyx_v_v6 * __pyx_v_mz));
          goto __pyx_L13;
          __pyx_L8_error:;
          {
            #ifdef WITH_THREAD
            PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
            #endif
            #ifdef _OPENMP
            #pragma omp flush(__pyx_parallel_exc_type)
            #endif /* _OPENMP */
            if (!__pyx_parallel_exc_type) {
              __Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
              __pyx_parallel_filename = __pyx_filename;
              __pyx_parallel_lineno = __pyx_lineno;
              __pyx_parallel_clineno = __pyx_clineno;
              __Pyx_GOTREF(__pyx_parallel_exc_type);
            }
            #ifdef WITH_THREAD
            PyGILState_Release(__pyx_gilstate_save);
            #endif
          }
          __pyx_parallel_why = 4;
          goto __pyx_L12;
          __pyx_L12:;
          #ifdef _OPENMP
          #pragma omp critical(__pyx_parallel_lastprivates10)
          #endif /* _OPENMP */
          {
            __pyx_parallel_temp0 = __pyx_v_v3;
            __pyx_parallel_temp1 = __pyx_v_X2;
            __pyx_parallel_temp2 = __pyx_v_Z1_sqr;
            __pyx_parallel_temp3 = __pyx_v_kp1;
            __pyx_parallel_temp4 = __pyx_v_X1;
            __pyx_parallel_temp5 = __pyx_v_Z1;
            __pyx_parallel_temp6 = __pyx_v_Z2_sqr;
            __pyx_parallel_temp7 = __pyx_v_v6;
            __pyx_parallel_temp8 = __pyx_v_k;
            __pyx_parallel_temp9 = __pyx_v_Z2;
            __pyx_parallel_temp10 = __pyx_v_i;
            __pyx_parallel_temp11 = __pyx_v_v5;
            __pyx_parallel_temp12 = __pyx_v_Y2;
            __pyx_parallel_temp13 = __pyx_v_Y1;
          }
          __pyx_L13:;
          #ifdef _OPENMP
          #pragma omp flush(__pyx_parallel_why)
          #endif /* _OPENMP */
        }
      }
      #ifdef _OPENMP
      Py_END_ALLOW_THREADS
      #else
      {
      #ifdef WITH_THREAD
      PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
      #endif
      #endif /* _OPENMP */
      /* Clean up any temporaries */
      #ifdef WITH_THREAD
      PyGILState_Release(__pyx_gilstate_save);
      #endif
      #ifndef _OPENMP
      }
      #endif /* _OPENMP */
    }
  }
  if (__pyx_parallel_exc_type) {
    /* This may have been overridden by a continue, break or return in another thread.  Prefer the error. */
    __pyx_parallel_why = 4;
  }
  if (__pyx_parallel_why) {
    __pyx_v_v3 = __pyx_parallel_temp0;
    __pyx_v_X2 = __pyx_parallel_temp1;
    __pyx_v_Z1_sqr = __pyx_parallel_temp2;
    __pyx_v_kp1 = __pyx_parallel_temp3;
    __pyx_v_X1 = __pyx_parallel_temp4;
    __pyx_v_Z1 = __pyx_parallel_temp5;
    __pyx_v_Z2_sqr = __pyx_parallel_temp6;
    __pyx_v_v6 = __pyx_parallel_temp7;
    __pyx_v_k = __pyx_parallel_temp8;
    __pyx_v_Z2 = __pyx_parallel_temp9;
    __pyx_v_i = __pyx_parallel_temp10;
    __pyx_v_v5 = __pyx_parallel_temp11;
    __pyx_v_Y2 = __pyx_parallel_temp12;
    __pyx_v_Y1 = __pyx_parallel_tem13;
    switch (__pyx_parallel_why) {
      case 3: goto __pyx_L3_return;
      case 4:
      {
        #ifdef WITH_THREAD
        PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
        #endif
        __Pyx_GIVEREF(__pyx_parallel_exc_type);
        __Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
        __pyx_filename = __pyx_parallel_filename;
        __pyx_lineno = __pyx_parallel_lineno;
        __pyx_clineno = __pyx_parallel_clineno;
        #ifdef WITH_THREAD
        PyGILState_Release(__pyx_gilstate_save);
        #endif
      }
      goto __pyx_L4_error;
    }
  }
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}

/* "fatiando/gravmag/_polyprism.pyx":674
 *     nverts = len(x)
 *     size = len(res)
 *     with nogil:             # <<<<<<<<<<<<<<
 *         for i in prange(size):
 *             Z1 = z1 - zp[i]
 */
/* finally: */
{
  /* normal exit: */
  {
    #ifdef WITH_THREAD
    Py_BLOCK_THREADS
    #endif
    goto __pyx_L5;
  }
  __pyx_L3_return: {
    #ifdef WITH_THREAD
    Py_BLOCK_THREADS
    #endif
    goto __pyx_L0;
  }
  __pyx_L4_error: {
    #ifdef WITH_THREAD
    Py_BLOCK_THREADS
    #endif
    goto __pyx_L1_error;
  }
  __pyx_L5:;
}
}

/* "fatiando/gravmag/_polyprism.pyx":660
 * @cython.wraparound(False)
 * @cython.boundscheck(False)
 * def bz(numpy.ndarray[DTYPE_T, ndim=1] xp not None,             # <<<<<<<<<<<<<<
 *        numpy.ndarray[DTYPE_T, ndim=1] yp not None,
 *        numpy.ndarray[DTYPE_T, ndim=1] zp not None,
 */

/* function exit code */
__pyx_r = Py_None;
__Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
{
  PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
  __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer);
  __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer);
  __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);
}
__Pyx_AddTraceback("fatiando.gravmag._polyprism.bz", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
goto __pyx_L2;
__pyx_L0:;
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_res.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_x.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_xp.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_y.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_yp.rcbuffer->pybuffer);
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_zp.rcbuffer->pybuffer);
__pyx_L2:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
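/*
 * Editor's note -- illustrative sketch only, not part of the generated
 * module.  The machinery above is what Cython emits for the reduction
 * loop "with nogil: for i in prange(size): ..." in
 * fatiando/gravmag/_polyprism.pyx: per-thread privates, lastprivate
 * spill/restore blocks, and a shared __pyx_parallel_why flag for early
 * exit.  Hand-written, the same OpenMP pattern (one independent
 * iteration per observation point, per-iteration accumulators, a
 * modular walk over the polygon vertices) is only a few lines of C.
 * The names polyprism_bz_sketch and kernel_stub are hypothetical;
 * kernel_stub merely stands in for the real kernelxz/kernelyz/kernelzz
 * edge integrals.
 */
static double
kernel_stub(double X1, double Y1, double X2, double Y2, double Z1, double Z2)
{
    /* placeholder for one per-edge kernel evaluation */
    return (X1 * Y2 - X2 * Y1) * (Z2 - Z1);
}

static void
polyprism_bz_sketch(long size, long nverts,
                    const double *x, const double *y,
                    const double *xp, const double *yp, const double *zp,
                    double z1, double z2,
                    double mx, double my, double mz, double *res)
{
    long i;
#ifdef _OPENMP
#pragma omp parallel for                 /* iterations are independent */
#endif
    for (i = 0; i < size; i++) {
        double Z1 = z1 - zp[i], Z2 = z2 - zp[i];
        double v3 = 0.0, v5 = 0.0, v6 = 0.0;   /* per-iteration accumulators */
        long k;
        for (k = 0; k < nverts; k++) {
            long kp1 = (k + 1) % nverts;       /* wrap around the polygon */
            double X1 = x[k] - xp[i],   Y1 = y[k] - yp[i];
            double X2 = x[kp1] - xp[i], Y2 = y[kp1] - yp[i];
            v3 += kernel_stub(X1, Y1, X2, Y2, Z1, Z2);
            v5 += kernel_stub(Y1, X1, Y2, X2, Z1, Z2);
            v6 += kernel_stub(X1, Y1, X2, Y2, Z2, Z1);
        }
        res[i] += v3 * mx + v5 * my + v6 * mz; /* one output per station */
    }
}

/*
 * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194
 *             #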
experimental exception made for __getbuffer__ and * __releasebuffer__ # -- the details of this may change. def * __getbuffer__(ndarray self, Py_buffer* info, int flags): # * <<<<<<<<<<<<<< # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject * __pyx_v_self, Py_buffer * __pyx_v_info, int __pyx_v_flags); /* proto */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject * __pyx_v_self, Py_buffer * __pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *) __pyx_v_self), ((Py_buffer *) __pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject * __pyx_v_self, Py_buffer * __pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":200 # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = ((__pyx_v_info == NULL) != 0); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":203 * * cdef int copy_shape, i, ndim cdef int endian_detector = 1 # * <<<<<<<<<<<<<< cdef bint little_endian = ((<char*>&endian_detector)[0] * != 0) * */ __pyx_v_endian_detector = 1; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":204 cdef int copy_shape, i, ndim cdef int * endian_detector = 1 cdef bint little_endian = * ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":206 cdef bint little_endian = * ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":208 ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":209 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): copy_shape = 1 * # <<<<<<<<<<<<<< else: copy_shape = 0 */ __pyx_v_copy_shape = 1; goto __pyx_L4; } /* else */ { /* * 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":211 copy_shape = 1 else: copy_shape = 0 * # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_copy_shape = 0; } __pyx_L4:; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":213 copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * # <<<<<<<<<<<<<< and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_1 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_1) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":214 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # * <<<<<<<<<<<<<< raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_3 = __pyx_t_2; } else { __pyx_t_3 = __pyx_t_1; } if (__pyx_t_3) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":215 if ((flags & pybuf.PyBUF_C_CONTIGUOUS == * pybuf.PyBUF_C_CONTIGUOUS) and not PyArray_CHKFLAGS(self, * NPY_C_CONTIGUOUS)): raise ValueError(u"ndarray is not C * contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; { __pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":217 raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * # <<<<<<<<<<<<<< and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_3 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_3) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":218 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # * <<<<<<<<<<<<<< raise ValueError(u"ndarray is not Fortran * contiguous") * */ __pyx_t_1 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_2 = __pyx_t_1; } else { __pyx_t_2 = __pyx_t_3; } if (__pyx_t_2) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":219 if ((flags & pybuf.PyBUF_F_CONTIGUOUS == * pybuf.PyBUF_F_CONTIGUOUS) and not PyArray_CHKFLAGS(self, * NPY_F_CONTIGUOUS)): raise ValueError(u"ndarray is not Fortran * contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; { __pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } /* * 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":221 raise ValueError(u"ndarray is not Fortran * contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< info.ndim = * ndim if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":222 * * info.buf = PyArray_DATA(self) info.ndim = ndim # * <<<<<<<<<<<<<< if copy_shape: # Allocate new buffer for strides and * shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":223 info.buf = PyArray_DATA(self) info.ndim = ndim if * copy_shape: # <<<<<<<<<<<<<< # Allocate new buffer for * strides and shape info. # This is allocated as one block, strides * first. */ __pyx_t_2 = (__pyx_v_copy_shape != 0); if (__pyx_t_2) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":226 # Allocate new buffer for strides and * shape info. # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * * <size_t>ndim * 2) # <<<<<<<<<<<<<< info.shape = * info.strides + ndim for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *) malloc((((sizeof(Py_ssize_t)) * ((size_t) __pyx_v_ndim)) * 2))); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":227 # This is allocated as one block, strides * first. info.strides = * <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< for * i in range(ndim): info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":228 info.strides = * <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim for i in range(ndim): * # <<<<<<<<<<<<<< info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6 += 1) { __pyx_v_i = __pyx_t_6; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":229 info.shape = info.strides + ndim * for i in range(ndim): info.strides[i] = * PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":230 for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] info.shape[i] = * PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } goto __pyx_L7; } /* else */ { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":232 info.shape[i] = PyArray_DIMS(self)[i] * else: info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * # <<<<<<<<<<<<<< info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ __pyx_v_info->strides = ((Py_ssize_t *) PyArray_STRIDES(__pyx_v_self)); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":233 else: info.strides = * <Py_ssize_t*>PyArray_STRIDES(self) info.shape = * <Py_ssize_t*>PyArray_DIMS(self) 
# <<<<<<<<<<<<<< * info.suboffsets = NULL info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *) PyArray_DIMS(__pyx_v_self)); } __pyx_L7:; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":234 info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) info.suboffsets = NULL * # <<<<<<<<<<<<<< info.itemsize = PyArray_ITEMSIZE(self) info.readonly * = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":235 info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL info.itemsize = PyArray_ITEMSIZE(self) * # <<<<<<<<<<<<<< info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":236 info.suboffsets = NULL info.itemsize = * PyArray_ITEMSIZE(self) info.readonly = not PyArray_ISWRITEABLE(self) * # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":239 * * cdef int t cdef char* f = NULL # <<<<<<<<<<<<<< cdef dtype * descr = self.descr cdef list stack */ __pyx_v_f = NULL; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":240 cdef int t cdef char* f = NULL cdef dtype descr = * self.descr # <<<<<<<<<<<<<< cdef list stack cdef int * offset */ __pyx_t_4 = ((PyObject *) __pyx_v_self->descr); __Pyx_INCREF(__pyx_t_4); __pyx_v_descr = ((PyArray_Descr *) __pyx_t_4); __pyx_t_4 = 0; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":244 cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # * <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":246 cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< # do * not call releasebuffer info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { __pyx_t_3 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_3; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":248 if not hasfields and not copy_shape: # do * not call releasebuffer info.obj = None # * <<<<<<<<<<<<<< else: # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; goto __pyx_L10; } /* else */ { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":251 else: # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ __Pyx_INCREF(((PyObject *) __pyx_v_self)); __Pyx_GIVEREF(((PyObject *) __pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *) __pyx_v_self); } __pyx_L10:; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":253 info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< t = descr.type_num if * ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = 
((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":254 * * if not hasfields: t = descr.type_num # <<<<<<<<<<<<<< if * ((descr.byteorder == c'>' and little_endian) or (descr.byteorder * == c'<' and not little_endian)): */ __pyx_t_5 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_5; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":255 if not hasfields: t = descr.type_num if * ((descr.byteorder == c'>' and little_endian) or # * <<<<<<<<<<<<<< (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_1 = ((__pyx_v_descr->byteorder == '>') != 0); if (__pyx_t_1) { __pyx_t_2 = (__pyx_v_little_endian != 0); } else { __pyx_t_2 = __pyx_t_1; } if (!__pyx_t_2) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":256 t = descr.type_num if * ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * # <<<<<<<<<<<<<< raise ValueError(u"Non-native byte order not * supported") if t == NPY_BYTE: f = "b" */ __pyx_t_1 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_1) { __pyx_t_3 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_7 = __pyx_t_3; } else { __pyx_t_7 = __pyx_t_1; } __pyx_t_1 = __pyx_t_7; } else { __pyx_t_1 = __pyx_t_2; } if (__pyx_t_1) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":257 if ((descr.byteorder == c'>' and * little_endian) or (descr.byteorder == c'<' and not * little_endian)): raise ValueError(u"Non-native byte order not * supported") # <<<<<<<<<<<<<< if t == NPY_BYTE: * f = "b" elif t == NPY_UBYTE: f = "B" */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; { __pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":274 elif t == NPY_CDOUBLE: f = "Zd" elif * t == NPY_CLONGDOUBLE: f = "Zg" elif t == NPY_OBJECT: f = "O" * # <<<<<<<<<<<<<< else: raise ValueError(u"unknown dtype code in * numpy.pxd (%d)" % t) */ switch (__pyx_v_t) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":258 (descr.byteorder == c'<' and not * little_endian)): raise ValueError(u"Non-native byte order not * supported") if t == NPY_BYTE: f = "b" # * <<<<<<<<<<<<<< elif t == NPY_UBYTE: f = "B" elif t == * NPY_SHORT: f = "h" */ case NPY_BYTE: __pyx_v_f = __pyx_k_b; break; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":259 raise ValueError(u"Non-native * byte order not supported") if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # * <<<<<<<<<<<<<< elif t == NPY_SHORT: f = "h" elif t == * NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = __pyx_k_B; break; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":260 if t == NPY_BYTE: f = * "b" elif t == NPY_UBYTE: f = "B" elif t == NPY_SHORT: * f = "h" # <<<<<<<<<<<<<< elif t == NPY_USHORT: * f = "H" elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = __pyx_k_h; break; /* * 
"/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":261 elif t == NPY_UBYTE: f = * "B" elif t == NPY_SHORT: f = "h" elif t == NPY_USHORT: * f = "H" # <<<<<<<<<<<<<< elif t == NPY_INT: * f = "i" elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = __pyx_k_H; break; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":262 elif t == NPY_SHORT: f = * "h" elif t == NPY_USHORT: f = "H" elif t == NPY_INT: * f = "i" # <<<<<<<<<<<<<< elif t == NPY_UINT: * f = "I" elif t == NPY_LONG: f = "l" */ case NPY_INT: __pyx_v_f = __pyx_k_i; break; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":263 elif t == NPY_USHORT: f = * "H" elif t == NPY_INT: f = "i" elif t == NPY_UINT: * f = "I" # <<<<<<<<<<<<<< elif t == NPY_LONG: * f = "l" elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = __pyx_k_I; break; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":264 elif t == NPY_INT: f = * "i" elif t == NPY_UINT: f = "I" elif t == NPY_LONG: * f = "l" # <<<<<<<<<<<<<< elif t == NPY_ULONG: * f = "L" elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: __pyx_v_f = __pyx_k_l; break; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":265 elif t == NPY_UINT: f = * "I" elif t == NPY_LONG: f = "l" elif t == NPY_ULONG: * f = "L" # <<<<<<<<<<<<<< elif t == NPY_LONGLONG: * f = "q" elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = __pyx_k_L; break; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":266 elif t == NPY_LONG: f = * "l" elif t == NPY_ULONG: f = "L" elif t == NPY_LONGLONG: * f = "q" # <<<<<<<<<<<<<< elif t == NPY_ULONGLONG: * f = "Q" elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = __pyx_k_q; break; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":267 elif t == NPY_ULONG: f = * "L" elif t == NPY_LONGLONG: f = "q" elif t == * NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< elif t * == NPY_FLOAT: f = "f" elif t == NPY_DOUBLE: f = "d" */ case NPY_ULONGLONG: __pyx_v_f = __pyx_k_Q; break; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":268 elif t == NPY_LONGLONG: f = * "q" elif t == NPY_ULONGLONG: f = "Q" elif t == NPY_FLOAT: * f = "f" # <<<<<<<<<<<<<< elif t == NPY_DOUBLE: * f = "d" elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = __pyx_k_f; break; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":269 elif t == NPY_ULONGLONG: f = * "Q" elif t == NPY_FLOAT: f = "f" elif t == NPY_DOUBLE: * f = "d" # <<<<<<<<<<<<<< elif t == NPY_LONGDOUBLE: * f = "g" elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = __pyx_k_d; break; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":270 elif t == NPY_FLOAT: f = * "f" elif t == NPY_DOUBLE: f = "d" elif t == * NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< elif t * == NPY_CFLOAT: f = "Zf" elif t == NPY_CDOUBLE: f = * "Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = __pyx_k_g; break; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":271 elif t == NPY_DOUBLE: f = * "d" elif t == NPY_LONGDOUBLE: f = "g" elif t == NPY_CFLOAT: * f = "Zf" # <<<<<<<<<<<<<< elif t == NPY_CDOUBLE: * f = "Zd" elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = __pyx_k_Zf; 
break; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":272 elif t == NPY_LONGDOUBLE: f = * "g" elif t == NPY_CFLOAT: f = "Zf" elif t == NPY_CDOUBLE: * f = "Zd" # <<<<<<<<<<<<<< elif t == * NPY_CLONGDOUBLE: f = "Zg" elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = __pyx_k_Zd; break; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":273 elif t == NPY_CFLOAT: f = * "Zf" elif t == NPY_CDOUBLE: f = "Zd" elif t == * NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< elif t * == NPY_OBJECT: f = "O" else: */ case NPY_CLONGDOUBLE: __pyx_v_f = __pyx_k_Zg; break; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":274 elif t == NPY_CDOUBLE: f = * "Zd" elif t == NPY_CLONGDOUBLE: f = "Zg" elif t == NPY_OBJECT: * f = "O" # <<<<<<<<<<<<<< else: raise * ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = __pyx_k_O; break; default: /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":276 elif t == NPY_OBJECT: f = * "O" else: raise ValueError(u"unknown dtype code in numpy.pxd * (%d)" % t) # <<<<<<<<<<<<<< info.format = f return */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_4); if (unlikely(!__pyx_t_8)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_8); __Pyx_GIVEREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_8)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; { __pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error; } break; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":277 else: raise ValueError(u"unknown dtype * code in numpy.pxd (%d)" % t) info.format = f # * <<<<<<<<<<<<<< return else: */ __pyx_v_info->format = __pyx_v_f; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":278 raise ValueError(u"unknown dtype code in * numpy.pxd (%d)" % t) info.format = f return # * <<<<<<<<<<<<<< else: info.format = * <char*>stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; } /* else */ { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":280 return else: info.format = * <char*>stdlib.malloc(_buffer_format_string_len) # * <<<<<<<<<<<<<< info.format[0] = c'^' # Native data types, manual * alignment offset = 0 */ __pyx_v_info->format = ((char *)malloc(255)); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":281 else: info.format = * <char*>stdlib.malloc(_buffer_format_string_len) info.format[0] = * c'^' # Native data types, manual 
alignment # * <<<<<<<<<<<<<< offset = 0 f = _util_dtypestring(descr, info.format * + 1, */ (__pyx_v_info->format[0]) = '^'; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":282 info.format = * <char*>stdlib.malloc(_buffer_format_string_len) info.format[0] = * c'^' # Native data types, manual alignment offset = 0 * # <<<<<<<<<<<<<< f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":283 info.format[0] = c'^' # Native data * types, manual alignment offset = 0 f = _util_dtypestring(descr, * info.format + 1, # <<<<<<<<<<<<<< info.format + * _buffer_format_string_len, &offset) */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == NULL)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_f = __pyx_t_9; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":286 info.format + _buffer_format_string_len, * &offset) f[0] = c'\0' # Terminate format string # * <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":194 # experimental exception made for __getbuffer__ * and __releasebuffer__ # -- the details of this may change. def * __getbuffer__(ndarray self, Py_buffer* info, int flags): # * <<<<<<<<<<<<<< # This implementation of getbuffer is geared towards * Cython # requirements, and does not yet fullfill the PEP. 
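 *
 * Editor's note (added commentary, illustrative only): this is the
 * buffer-protocol entry point that the parallel loops above rely on to
 * see the ndarray's raw memory.  A consumer written against the same
 * protocol would obtain and release the data with the standard CPython
 * calls PyObject_GetBuffer/PyBuffer_Release; everything else in this
 * snippet (the variable names, the float64 assumption) is hypothetical:
 *
 *     Py_buffer view;
 *     if (PyObject_GetBuffer(obj, &view, PyBUF_STRIDES) == 0) {
 *         double *data = (double *) view.buf;   // assumes a float64 array
 *         // ... use data, honoring view.shape/view.strides ...
 *         PyBuffer_Release(&view);
 *     }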
*/ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *) __pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":288 f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # * <<<<<<<<<<<<<< if PyArray_HASFIELDS(self): stdlib.free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject * __pyx_v_self, Py_buffer * __pyx_v_info); /* proto */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject * __pyx_v_self, Py_buffer * __pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *) __pyx_v_self), ((Py_buffer *) __pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject * __pyx_v_self, Py_buffer * __pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":289 * * def __releasebuffer__(ndarray self, Py_buffer* info): if * PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":290 def __releasebuffer__(ndarray self, * Py_buffer* info): if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< if * sizeof(npy_intp) != sizeof(Py_ssize_t): stdlib.free(info.strides) */ free(__pyx_v_info->format); goto __pyx_L3; } __pyx_L3:; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":291 if PyArray_HASFIELDS(self): * stdlib.free(info.format) if sizeof(npy_intp) != sizeof(Py_ssize_t): * # <<<<<<<<<<<<<< stdlib.free(info.strides) # info.shape was stored * after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":292 stdlib.free(info.format) if * sizeof(npy_intp) != sizeof(Py_ssize_t): stdlib.free(info.strides) * # <<<<<<<<<<<<<< # info.shape was stored after info.strides in the * same block * */ free(__pyx_v_info->strides); goto __pyx_L4; } __pyx_L4:; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":288 f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # * <<<<<<<<<<<<<< if PyArray_HASFIELDS(self): stdlib.free(info.format) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":768 ctypedef npy_cdouble 
complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ static CYTHON_INLINE PyObject * __pyx_f_5numpy_PyArray_MultiIterNew1(PyObject * __pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject * __pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":769 * * cdef inline object PyArray_MultiIterNew1(a): return * PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":768 ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":771 return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ static CYTHON_INLINE PyObject * __pyx_f_5numpy_PyArray_MultiIterNew2(PyObject * __pyx_v_a, PyObject * __pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject * __pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":772 * * cdef inline object PyArray_MultiIterNew2(a, b): return * PyArray_MultiIterNew(2, <void*>a, <void*>b) # * <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":771 return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # * <<<<<<<<<<<<<< return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":774 return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # * <<<<<<<<<<<<<< return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> * c) * */ static CYTHON_INLINE PyObject * 
__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject * __pyx_v_a, PyObject * __pyx_v_b, PyObject * __pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject * __pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":775 * * cdef inline object PyArray_MultiIterNew3(a, b, c): return * PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # * <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":774 return PyArray_MultiIterNew(2, <void*>a, * <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # * <<<<<<<<<<<<<< return PyArray_MultiIterNew(3, <void*>a, <void*>b, * <void*> c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":777 return PyArray_MultiIterNew(3, <void*>a, <void*>b, * <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # * <<<<<<<<<<<<<< return PyArray_MultiIterNew(4, <void*>a, <void*>b, * <void*>c, <void*> d) * */ static CYTHON_INLINE PyObject * __pyx_f_5numpy_PyArray_MultiIterNew4(PyObject * __pyx_v_a, PyObject * __pyx_v_b, PyObject * __pyx_v_c, PyObject * __pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject * __pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":778 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): return * PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":777 return PyArray_MultiIterNew(3, <void*>a, * <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # * <<<<<<<<<<<<<< return PyArray_MultiIterNew(4, <void*>a, <void*>b, * <void*>c, <void*> d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":780 return 
PyArray_MultiIterNew(4, <void*>a, <void*>b, * <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # * <<<<<<<<<<<<<< return PyArray_MultiIterNew(5, <void*>a, <void*>b, * <void*>c, <void*> d, <void*> e) * */ static CYTHON_INLINE PyObject * __pyx_f_5numpy_PyArray_MultiIterNew5(PyObject * __pyx_v_a, PyObject * __pyx_v_b, PyObject * __pyx_v_c, PyObject * __pyx_v_d, PyObject * __pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject * __pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":781 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): return * PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, * <void*> e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* * offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":780 return PyArray_MultiIterNew(4, <void*>a, * <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # * <<<<<<<<<<<<<< return PyArray_MultiIterNew(5, <void*>a, <void*>b, * <void*>c, <void*> d, <void*> e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":783 return PyArray_MultiIterNew(5, <void*>a, <void*>b, * <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* * offset) except NULL: # <<<<<<<<<<<<<< # Recursive utility * function used in __getbuffer__ to get format # string. The new location in * the format string is returned. 
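 *
 * Editor's note (added commentary, hedged): for a structured dtype the
 * routine below walks descr.fields, pads with 'x' bytes up to each
 * field offset, and appends one struct-style code per leaf field.  For
 * example, and depending on alignment, something like:
 *
 *     np.dtype([('a', '<i4'), ('b', '<f8')])              ->  "id"
 *     np.dtype([('a', '<i4'), ('b', '<f8')], align=True)  ->  "ixxxxd"
 *
 * with the leading '^' (native, unaligned) written by __getbuffer__
 * itself before this helper is called.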
*/ static CYTHON_INLINE char * __pyx_f_5numpy__util_dtypestring(PyArray_Descr * __pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject * __pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; long __pyx_t_10; char *__pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":790 cdef int delta_offset cdef tuple i cdef int * endian_detector = 1 # <<<<<<<<<<<<<< cdef bint * little_endian = ((<char*>&endian_detector)[0] != 0) cdef tuple fields */ __pyx_v_endian_detector = 1; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":791 cdef tuple i cdef int endian_detector = 1 cdef * bint little_endian = ((<char*>&endian_detector)[0] != 0) # * <<<<<<<<<<<<<< cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":794 cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< fields = * descr.fields[childname] child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); { __pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error; } #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error; } #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":795 * * for childname in descr.names: fields = descr.fields[childname] * # <<<<<<<<<<<<<< child, new_offset = fields * */ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error; }; __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3)) || ((__pyx_t_3) == Py_None) || (PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject *) __pyx_t_3)); __pyx_t_3 = 0; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":796 for childname in descr.names: fields = * descr.fields[childname] child, new_offset = fields # * <<<<<<<<<<<<<< * * if (end - f) - <int>(new_offset - offset[0]) 
< 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject *sequence = __pyx_v_fields; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); { __pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); { __pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *) __pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":798 child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # * <<<<<<<<<<<<<< raise RuntimeError(u"Format string allocated too * short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: raise * RuntimeError(u"Format string allocated too short, see comment * in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; { __pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":801 raise RuntimeError(u"Format string * allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and 
little_endian) or # * <<<<<<<<<<<<<< (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_6 = ((__pyx_v_child->byteorder == '>') != 0); if (__pyx_t_6) { __pyx_t_7 = (__pyx_v_little_endian != 0); } else { __pyx_t_7 = __pyx_t_6; } if (!__pyx_t_7) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * # <<<<<<<<<<<<<< raise ValueError(u"Non-native byte order not * supported") # One could encode it in the format string and * have Cython */ __pyx_t_6 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_6) { __pyx_t_8 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_9 = __pyx_t_8; } else { __pyx_t_9 = __pyx_t_6; } __pyx_t_6 = __pyx_t_9; } else { __pyx_t_6 = __pyx_t_7; } if (__pyx_t_6) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":803 if ((child.byteorder == c'>' and * little_endian) or (child.byteorder == c'<' and not * little_endian)): raise ValueError(u"Non-native byte order not * supported") # <<<<<<<<<<<<<< # One could encode it * in the format string and have Cython # complain instead, BUT: * < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; { __pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":813 * * # Output padding bytes while offset[0] < new_offset: # * <<<<<<<<<<<<<< f[0] = 120 # "x"; pad byte f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":814 # Output padding bytes while * offset[0] < new_offset: f[0] = 120 # "x"; pad byte * # <<<<<<<<<<<<<< f += 1 offset[0] += 1 */ (__pyx_v_f[0]) = 120; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":815 while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":816 f[0] = 120 # "x"; pad byte f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_10 = 0; (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + 1); } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":818 offset[0] += 1 * * 
offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_10 = 0; (__pyx_v_offset[__pyx_t_10]) = ((__pyx_v_offset[__pyx_t_10]) + __pyx_v_child->elsize); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":820 offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< t = * child.type_num if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":821 * * if not PyDataType_HASFIELDS(child): t = child.type_num * # <<<<<<<<<<<<<< if end - f < 5: raise RuntimeError(u"Format * string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":822 if not * PyDataType_HASFIELDS(child): t = child.type_num if end - f < * 5: # <<<<<<<<<<<<<< raise RuntimeError(u"Format * string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/ * Includes/numpy/__init__.pxd":823 t = child.type_num if end * - f < 5: raise RuntimeError(u"Format string allocated too * short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; { __pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings if * t == NPY_BYTE: f[0] = 98 #"b" # * <<<<<<<<<<<<<< elif t == NPY_UBYTE: f[0] = 66 #"B" elif * t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L11; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":827 # Until ticket #99 is fixed, use * integers to avoid warnings if t == NPY_BYTE: f[0] = * 98 #"b" elif t == NPY_UBYTE: f[0] = 66 #"B" * # <<<<<<<<<<<<<< elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error; } 
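/*
 * Reader's guide to the if/elif ladder running through this stretch: `t`
 * holds the boxed child.type_num, and each branch compares it against one
 * NPY_* constant via PyObject_RichCompare, then stores the ASCII code of
 * the matching struct-format character into the output buffer `f` (98 is
 * 'b', 66 is 'B', 104 is 'h', and so on; the complex types emit the
 * two-character "Zf", "Zd", "Zg" forms).  A minimal unboxed sketch of the
 * same mapping, an illustration only and not part of the generated module,
 * assuming `type_num` were a plain C int:
 *
 *     switch (type_num) {
 *       case NPY_BYTE:   f[0] = 'b'; break;
 *       case NPY_UBYTE:  f[0] = 'B'; break;
 *       case NPY_SHORT:  f[0] = 'h'; break;
 *       case NPY_FLOAT:  f[0] = 'f'; break;
 *       case NPY_CFLOAT: f[0] = 'Z'; f[1] = 'f'; f += 1; break;
 *       default: break;  (remaining NPY_* cases elided)
 *     }
 *
 * The boxed comparisons are emitted because `t` is an untyped Python
 * object in the .pxd source, which keeps every branch on the uniform
 * __pyx_L1_error cleanup path.
 */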
__Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L11; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":828 if t == NPY_BYTE: f[0] = * 98 #"b" elif t == NPY_UBYTE: f[0] = 66 #"B" elif t == * NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" elif t == NPY_INT: * f[0] = 105 #"i" */ __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 104; goto __pyx_L11; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":829 elif t == NPY_UBYTE: f[0] = * 66 #"B" elif t == NPY_SHORT: f[0] = 104 #"h" elif t == * NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" elif t == NPY_UINT: * f[0] = 73 #"I" */ __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L11; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":830 elif t == NPY_SHORT: f[0] = * 104 #"h" elif t == NPY_USHORT: f[0] = 72 #"H" elif t == * NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" elif t == NPY_LONG: * f[0] = 108 #"l" */ __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); 
__pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 105; goto __pyx_L11; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":831 elif t == NPY_USHORT: f[0] = * 72 #"H" elif t == NPY_INT: f[0] = 105 #"i" elif t == * NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" elif t == * NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L11; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":832 elif t == NPY_INT: f[0] = * 105 #"i" elif t == NPY_UINT: f[0] = 73 #"I" elif t == * NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" elif t == * NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 108; goto __pyx_L11; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":833 elif t == NPY_UINT: f[0] = * 73 #"I" elif t == NPY_LONG: f[0] = 108 #"l" elif t == * NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" elif t == * NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L11; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":834 elif t == NPY_LONG: f[0] = * 108 #"l" elif t == NPY_ULONG: f[0] = 76 #"L" elif t == * NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" elif t == * NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) { __pyx_filename = 
__pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 113; goto __pyx_L11; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":835 elif t == NPY_ULONG: f[0] = * 76 #"L" elif t == NPY_LONGLONG: f[0] = 113 #"q" elif t == * NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" elif t == * NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L11; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":836 elif t == NPY_LONGLONG: f[0] = * 113 #"q" elif t == NPY_ULONGLONG: f[0] = 81 #"Q" elif t == * NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" elif t == * NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 102; goto __pyx_L11; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":837 elif t == NPY_ULONGLONG: f[0] = * 81 #"Q" elif t == NPY_FLOAT: f[0] = 102 #"f" elif t == * NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" elif t == * NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if 
(unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 100; goto __pyx_L11; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":838 elif t == NPY_FLOAT: f[0] = * 102 #"f" elif t == NPY_DOUBLE: f[0] = 100 #"d" elif t == * NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 103; goto __pyx_L11; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":839 elif t == NPY_DOUBLE: f[0] = * 100 #"d" elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" elif t == * NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * # <<<<<<<<<<<<<< elif t == NPY_CDOUBLE: f[0] = 90; f[1] = * 100; f += 1 # Zd elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = * 103; f += 1 # Zg */ __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 102; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":840 elif t == NPY_LONGDOUBLE: f[0] = * 103 #"g" elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f * += 1 # Zf elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f * += 1 # Zd # <<<<<<<<<<<<<< elif t == * NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg elif t == * NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; 
(__pyx_v_f[1]) = 100; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":841 elif t == NPY_CFLOAT: f[0] = * 90; f[1] = 102; f += 1 # Zf elif t == NPY_CDOUBLE: f[0] = * 90; f[1] = 100; f += 1 # Zd elif t == NPY_CLONGDOUBLE: f[0] = * 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< elif * t == NPY_OBJECT: f[0] = 79 #"O" else: */ __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 103; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L11; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":842 elif t == NPY_CDOUBLE: f[0] = * 90; f[1] = 100; f += 1 # Zd elif t == NPY_CLONGDOUBLE: f[0] = * 90; f[1] = 103; f += 1 # Zg elif t == NPY_OBJECT: f[0] = * 79 #"O" # <<<<<<<<<<<<<< else: raise * ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L11; } /* else */ { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/ * Includes/numpy/__init__.pxd":844 elif t == NPY_OBJECT: * f[0] = 79 #"O" else: raise ValueError(u"unknown dtype code * in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< f += * 1 else: */ __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; { __pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } __pyx_L11:; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * 
udes/numpy/__init__.pxd":845 else: raise ValueError(u"unknown * dtype code in numpy.pxd (%d)" % t) f += 1 # * <<<<<<<<<<<<<< else: # Cython ignores struct boundary * information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L9; } /* else */ { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Incl * udes/numpy/__init__.pxd":849 # Cython ignores struct boundary * information ("T{...}"), # so don't output it f = * _util_dtypestring(child, f, end, offset) # * <<<<<<<<<<<<<< return f * */ __pyx_t_11 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_11 == NULL)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_v_f = __pyx_t_11; } __pyx_L9:; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":850 # so don't output it f = _util_dtypestring(child, * f, end, offset) return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":783 return PyArray_MultiIterNew(5, <void*>a, * <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* * offset) except NULL: # <<<<<<<<<<<<<< # Recursive utility * function used in __getbuffer__ to get format # string. The new * location in the format string is returned. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *) __pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # * <<<<<<<<<<<<<< cdef PyObject* baseptr if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject * __pyx_v_arr, PyObject * __pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":968 cdef inline void set_array_base(ndarray arr, * object base): cdef PyObject* baseptr if base is None: # * <<<<<<<<<<<<<< baseptr = NULL else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":969 cdef PyObject* baseptr if base is None: * baseptr = NULL # <<<<<<<<<<<<<< else: Py_INCREF(base) * # important to do this before decref below! */ __pyx_v_baseptr = NULL; goto __pyx_L3; } /* else */ { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":971 baseptr = NULL else: Py_INCREF(base) # * important to do this before decref below! # * <<<<<<<<<<<<<< baseptr = <PyObject*>base Py_XDECREF(arr.base) */ Py_INCREF(__pyx_v_base); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":972 else: Py_INCREF(base) # important to do * this before decref below! 
baseptr = <PyObject*>base # * <<<<<<<<<<<<<< Py_XDECREF(arr.base) arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *) __pyx_v_base); } __pyx_L3:; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":973 Py_INCREF(base) # important to do this before * decref below! baseptr = <PyObject*>base Py_XDECREF(arr.base) * # <<<<<<<<<<<<<< arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":974 baseptr = <PyObject*>base Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # * <<<<<<<<<<<<<< cdef PyObject* baseptr if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/_ * _init__.pxd":976 arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: return None */ static CYTHON_INLINE PyObject * __pyx_f_5numpy_get_array_base(PyArrayObject * __pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): if arr.base is NULL: * # <<<<<<<<<<<<<< return None else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":978 cdef inline object get_array_base(ndarray * arr): if arr.base is NULL: return None # * <<<<<<<<<<<<<< else: return <object>arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; } /* else */ { /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes * /numpy/__init__.pxd":980 return None else: return <object>arr.base * # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *) __pyx_v_arr->base)); __pyx_r = ((PyObject *) __pyx_v_arr->base); goto __pyx_L0; } /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":976 arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # * <<<<<<<<<<<<<< if arr.base is NULL: return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 {PyObject_HEAD_INIT(NULL) NULL, 0, NULL}, #else PyModuleDef_HEAD_INIT, #endif __Pyx_NAMESTR("_polyprism"), __Pyx_DOCSTR(__pyx_k_This_is_a_Cython_implementation), /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */ , NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_DTYPE, __pyx_k_DTYPE, sizeof(__pyx_k_DTYPE), 0, 0, 1, 1}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, 
sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_X1, __pyx_k_X1, sizeof(__pyx_k_X1), 0, 0, 1, 1}, {&__pyx_n_s_X2, __pyx_k_X2, sizeof(__pyx_k_X2), 0, 0, 1, 1}, {&__pyx_n_s_Y1, __pyx_k_Y1, sizeof(__pyx_k_Y1), 0, 0, 1, 1}, {&__pyx_n_s_Y2, __pyx_k_Y2, sizeof(__pyx_k_Y2), 0, 0, 1, 1}, {&__pyx_n_s_Z1, __pyx_k_Z1, sizeof(__pyx_k_Z1), 0, 0, 1, 1}, {&__pyx_n_s_Z1_sqr, __pyx_k_Z1_sqr, sizeof(__pyx_k_Z1_sqr), 0, 0, 1, 1}, {&__pyx_n_s_Z2, __pyx_k_Z2, sizeof(__pyx_k_Z2), 0, 0, 1, 1}, {&__pyx_n_s_Z2_sqr, __pyx_k_Z2_sqr, sizeof(__pyx_k_Z2_sqr), 0, 0, 1, 1}, {&__pyx_n_s_bx, __pyx_k_bx, sizeof(__pyx_k_bx), 0, 0, 1, 1}, {&__pyx_n_s_by, __pyx_k_by, sizeof(__pyx_k_by), 0, 0, 1, 1}, {&__pyx_n_s_bz, __pyx_k_bz, sizeof(__pyx_k_bz), 0, 0, 1, 1}, {&__pyx_n_s_density, __pyx_k_density, sizeof(__pyx_k_density), 0, 0, 1, 1}, {&__pyx_n_s_fatiando_gravmag__polyprism, __pyx_k_fatiando_gravmag__polyprism, sizeof(__pyx_k_fatiando_gravmag__polyprism), 0, 0, 1, 1}, {&__pyx_n_s_float, __pyx_k_float, sizeof(__pyx_k_float), 0, 0, 1, 1}, {&__pyx_n_s_fx, __pyx_k_fx, sizeof(__pyx_k_fx), 0, 0, 1, 1}, {&__pyx_n_s_fy, __pyx_k_fy, sizeof(__pyx_k_fy), 0, 0, 1, 1}, {&__pyx_n_s_fz, __pyx_k_fz, sizeof(__pyx_k_fz), 0, 0, 1, 1}, {&__pyx_n_s_gxx, __pyx_k_gxx, sizeof(__pyx_k_gxx), 0, 0, 1, 1}, {&__pyx_n_s_gxy, __pyx_k_gxy, sizeof(__pyx_k_gxy), 0, 0, 1, 1}, {&__pyx_n_s_gxz, __pyx_k_gxz, sizeof(__pyx_k_gxz), 0, 0, 1, 1}, {&__pyx_n_s_gyy, __pyx_k_gyy, sizeof(__pyx_k_gyy), 0, 0, 1, 1}, {&__pyx_n_s_gyz, __pyx_k_gyz, sizeof(__pyx_k_gyz), 0, 0, 1, 1}, {&__pyx_n_s_gz, __pyx_k_gz, sizeof(__pyx_k_gz), 0, 0, 1, 1}, {&__pyx_n_s_gzz, __pyx_k_gzz, sizeof(__pyx_k_gzz), 0, 0, 1, 1}, {&__pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_k_home_leo_src_fatiando_fatiando, sizeof(__pyx_k_home_leo_src_fatiando_fatiando), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, {&__pyx_n_s_kernel, __pyx_k_kernel, sizeof(__pyx_k_kernel), 0, 0, 1, 1}, {&__pyx_n_s_kp1, __pyx_k_kp1, sizeof(__pyx_k_kp1), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_mx, __pyx_k_mx, sizeof(__pyx_k_mx), 0, 0, 1, 1}, {&__pyx_n_s_my, __pyx_k_my, sizeof(__pyx_k_my), 0, 0, 1, 1}, {&__pyx_n_s_mz, __pyx_k_mz, sizeof(__pyx_k_mz), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_nverts, __pyx_k_nverts, sizeof(__pyx_k_nverts), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_releasebuffer, __pyx_k_pyx_releasebuffer, sizeof(__pyx_k_pyx_releasebuffer), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_res, __pyx_k_res, sizeof(__pyx_k_res), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, 
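/*
 * The rows above and below read roughly as
 *
 *     { target slot, byte data, byte length,
 *       encoding, is_unicode, is_str, intern }
 *
 * (field names per the __Pyx_StringTabEntry layout used by Cython runtimes
 * of this era; an assumption worth checking against the exact Cython
 * version).  So {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}
 * creates an interned identifier string and stores it in __pyx_n_s_x,
 * while the __pyx_kp_u rows (is_unicode = 1) become the unicode
 * error-message constants raised earlier in the file.  __Pyx_InitStrings()
 * fills the whole table once at module-import time.
 */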
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_tf, __pyx_k_tf, sizeof(__pyx_k_tf), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_v1, __pyx_k_v1, sizeof(__pyx_k_v1), 0, 0, 1, 1}, {&__pyx_n_s_v2, __pyx_k_v2, sizeof(__pyx_k_v2), 0, 0, 1, 1}, {&__pyx_n_s_v3, __pyx_k_v3, sizeof(__pyx_k_v3), 0, 0, 1, 1}, {&__pyx_n_s_v4, __pyx_k_v4, sizeof(__pyx_k_v4), 0, 0, 1, 1}, {&__pyx_n_s_v5, __pyx_k_v5, sizeof(__pyx_k_v5), 0, 0, 1, 1}, {&__pyx_n_s_v6, __pyx_k_v6, sizeof(__pyx_k_v6), 0, 0, 1, 1}, {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, {&__pyx_n_s_xp, __pyx_k_xp, sizeof(__pyx_k_xp), 0, 0, 1, 1}, {&__pyx_n_s_y, __pyx_k_y, sizeof(__pyx_k_y), 0, 0, 1, 1}, {&__pyx_n_s_yp, __pyx_k_yp, sizeof(__pyx_k_yp), 0, 0, 1, 1}, {&__pyx_n_s_z1, __pyx_k_z1, sizeof(__pyx_k_z1), 0, 0, 1, 1}, {&__pyx_n_s_z2, __pyx_k_z2, sizeof(__pyx_k_z2), 0, 0, 1, 1}, {&__pyx_n_s_zp, __pyx_k_zp, sizeof(__pyx_k_zp), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 353; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error; } return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":215 if ((flags & pybuf.PyBUF_C_CONTIGUOUS == * pybuf.PyBUF_C_CONTIGUOUS) and not PyArray_CHKFLAGS(self, * NPY_C_CONTIGUOUS)): raise ValueError(u"ndarray is not C contiguous") * # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":219 if ((flags & pybuf.PyBUF_F_CONTIGUOUS == * pybuf.PyBUF_F_CONTIGUOUS) and not PyArray_CHKFLAGS(self, * NPY_F_CONTIGUOUS)): raise ValueError(u"ndarray is not Fortran * contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":257 if ((descr.byteorder == c'>' and little_endian) * or (descr.byteorder == c'<' and not little_endian)): raise * ValueError(u"Non-native byte order not supported") # * <<<<<<<<<<<<<< if t == NPY_BYTE: f = "b" elif t == NPY_UBYTE: * f = "B" */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if 
(unlikely(!__pyx_tuple__3)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: raise * RuntimeError(u"Format string allocated too short, see comment in * numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":803 if ((child.byteorder == c'>' and little_endian) * or (child.byteorder == c'<' and not little_endian)): raise * ValueError(u"Non-native byte order not supported") # * <<<<<<<<<<<<<< # One could encode it in the format string and have * Cython # complain instead, BUT: < and > in format strings also imply */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":823 t = child.type_num if end - f < 5: raise * RuntimeError(u"Format string allocated too short.") # * <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* * "fatiando/gravmag/_polyprism.pyx":334 @cython.wraparound(False) * @cython.boundscheck(False) def gz(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__7 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__7)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); __pyx_codeobj__8 = (PyObject *) __Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gz, 334, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error; } /* * "fatiando/gravmag/_polyprism.pyx":364 @cython.wraparound(False) * @cython.boundscheck(False) def gxx(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__9 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, 
__pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__9)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); __pyx_codeobj__10 = (PyObject *) __Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__9, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxx, 364, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__10)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error; } /* * "fatiando/gravmag/_polyprism.pyx":394 @cython.wraparound(False) * @cython.boundscheck(False) def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__11 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__11)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); __pyx_codeobj__12 = (PyObject *) __Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxy, 394, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error; } /* * "fatiando/gravmag/_polyprism.pyx":424 @cython.wraparound(False) * @cython.boundscheck(False) def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__13 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__13)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); __pyx_codeobj__14 = (PyObject *) __Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gxz, 424, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error; } /* * "fatiando/gravmag/_polyprism.pyx":454 @cython.wraparound(False) * @cython.boundscheck(False) def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< 
numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__15 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__15)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); __pyx_codeobj__16 = (PyObject *) __Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gyy, 454, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error; } /* * "fatiando/gravmag/_polyprism.pyx":484 @cython.wraparound(False) * @cython.boundscheck(False) def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__17 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__17)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); __pyx_codeobj__18 = (PyObject *) __Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gyz, 484, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error; } /* * "fatiando/gravmag/_polyprism.pyx":514 @cython.wraparound(False) * @cython.boundscheck(False) def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__19 = PyTuple_Pack(23, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_density, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_kernel, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__19)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); __pyx_codeobj__20 = (PyObject *) __Pyx_PyCode_New(9, 0, 23, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_gzz, 514, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error; } /* * 
"fatiando/gravmag/_polyprism.pyx":544 @cython.wraparound(False) * @cython.boundscheck(False) def tf(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__21 = PyTuple_Pack(33, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_fx, __pyx_n_s_fy, __pyx_n_s_fz, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_v1, __pyx_n_s_v2, __pyx_n_s_v3, __pyx_n_s_v4, __pyx_n_s_v5, __pyx_n_s_v6, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__21)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); __pyx_codeobj__22 = (PyObject *) __Pyx_PyCode_New(14, 0, 33, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__21, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_tf, 544, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__22)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error; } /* * "fatiando/gravmag/_polyprism.pyx":588 @cython.wraparound(False) * @cython.boundscheck(False) def bx(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__23 = PyTuple_Pack(27, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_v1, __pyx_n_s_v2, __pyx_n_s_v3, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__23)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); __pyx_codeobj__24 = (PyObject *) __Pyx_PyCode_New(11, 0, 27, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_bx, 588, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error; } /* * "fatiando/gravmag/_polyprism.pyx":624 @cython.wraparound(False) * @cython.boundscheck(False) def by(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__25 = PyTuple_Pack(27, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_v2, __pyx_n_s_v4, __pyx_n_s_v5, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__25)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); __pyx_codeobj__26 = (PyObject *) 
__Pyx_PyCode_New(11, 0, 27, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_by, 624, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error; } /* * "fatiando/gravmag/_polyprism.pyx":660 @cython.wraparound(False) * @cython.boundscheck(False) def bz(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_tuple__27 = PyTuple_Pack(27, __pyx_n_s_xp, __pyx_n_s_yp, __pyx_n_s_zp, __pyx_n_s_x, __pyx_n_s_y, __pyx_n_s_z1, __pyx_n_s_z2, __pyx_n_s_mx, __pyx_n_s_my, __pyx_n_s_mz, __pyx_n_s_res, __pyx_n_s_nverts, __pyx_n_s_size, __pyx_n_s_i, __pyx_n_s_k, __pyx_n_s_kp1, __pyx_n_s_v3, __pyx_n_s_v5, __pyx_n_s_v6, __pyx_n_s_X1, __pyx_n_s_Y1, __pyx_n_s_X2, __pyx_n_s_Y2, __pyx_n_s_Z1, __pyx_n_s_Z2, __pyx_n_s_Z1_sqr, __pyx_n_s_Z2_sqr); if (unlikely(!__pyx_tuple__27)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_tuple__27); __Pyx_GIVEREF(__pyx_tuple__27); __pyx_codeobj__28 = (PyObject *) __Pyx_PyCode_New(11, 0, 27, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_leo_src_fatiando_fatiando, __pyx_n_s_bz, 660, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (__Pyx_InitStrings(__pyx_string_tab) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; }; return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC init_polyprism(void); /* proto */ PyMODINIT_FUNC init_polyprism(void) #else PyMODINIT_FUNC PyInit__polyprism(void); /* proto */ PyMODINIT_FUNC PyInit__polyprism(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit__polyprism(void)", 0); if (__Pyx_check_binary_version() < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } #endif #ifdef 
__Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("_polyprism"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_This_is_a_Cython_implementation), 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; }; /*--- Initialize various global constants etc. ---*/ if (unlikely(__Pyx_InitGlobals() < 0)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } #endif if (__pyx_module_is_main_fatiando__gravmag___polyprism) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; }; } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } if (!PyDict_GetItemString(modules, "fatiando.gravmag._polyprism")) { if (unlikely(PyDict_SetItemString(modules, "fatiando.gravmag._polyprism", __pyx_m) < 0)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } } } #endif /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) { __pyx_filename = __pyx_f[2]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error; } 
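/*
 * The __Pyx_ImportType calls that follow fetch numpy's extension types by
 * name and compare their object size against the sizeof() baked in at
 * compile time (the trailing flag controls how strict the comparison is),
 * so an ABI mismatch between the numpy used to build this module and the
 * numpy found at import raises an exception instead of corrupting memory.
 * A minimal hand-rolled equivalent, as a sketch only (error cleanup and
 * decrefs omitted; this is not the helper's actual body):
 *
 *     PyObject *mod = PyImport_ImportModule("numpy");
 *     PyTypeObject *tp = (PyTypeObject *)
 *         PyObject_GetAttrString(mod, "ndarray");
 *     if (tp && tp->tp_basicsize != sizeof(PyArrayObject))
 *         PyErr_SetString(PyExc_RuntimeError,
 *                         "numpy.ndarray has an unexpected size");
 */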
__pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 169; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) { __pyx_filename = __pyx_f[1]; __pyx_lineno = 861; __pyx_clineno = __LINE__; goto __pyx_L1_error; } /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* * "fatiando/gravmag/_polyprism.pyx":6 A pure python implementation is in * _polyprism_numpy.py """ import numpy # <<<<<<<<<<<<<< * * from libc.math cimport log, atan2, sqrt */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_numpy, __pyx_t_1) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* * "fatiando/gravmag/_polyprism.pyx":15 from cython.parallel cimport * prange, parallel * * DTYPE = numpy.float # <<<<<<<<<<<<<< ctypedef numpy.float_t * DTYPE_T * */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_numpy); if (unlikely(!__pyx_t_1)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_float); if (unlikely(!__pyx_t_2)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_DTYPE, __pyx_t_2) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* * "fatiando/gravmag/_polyprism.pyx":334 @cython.wraparound(False) * @cython.boundscheck(False) def gz(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_1gz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gz, __pyx_t_2) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* * "fatiando/gravmag/_polyprism.pyx":364 @cython.wraparound(False) * @cython.boundscheck(False) def 
gxx(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_3gxx, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxx, __pyx_t_2) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* * "fatiando/gravmag/_polyprism.pyx":394 @cython.wraparound(False) * @cython.boundscheck(False) def gxy(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_5gxy, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxy, __pyx_t_2) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 394; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* * "fatiando/gravmag/_polyprism.pyx":424 @cython.wraparound(False) * @cython.boundscheck(False) def gxz(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_7gxz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gxz, __pyx_t_2) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 424; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* * "fatiando/gravmag/_polyprism.pyx":454 @cython.wraparound(False) * @cython.boundscheck(False) def gyy(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_9gyy, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gyy, __pyx_t_2) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* * "fatiando/gravmag/_polyprism.pyx":484 @cython.wraparound(False) * @cython.boundscheck(False) def gyz(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_11gyz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gyz, __pyx_t_2) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 484; __pyx_clineno = __LINE__; goto __pyx_L1_error; } 
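/*
 * Editorial note: every registration block in this init function repeats one
 * pattern: wrap the generated C implementation of a Python-level def (gz,
 * gxx, gxy, ...) in a PyCFunction via PyCFunction_NewEx(), passing the
 * module-name string object so the function's __module__ attribute is set
 * correctly, then bind it under its Python name in the module dict.  A
 * disabled sketch of that pattern with hypothetical names:
 */
#if 0
static PyObject *demo_impl(PyObject *self, PyObject *args)
{
    Py_RETURN_NONE;                    /* stand-in for the real computation */
}

static PyMethodDef demo_def = {
    "demo", (PyCFunction)demo_impl, METH_VARARGS, "hypothetical docstring"
};

static int demo_register(PyObject *module_dict, PyObject *module_name)
{
    PyObject *func = PyCFunction_NewEx(&demo_def, NULL, module_name);
    if (!func)
        return -1;
    if (PyDict_SetItemString(module_dict, "demo", func) < 0) {
        Py_DECREF(func);
        return -1;
    }
    Py_DECREF(func);                   /* the dict holds its own reference */
    return 0;
}
#endif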
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* * "fatiando/gravmag/_polyprism.pyx":514 @cython.wraparound(False) * @cython.boundscheck(False) def gzz(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_13gzz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gzz, __pyx_t_2) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* * "fatiando/gravmag/_polyprism.pyx":544 @cython.wraparound(False) * @cython.boundscheck(False) def tf(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_15tf, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_tf, __pyx_t_2) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* * "fatiando/gravmag/_polyprism.pyx":588 @cython.wraparound(False) * @cython.boundscheck(False) def bx(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_17bx, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_bx, __pyx_t_2) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 588; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* * "fatiando/gravmag/_polyprism.pyx":624 @cython.wraparound(False) * @cython.boundscheck(False) def by(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_19by, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_by, __pyx_t_2) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 624; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* * "fatiando/gravmag/_polyprism.pyx":660 @cython.wraparound(False) * @cython.boundscheck(False) def bz(numpy.ndarray[DTYPE_T, ndim=1] xp * not None, # <<<<<<<<<<<<<< numpy.ndarray[DTYPE_T, ndim=1] * yp not None, numpy.ndarray[DTYPE_T, ndim=1] zp not None, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_8fatiando_7gravmag_10_polyprism_21bz, NULL, __pyx_n_s_fatiando_gravmag__polyprism); if (unlikely(!__pyx_t_2)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, 
__pyx_n_s_bz, __pyx_t_2) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* * "fatiando/gravmag/_polyprism.pyx":1 #cython: embedsignature=True * # <<<<<<<<<<<<<< """ This is a Cython implementation of the potential * fields of a polygonal prism. */ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) { __pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* * "/home/leo/bin/anaconda/lib/python2.7/site-packages/Cython/Includes/num * py/__init__.pxd":976 arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # * <<<<<<<<<<<<<< if arr.base is NULL: return None */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { __Pyx_AddTraceback("init fatiando.gravmag._polyprism", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init fatiando.gravmag._polyprism"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct * __Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *) r; } #endif /* CYTHON_REFNANNY */ static PyObject * __Pyx_GetBuiltinName(PyObject * name) { PyObject *result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } static CYTHON_INLINE void __Pyx_ErrRestore(PyObject * type, PyObject * value, PyObject * tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject ** type, PyObject ** value, PyObject ** tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = 
PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } } static void __Pyx_RaiseArgtupleInvalid( const char *func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char *func_name, PyObject * kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject * kwds, PyObject ** argnames[], PyObject * kwds2, PyObject * values[], Py_ssize_t num_pos_args, const char *function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject ***name; PyObject ***first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name - argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name - argnames] = value; break; } name++; } if (*name) continue; else { PyObject ***argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name - argnames] = value; break; } name++; } if (*name) continue; else { PyObject ***argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static void __Pyx_RaiseArgumentTypeInvalid(const char *name, PyObject * obj, PyTypeObject * type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject * obj, PyTypeObject * type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char *)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context * ctx, __Pyx_BufFmt_StackElem * stack, __Pyx_TypeInfo * type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char **ts) { int count; const char *t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) /* First char was not a digit */ PyErr_Format(PyExc_ValueError, "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char * __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ?
"'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void *); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void *); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void *); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* * These are for computing the padding at the end of the struct to align on * the first member of the struct. This will probably the same as above, but * we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void *); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context * ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char *expected; const char *quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField *field = ctx->head->field; __Pyx_StructField *parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context * ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField *field = ctx->head->field; __Pyx_TypeInfo *type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t) ctx->fmt_offset, (Py_ssize_t) offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; /* Consume from buffer string */ while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; /* breaks both loops as ctx->enc_count == 0 */ } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; /* empty struct */ field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context * ctx, const char **tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; /* not a 'break' in the loop */ } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char * __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context * ctx, const char *ts) { int got_Z = 0; while (1) { switch (*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { 
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case 10: case 13: ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': /* substruct */ { const char *ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct * element */ ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': /* end of substruct; either repeat or move on */ { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct * element */ if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } /* fall through */ case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 's': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; } else { if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; } ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while (*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t) number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer * buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer * buf, PyObject * obj, __Pyx_TypeInfo * dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem * stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { 
__Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t) dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer * info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static CYTHON_INLINE long __Pyx_mod_long(long a, long b) { long r = a % b; r += ((r != 0) & ((r ^ b) < 0)) * b; return r; } #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject * __Pyx_PyObject_Call(PyObject * func, PyObject * arg, PyObject * kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); #if PY_VERSION_HEX >= 0x02060000 if (unlikely(Py_EnterRecursiveCall((char *)" while calling a Python object"))) return NULL; #endif result = (*call) (func, arg, kw); #if PY_VERSION_HEX >= 0x02060000 Py_LeaveRecursiveCall(); #endif if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject * type, PyObject * value, PyObject * tb, CYTHON_UNUSED PyObject * cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject *) ((PyInstanceObject *) type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject *) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *) type, (PyTypeObject *) PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject * type, PyObject * value, PyObject * tb, PyObject * cause) { PyObject *owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject *) Py_TYPE(value); } else if 
(PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject *) Py_TYPE(value); if (instance_class != type) { if (PyObject_IsSubclass(instance_class, type)) { type = instance_class; } else { instance_class = NULL; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject *tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_TypeTest(PyObject * obj, PyTypeObject * type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static CYTHON_INLINE PyObject * __Pyx_GetModuleGlobalName(PyObject * name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON result = PyDict_GetItem(__pyx_d, name); if (result) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject * obj, Py_buffer * view, int flags) { #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict) { PyObject *getbuffer_cobj = PyObject_GetItem( obj->ob_type->tp_dict, __pyx_n_s_pyx_getbuffer); if (getbuffer_cobj) { getbufferproc func = (getbufferproc) PyCObject_AsVoidPtr(getbuffer_cobj); Py_DECREF(getbuffer_cobj); if (!func) goto fail; return func(obj, view, flags); } else { PyErr_Clear(); } } #endif PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); #if PY_VERSION_HEX < 0x02060000 fail: #endif return -1; } static void __Pyx_ReleaseBuffer(Py_buffer * view) { PyObject *obj = view->obj; if (!obj) return; #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } #endif if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; } #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict) { PyObject *releasebuffer_cobj = PyObject_GetItem( obj->ob_type->tp_dict, __pyx_n_s_pyx_releasebuffer); if (releasebuffer_cobj) { releasebufferproc func = (releasebufferproc) PyCObject_AsVoidPtr(releasebuffer_cobj); Py_DECREF(releasebuffer_cobj); if (!func) goto fail; func(obj, view); return; } else { PyErr_Clear(); } } #endif goto nofail; #if PY_VERSION_HEX < 0x02060000 fail: #endif PyErr_WriteUnraisable(obj); nofail: Py_DECREF(obj); view->obj = NULL; } #endif /* PY_MAJOR_VERSION < 3 */ static PyObject * __Pyx_Import(PyObject * name, PyObject * from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = 
PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } #else if (level > 0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } static CYTHON_INLINE PyObject * __Pyx_PyInt_From_unsigned_int(unsigned int value) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(unsigned int) < sizeof(long)) { return PyInt_FromLong((long)value); } else if (sizeof(unsigned int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long)value); } else if (sizeof(unsigned int) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long)value); } } else { if (sizeof(unsigned int) <= sizeof(long)) { return PyInt_FromLong((long)value); } else if (sizeof(unsigned int) <= sizeof(long long)) { return PyLong_FromLongLong((long long)value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(unsigned int), little, !is_unsigned); } } #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func) \ { \ func_type value = func(x); \ if (sizeof(target_type) < sizeof(func_type)) { \ if (unlikely(value != (func_type) (target_type) value)) { \ func_type zero = 0; \ PyErr_SetString(PyExc_OverflowError, \ (is_unsigned && unlikely(value < zero)) ? 
\ "can't convert negative value to " #target_type : \ "value too large to convert to " #target_type); \ return (target_type) -1; \ } \ } \ return (target_type) value; \ } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE unsigned int __Pyx_PyInt_As_unsigned_int(PyObject * x) { const unsigned int neg_one = (unsigned int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(unsigned int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned int"); return (unsigned int)-1; } return (unsigned int)val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(unsigned int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (unsigned int)((PyLongObject *) x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to unsigned int"); return (unsigned int)-1; } if (sizeof(unsigned int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(unsigned int) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(unsigned int, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(unsigned int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(unsigned int)((PyLongObject *) x)->ob_digit[0]; case -1: return -(unsigned int)((PyLongObject *) x)->ob_digit[0]; } } #endif #endif if (sizeof(unsigned int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long, PyLong_AsLong) } else if (sizeof(unsigned int) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(unsigned int, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else unsigned int val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *) v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (unsigned int)-1; } } else { unsigned int val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (unsigned int)-1; val = __Pyx_PyInt_As_unsigned_int(tmp); Py_DECREF(tmp); return val; } } #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return: : std: :complex < float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y * (__pyx_t_float_complex) _Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static 
CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real * z.real + z.imag * z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex<double>(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y * (__pyx_t_double_complex) _Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static
CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real * z.real + z.imag * z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif static CYTHON_INLINE PyObject * __Pyx_PyInt_From_int(int value) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long)value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long)value); } else if (sizeof(int) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long)value); } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long)value); } else if (sizeof(int) <= sizeof(long long)) { return PyLong_FromLongLong((long long)value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject * x) { const int neg_one = (int)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, 
PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int)-1; } return (int)val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (int)((PyLongObject *) x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int)-1; } if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(int) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(int)((PyLongObject *) x)->ob_digit[0]; case -1: return -(int)((PyLongObject *) x)->ob_digit[0]; } } #endif #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong) } else if (sizeof(int) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *) v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int)-1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (int)-1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PyObject * __Pyx_PyInt_From_long(long value) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long)value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long)value); } else if (sizeof(long) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long)value); } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long)value); } else if (sizeof(long) <= sizeof(long long)) { return PyLong_FromLongLong((long long)value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject * x) { const long neg_one = (long)-1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, 
"can't convert negative value to long"); return (long)-1; } return (long)val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(long)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (long)((PyLongObject *) x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long)-1; } if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(long) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(long)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(long)((PyLongObject *) x)->ob_digit[0]; case -1: return -(long)((PyLongObject *) x)->ob_digit[0]; } } #endif #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong) } else if (sizeof(long) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *) v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long)-1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long)-1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject * __Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject * __Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, 
py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *) result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t) - 1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t) basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility", module_name, class_name); #if PY_VERSION_HEX < 0x02050000 if (PyErr_Warn(NULL, warning) < 0) goto bad; #else if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; #endif } else if ((size_t) basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling", module_name, class_name); goto bad; } return (PyTypeObject *) result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry * entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = (start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject * __pyx_find_code_object(int code_line) { PyCodeObject *code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject * code_object) { int pos, i; __Pyx_CodeObjectCacheEntry *entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry *) PyMem_Malloc(64 * sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject *tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry *) PyMem_Realloc( __pyx_code_cache.entries, new_max * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i = __pyx_code_cache.count; i > pos; i--) { entries[i] = entries[i - 1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } 
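/* A minimal standalone sketch of the code-object cache above: a sorted
   array bisected on the line number, with inserts shifting entries to keep
   the order. The names (DemoEntry, demo_*) and the fixed capacity are
   illustrative only; the real cache grows its allocation in chunks of 64. */
#include <string.h>

typedef struct { int code_line; void *code_object; } DemoEntry;

static DemoEntry demo_cache[64];
static int demo_count = 0;

/* Lower-bound bisection: index of the first entry with code_line >= line. */
static int demo_bisect(int line) {
    int lo = 0, hi = demo_count;
    while (lo < hi) {
        int mid = (lo + hi) / 2;
        if (demo_cache[mid].code_line < line) lo = mid + 1; else hi = mid;
    }
    return lo;
}

static void *demo_find(int line) {
    int pos = demo_bisect(line);
    if (pos < demo_count && demo_cache[pos].code_line == line)
        return demo_cache[pos].code_object;
    return 0;
}

static void demo_insert(int line, void *obj) {
    int pos = demo_bisect(line);
    if (pos < demo_count && demo_cache[pos].code_line == line) {
        demo_cache[pos].code_object = obj;   /* overwrite existing entry */
        return;
    }
    if (demo_count == 64) return;            /* full; the real cache reallocates */
    memmove(&demo_cache[pos + 1], &demo_cache[pos],
            (demo_count - pos) * sizeof(DemoEntry));
    demo_cache[pos].code_line = line;
    demo_cache[pos].code_object = obj;
    demo_count++;
}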
#include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject * __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat("%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat("%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /* int argcount, */ 0, /* int kwonlyargcount, */ 0, /* int nlocals, */ 0, /* int stacksize, */ 0, /* int flags, */ __pyx_empty_bytes, /* PyObject *code, */ __pyx_empty_tuple, /* PyObject *consts, */ __pyx_empty_tuple, /* PyObject *names, */ __pyx_empty_tuple, /* PyObject *varnames, */ __pyx_empty_tuple, /* PyObject *freevars, */ __pyx_empty_tuple, /* PyObject *cellvars, */ py_srcfile, /* PyObject *filename, */ py_funcname, /* PyObject *name, */ py_line, /* int firstlineno, */ __pyx_empty_bytes /* PyObject *lnotab */ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /* PyThreadState *tstate, */ py_code, /* PyCodeObject *code, */ py_globals, /* PyObject *globals, */ 0 /* PyObject *locals */ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry * t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject * __Pyx_PyUnicode_FromString(char *c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, strlen(c_str)); } static CYTHON_INLINE char * __Pyx_PyObject_AsString(PyObject * o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char * __Pyx_PyObject_AsStringAndSize(PyObject * o, Py_ssize_t * length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char *defenc_c; PyObject *defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char *end = defenc_c + PyBytes_GET_SIZE(defenc); char *c; for (c = defenc_c; c < end; c++) { if ((unsigned char)(*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else /* PY_VERSION_HEX < 0x03030000 */ if (PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_DATA_SIZE(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ return PyUnicode_AsUTF8AndSize(o, length); #endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ #endif /* PY_VERSION_HEX < 0x03030000 */ } else #endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || * __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */ #if !CYTHON_COMPILING_IN_PYPY #if PY_VERSION_HEX >= 0x02060000 if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif #endif { char *result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject * x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject * __Pyx_PyNumber_Int(PyObject * x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = 
Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject * b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) return PyInt_AS_LONG(b); #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS switch (Py_SIZE(b)) { case -1: return -(sdigit) ((PyLongObject *) b)->ob_digit[0]; case 0: return 0; case 1: return ((PyLongObject *) b)->ob_digit[0]; } #endif #endif #if PY_VERSION_HEX < 0x02060000 return PyInt_AsSsize_t(b); #else return PyLong_AsSsize_t(b); #endif } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *)&ival; int one = 1; int little = (int)*(unsigned char *)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } #endif /* Py_PYTHON_H */
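/* The large-number fallbacks above (and __Pyx_PyInt_From_long) probe byte
   order at run time before handing a raw byte buffer to
   _PyLong_AsByteArray / _PyLong_FromByteArray. A standalone sketch of that
   probe; main() is illustrative only. */
#include <stdio.h>

int main(void) {
    int one = 1;
    /* On a little-endian machine the low-order byte sits first in memory,
       so reading the first byte of the int 1 yields 1. */
    int is_little = (int)*(unsigned char *)&one;
    printf("little-endian: %d\n", is_little);
    return 0;
}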
GB_binop__times_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__times_int64 // A.*B function (eWiseMult): GB_AemultB__times_int64 // A*D function (colscale): GB_AxD__times_int64 // D*A function (rowscale): GB_DxB__times_int64 // C+=B function (dense accum): GB_Cdense_accumB__times_int64 // C+=b function (dense accum): GB_Cdense_accumb__times_int64 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__times_int64 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__times_int64 // C=scalar+B GB_bind1st__times_int64 // C=scalar+B' GB_bind1st_tran__times_int64 // C=A+scalar GB_bind2nd__times_int64 // C=A'+scalar GB_bind2nd_tran__times_int64 // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x * y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_INT64 || GxB_NO_TIMES_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
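/* A minimal standalone sketch of the macro-driven template scheme above:
   the generic loop is written once against GETA/GETB/BINOP-style macros,
   and each generated file pins them to one type and operator (here int64_t
   and TIMES, matching this file). The DEMO_* names are illustrative, not
   GraphBLAS macros. */
#include <stdint.h>

#define DEMO_GETA(aij,Ax,pA) int64_t aij = Ax [pA]
#define DEMO_GETB(bij,Bx,pB) int64_t bij = Bx [pB]
#define DEMO_BINOP(z,x,y)    z = (x * y)

/* The same loop body would serve any operator once the macros are pinned. */
static void demo_emult_dense(int64_t *Cx, const int64_t *Ax,
                             const int64_t *Bx, int64_t n) {
    for (int64_t p = 0 ; p < n ; p++) {
        DEMO_GETA(aij, Ax, p) ;
        DEMO_GETB(bij, Bx, p) ;
        DEMO_BINOP(Cx [p], aij, bij) ;
    }
}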
void GB_Cdense_ewise3_accum__times_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__times_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__times_int64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__times_int64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__times_int64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__times_int64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__times_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
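/* Sketch of the compile-time opt-out used by every kernel above: when
   GB_DISABLE holds, the kernel body compiles to a stub returning
   GrB_NO_VALUE, and the caller falls back to the generic implementation.
   DEMO_DISABLE and demo_kernel are illustrative names. */
#define DEMO_DISABLE 0

static int demo_kernel(void) {
#if DEMO_DISABLE
    return (-1) ;   /* plays the role of GrB_NO_VALUE: caller must fall back */
#else
    return (0) ;    /* plays the role of GrB_SUCCESS */
#endif
}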
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__times_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__times_int64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t bij = Bx [p] ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__times_int64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB_bind1st_tran__times_int64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB_bind2nd_tran__times_int64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
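/* The GB_bind1st/GB_bind2nd kernels above distribute an elementwise loop
   with fully independent iterations across threads. A standalone sketch of
   the same OpenMP shape; demo_bind2nd is an illustrative name, not a
   GraphBLAS entry point. */
#include <stdint.h>

static void demo_bind2nd(int64_t *Cx, const int64_t *Ax, int64_t y,
                         int64_t anz, int nthreads) {
    int64_t p ;
    /* Iterations touch disjoint entries, so a static schedule splits the
       range evenly with no synchronization inside the loop. */
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++) {
        Cx [p] = Ax [p] * y ;
    }
}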
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__times_int64 // A.*B function (eWiseMult): GB_AemultB__times_int64 // A*D function (colscale): GB_AxD__times_int64 // D*A function (rowscale): GB_DxB__times_int64 // C+=B function (dense accum): GB_Cdense_accumB__times_int64 // C+=b function (dense accum): GB_Cdense_accumb__times_int64 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__times_int64 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__times_int64 // C=scalar+B GB_bind1st__times_int64 // C=scalar+B' GB_bind1st_tran__times_int64 // C=A+scalar GB_bind2nd__times_int64 // C=A'+scalar GB_bind2nd_tran__times_int64 // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x * y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_INT64 || GxB_NO_TIMES_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__times_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__times_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__times_int64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__times_int64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__times_int64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__times_int64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__times_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__times_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__times_int64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { int64_t bij = Bx [p] ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__times_int64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB_bind1st_tran__times_int64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB_bind2nd_tran__times_int64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
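/* This copy differs from the parallel one only in the removed
   "#pragma omp parallel for" lines; the loop bodies are identical. When a
   single source must serve both builds, the pragma can instead be guarded
   so non-OpenMP compilations skip it explicitly. demo_scale is an
   illustrative name. */
#include <stdint.h>

static void demo_scale(int64_t *Cx, const int64_t *Ax, int64_t y, int64_t n) {
    int64_t p ;
#if defined(_OPENMP)
    #pragma omp parallel for schedule(static)
#endif
    for (p = 0 ; p < n ; p++) {
        Cx [p] = Ax [p] * y ;
    }
}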
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__times_int64 // A.*B function (eWiseMult): GB_AemultB__times_int64 // A*D function (colscale): GB_AxD__times_int64 // D*A function (rowscale): GB_DxB__times_int64 // C+=B function (dense accum): GB_Cdense_accumB__times_int64 // C+=b function (dense accum): GB_Cdense_accumb__times_int64 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__times_int64 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__times_int64 // C=scalar+B GB_bind1st__times_int64 // C=scalar+B' GB_bind1st_tran__times_int64 // C=A+scalar GB_bind2nd__times_int64 // C=A'+scalar GB_bind2nd_tran__times_int64 // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x * y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_INT64 || GxB_NO_TIMES_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__times_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__times_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__times_int64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__times_int64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__times_int64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__times_int64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__times_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__times_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__times_int64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t bij = Bx [p] ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__times_int64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB_bind1st_tran__times_int64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB_bind2nd_tran__times_int64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
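/* GB_PRAGMA_SIMD_VECTORIZE above forwards to GB_PRAGMA_SIMD, a
   vectorization hint for the hard-coded loops; assuming an OpenMP build it
   would amount to an "omp simd" directive (an assumption here, not spelled
   out in this file). A standalone sketch of that idiom; demo_reduce is an
   illustrative name. */
#include <stdint.h>

static int64_t demo_reduce(const int64_t *Ax, int64_t n) {
    int64_t sum = 0 ;
    /* simd asks for vectorization within a single thread; the reduction
       clause keeps the accumulator correct across vector lanes. */
    #pragma omp simd reduction(+:sum)
    for (int64_t p = 0 ; p < n ; p++) {
        sum += Ax [p] ;
    }
    return (sum) ;
}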
TBBHashmap.h
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #pragma once #include <tbb/concurrent_unordered_map.h> #include <limits> #include <unordered_map> #include "open3d/core/hashmap/CPU/CPUHashmapBufferAccessor.hpp" #include "open3d/core/hashmap/DeviceHashmap.h" namespace open3d { namespace core { template <typename Key, typename Hash> class TBBHashmap : public DeviceHashmap { public: TBBHashmap(int64_t init_capacity, int64_t dsize_key, int64_t dsize_value, const Device& device); ~TBBHashmap(); void Rehash(int64_t buckets) override; void Insert(const void* input_keys, const void* input_values, addr_t* output_addrs, bool* output_masks, int64_t count) override; void Activate(const void* input_keys, addr_t* output_addrs, bool* output_masks, int64_t count) override; void Find(const void* input_keys, addr_t* output_addrs, bool* output_masks, int64_t count) override; void Erase(const void* input_keys, bool* output_masks, int64_t count) override; int64_t GetActiveIndices(addr_t* output_indices) override; int64_t Size() const override; int64_t GetBucketCount() const override; std::vector<int64_t> BucketSizes() const override; float LoadFactor() const override; std::shared_ptr<tbb::concurrent_unordered_map<Key, addr_t, Hash>> GetImpl() const { return impl_; } protected: std::shared_ptr<tbb::concurrent_unordered_map<Key, addr_t, Hash>> impl_; std::shared_ptr<CPUHashmapBufferAccessor> buffer_ctx_; void InsertImpl(const void* input_keys, const void* input_values, addr_t* output_addrs, bool* output_masks, int64_t count); void Allocate(int64_t capacity); }; template <typename Key, typename Hash> TBBHashmap<Key, Hash>::TBBHashmap(int64_t init_capacity, int64_t dsize_key, int64_t dsize_value, const Device& device) : DeviceHashmap(init_capacity, dsize_key, dsize_value, device) { Allocate(init_capacity); } template <typename Key, typename Hash> TBBHashmap<Key, Hash>::~TBBHashmap() {} template <typename Key, typename Hash> int64_t TBBHashmap<Key, Hash>::Size() const { return impl_->size(); } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Insert(const void* input_keys, const void* input_values, addr_t* output_addrs, bool* output_masks, int64_t count) { int64_t 
new_size = Size() + count; if (new_size > this->capacity_) { int64_t bucket_count = GetBucketCount(); float avg_capacity_per_bucket = float(this->capacity_) / float(bucket_count); int64_t expected_buckets = std::max( bucket_count * 2, int64_t(std::ceil(new_size / avg_capacity_per_bucket))); Rehash(expected_buckets); } InsertImpl(input_keys, input_values, output_addrs, output_masks, count); } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Activate(const void* input_keys, addr_t* output_addrs, bool* output_masks, int64_t count) { Insert(input_keys, nullptr, output_addrs, output_masks, count); } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Find(const void* input_keys, addr_t* output_addrs, bool* output_masks, int64_t count) { const Key* input_keys_templated = static_cast<const Key*>(input_keys); #pragma omp parallel for for (int64_t i = 0; i < count; ++i) { const Key& key = input_keys_templated[i]; auto iter = impl_->find(key); bool flag = (iter != impl_->end()); output_masks[i] = flag; output_addrs[i] = flag ? iter->second : 0; } } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Erase(const void* input_keys, bool* output_masks, int64_t count) { const Key* input_keys_templated = static_cast<const Key*>(input_keys); for (int64_t i = 0; i < count; ++i) { const Key& key = input_keys_templated[i]; auto iter = impl_->find(key); bool flag = (iter != impl_->end()); output_masks[i] = flag; if (flag) { buffer_ctx_->DeviceFree(iter->second); impl_->unsafe_erase(iter); } } } template <typename Key, typename Hash> int64_t TBBHashmap<Key, Hash>::GetActiveIndices(addr_t* output_indices) { int64_t count = impl_->size(); int64_t i = 0; for (auto iter = impl_->begin(); iter != impl_->end(); ++iter, ++i) { output_indices[i] = static_cast<int64_t>(iter->second); } return count; } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Rehash(int64_t buckets) { int64_t iterator_count = Size(); Tensor active_keys; Tensor active_values; if (iterator_count > 0) { Tensor active_addrs({iterator_count}, Dtype::Int32, this->device_); GetActiveIndices(static_cast<addr_t*>(active_addrs.GetDataPtr())); Tensor active_indices = active_addrs.To(Dtype::Int64); active_keys = this->GetKeyBuffer().IndexGet({active_indices}); active_values = this->GetValueBuffer().IndexGet({active_indices}); } float avg_capacity_per_bucket = float(this->capacity_) / float(GetBucketCount()); int64_t new_capacity = int64_t(std::ceil(buckets * avg_capacity_per_bucket)); Allocate(new_capacity); if (iterator_count > 0) { Tensor output_addrs({iterator_count}, Dtype::Int32, this->device_); Tensor output_masks({iterator_count}, Dtype::Bool, this->device_); InsertImpl(active_keys.GetDataPtr(), active_values.GetDataPtr(), static_cast<addr_t*>(output_addrs.GetDataPtr()), output_masks.GetDataPtr<bool>(), iterator_count); } impl_->rehash(buckets); } template <typename Key, typename Hash> int64_t TBBHashmap<Key, Hash>::GetBucketCount() const { return impl_->unsafe_bucket_count(); } template <typename Key, typename Hash> std::vector<int64_t> TBBHashmap<Key, Hash>::BucketSizes() const { int64_t bucket_count = impl_->unsafe_bucket_count(); std::vector<int64_t> ret; for (int64_t i = 0; i < bucket_count; ++i) { ret.push_back(impl_->unsafe_bucket_size(i)); } return ret; } template <typename Key, typename Hash> float TBBHashmap<Key, Hash>::LoadFactor() const { return impl_->load_factor(); } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::InsertImpl(const void* input_keys, const 
void* input_values, addr_t* output_addrs, bool* output_masks, int64_t count) { const Key* input_keys_templated = static_cast<const Key*>(input_keys); #pragma omp parallel for for (int64_t i = 0; i < count; ++i) { output_addrs[i] = 0; output_masks[i] = false; const Key& key = input_keys_templated[i]; // Try to insert a dummy address. auto res = impl_->insert({key, 0}); // Lazy copy key value pair to buffer only if succeeded if (res.second) { addr_t dst_kv_addr = buffer_ctx_->DeviceAllocate(); auto dst_kv_iter = buffer_ctx_->ExtractIterator(dst_kv_addr); // Copy templated key to buffer *static_cast<Key*>(dst_kv_iter.first) = key; // Copy/reset non-templated value in buffer uint8_t* dst_value = static_cast<uint8_t*>(dst_kv_iter.second); if (input_values != nullptr) { const uint8_t* src_value = static_cast<const uint8_t*>(input_values) + this->dsize_value_ * i; std::memcpy(dst_value, src_value, this->dsize_value_); } else { std::memset(dst_value, 0, this->dsize_value_); } // Update from dummy 0 res.first->second = dst_kv_addr; // Write to return variables output_addrs[i] = dst_kv_addr; output_masks[i] = true; } } } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Allocate(int64_t capacity) { this->capacity_ = capacity; this->buffer_ = std::make_shared<HashmapBuffer>(this->capacity_, this->dsize_key_, this->dsize_value_, this->device_); buffer_ctx_ = std::make_shared<CPUHashmapBufferAccessor>( this->capacity_, this->dsize_key_, this->dsize_value_, this->buffer_->GetKeyBuffer(), this->buffer_->GetValueBuffer(), this->buffer_->GetHeap()); buffer_ctx_->Reset(); impl_ = std::make_shared<tbb::concurrent_unordered_map<Key, addr_t, Hash>>( capacity, Hash()); } } // namespace core } // namespace open3d
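/* A standalone sketch of the growth heuristic in Insert() above: keep the
   observed capacity-per-bucket ratio, and grow the bucket count to at least
   double, or to whatever the incoming batch needs if that is larger.
   demo_expected_buckets is an illustrative name. */
#include <algorithm>
#include <cmath>
#include <cstdint>

inline int64_t demo_expected_buckets(int64_t capacity, int64_t bucket_count,
                                     int64_t new_size) {
    float avg_capacity_per_bucket = float(capacity) / float(bucket_count);
    // Doubling bounds the number of rehashes; the ceil term covers a batch
    // larger than a single doubling would accommodate.
    return std::max(bucket_count * 2,
                    int64_t(std::ceil(new_size / avg_capacity_per_bucket)));
}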
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #pragma once #include <tbb/concurrent_unordered_map.h> #include <limits> #include <unordered_map> #include "open3d/core/hashmap/CPU/CPUHashmapBufferAccessor.hpp" #include "open3d/core/hashmap/DeviceHashmap.h" namespace open3d { namespace core { template <typename Key, typename Hash> class TBBHashmap : public DeviceHashmap { public: TBBHashmap(int64_t init_capacity, int64_t dsize_key, int64_t dsize_value, const Device& device); ~TBBHashmap(); void Rehash(int64_t buckets) override; void Insert(const void* input_keys, const void* input_values, addr_t* output_addrs, bool* output_masks, int64_t count) override; void Activate(const void* input_keys, addr_t* output_addrs, bool* output_masks, int64_t count) override; void Find(const void* input_keys, addr_t* output_addrs, bool* output_masks, int64_t count) override; void Erase(const void* input_keys, bool* output_masks, int64_t count) override; int64_t GetActiveIndices(addr_t* output_indices) override; int64_t Size() const override; int64_t GetBucketCount() const override; std::vector<int64_t> BucketSizes() const override; float LoadFactor() const override; std::shared_ptr<tbb::concurrent_unordered_map<Key, addr_t, Hash>> GetImpl() const { return impl_; } protected: std::shared_ptr<tbb::concurrent_unordered_map<Key, addr_t, Hash>> impl_; std::shared_ptr<CPUHashmapBufferAccessor> buffer_ctx_; void InsertImpl(const void* input_keys, const void* input_values, addr_t* output_addrs, bool* output_masks, int64_t count); void Allocate(int64_t capacity); }; template <typename Key, typename Hash> TBBHashmap<Key, Hash>::TBBHashmap(int64_t init_capacity, int64_t dsize_key, int64_t dsize_value, const Device& device) : DeviceHashmap(init_capacity, dsize_key, dsize_value, device) { Allocate(init_capacity); } template <typename Key, typename Hash> TBBHashmap<Key, Hash>::~TBBHashmap() {} template <typename Key, typename Hash> int64_t TBBHashmap<Key, Hash>::Size() const { return impl_->size(); } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Insert(const void* input_keys, const void* input_values, addr_t* output_addrs, bool* output_masks, int64_t count) { int64_t 
new_size = Size() + count; if (new_size > this->capacity_) { int64_t bucket_count = GetBucketCount(); float avg_capacity_per_bucket = float(this->capacity_) / float(bucket_count); int64_t expected_buckets = std::max( bucket_count * 2, int64_t(std::ceil(new_size / avg_capacity_per_bucket))); Rehash(expected_buckets); } InsertImpl(input_keys, input_values, output_addrs, output_masks, count); } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Activate(const void* input_keys, addr_t* output_addrs, bool* output_masks, int64_t count) { Insert(input_keys, nullptr, output_addrs, output_masks, count); } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Find(const void* input_keys, addr_t* output_addrs, bool* output_masks, int64_t count) { const Key* input_keys_templated = static_cast<const Key*>(input_keys); for (int64_t i = 0; i < count; ++i) { const Key& key = input_keys_templated[i]; auto iter = impl_->find(key); bool flag = (iter != impl_->end()); output_masks[i] = flag; output_addrs[i] = flag ? iter->second : 0; } } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Erase(const void* input_keys, bool* output_masks, int64_t count) { const Key* input_keys_templated = static_cast<const Key*>(input_keys); for (int64_t i = 0; i < count; ++i) { const Key& key = input_keys_templated[i]; auto iter = impl_->find(key); bool flag = (iter != impl_->end()); output_masks[i] = flag; if (flag) { buffer_ctx_->DeviceFree(iter->second); impl_->unsafe_erase(iter); } } } template <typename Key, typename Hash> int64_t TBBHashmap<Key, Hash>::GetActiveIndices(addr_t* output_indices) { int64_t count = impl_->size(); int64_t i = 0; for (auto iter = impl_->begin(); iter != impl_->end(); ++iter, ++i) { output_indices[i] = static_cast<int64_t>(iter->second); } return count; } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Rehash(int64_t buckets) { int64_t iterator_count = Size(); Tensor active_keys; Tensor active_values; if (iterator_count > 0) { Tensor active_addrs({iterator_count}, Dtype::Int32, this->device_); GetActiveIndices(static_cast<addr_t*>(active_addrs.GetDataPtr())); Tensor active_indices = active_addrs.To(Dtype::Int64); active_keys = this->GetKeyBuffer().IndexGet({active_indices}); active_values = this->GetValueBuffer().IndexGet({active_indices}); } float avg_capacity_per_bucket = float(this->capacity_) / float(GetBucketCount()); int64_t new_capacity = int64_t(std::ceil(buckets * avg_capacity_per_bucket)); Allocate(new_capacity); if (iterator_count > 0) { Tensor output_addrs({iterator_count}, Dtype::Int32, this->device_); Tensor output_masks({iterator_count}, Dtype::Bool, this->device_); InsertImpl(active_keys.GetDataPtr(), active_values.GetDataPtr(), static_cast<addr_t*>(output_addrs.GetDataPtr()), output_masks.GetDataPtr<bool>(), iterator_count); } impl_->rehash(buckets); } template <typename Key, typename Hash> int64_t TBBHashmap<Key, Hash>::GetBucketCount() const { return impl_->unsafe_bucket_count(); } template <typename Key, typename Hash> std::vector<int64_t> TBBHashmap<Key, Hash>::BucketSizes() const { int64_t bucket_count = impl_->unsafe_bucket_count(); std::vector<int64_t> ret; for (int64_t i = 0; i < bucket_count; ++i) { ret.push_back(impl_->unsafe_bucket_size(i)); } return ret; } template <typename Key, typename Hash> float TBBHashmap<Key, Hash>::LoadFactor() const { return impl_->load_factor(); } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::InsertImpl(const void* input_keys, const void* input_values, 
addr_t* output_addrs, bool* output_masks, int64_t count) { const Key* input_keys_templated = static_cast<const Key*>(input_keys); for (int64_t i = 0; i < count; ++i) { output_addrs[i] = 0; output_masks[i] = false; const Key& key = input_keys_templated[i]; // Try to insert a dummy address. auto res = impl_->insert({key, 0}); // Lazy copy key value pair to buffer only if succeeded if (res.second) { addr_t dst_kv_addr = buffer_ctx_->DeviceAllocate(); auto dst_kv_iter = buffer_ctx_->ExtractIterator(dst_kv_addr); // Copy templated key to buffer *static_cast<Key*>(dst_kv_iter.first) = key; // Copy/reset non-templated value in buffer uint8_t* dst_value = static_cast<uint8_t*>(dst_kv_iter.second); if (input_values != nullptr) { const uint8_t* src_value = static_cast<const uint8_t*>(input_values) + this->dsize_value_ * i; std::memcpy(dst_value, src_value, this->dsize_value_); } else { std::memset(dst_value, 0, this->dsize_value_); } // Update from dummy 0 res.first->second = dst_kv_addr; // Write to return variables output_addrs[i] = dst_kv_addr; output_masks[i] = true; } } } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Allocate(int64_t capacity) { this->capacity_ = capacity; this->buffer_ = std::make_shared<HashmapBuffer>(this->capacity_, this->dsize_key_, this->dsize_value_, this->device_); buffer_ctx_ = std::make_shared<CPUHashmapBufferAccessor>( this->capacity_, this->dsize_key_, this->dsize_value_, this->buffer_->GetKeyBuffer(), this->buffer_->GetValueBuffer(), this->buffer_->GetHeap()); buffer_ctx_->Reset(); impl_ = std::make_shared<tbb::concurrent_unordered_map<Key, addr_t, Hash>>( capacity, Hash()); } } // namespace core } // namespace open3d
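Aside: a minimal standalone sketch of the growth heuristic used in TBBHashmap::Insert above, assuming only the standard library; ExpectedBuckets is a hypothetical name introduced here for illustration, not part of Open3D. When an incoming batch would overflow the current capacity, the map grows to at least double the current bucket count, or to enough buckets to hold the new size at the current average occupancy, whichever is larger.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Hypothetical helper mirroring the rehash trigger in TBBHashmap::Insert.
int64_t ExpectedBuckets(int64_t capacity, int64_t bucket_count,
                        int64_t new_size) {
    float avg_capacity_per_bucket =
            static_cast<float>(capacity) / static_cast<float>(bucket_count);
    return std::max(bucket_count * 2,
                    static_cast<int64_t>(
                            std::ceil(new_size / avg_capacity_per_bucket)));
}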
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #pragma once #include <tbb/concurrent_unordered_map.h> #include <limits> #include <unordered_map> #include "open3d/core/hashmap/CPU/CPUHashmapBufferAccessor.hpp" #include "open3d/core/hashmap/DeviceHashmap.h" namespace open3d { namespace core { template <typename Key, typename Hash> class TBBHashmap : public DeviceHashmap { public: TBBHashmap(int64_t init_capacity, int64_t dsize_key, int64_t dsize_value, const Device& device); ~TBBHashmap(); void Rehash(int64_t buckets) override; void Insert(const void* input_keys, const void* input_values, addr_t* output_addrs, bool* output_masks, int64_t count) override; void Activate(const void* input_keys, addr_t* output_addrs, bool* output_masks, int64_t count) override; void Find(const void* input_keys, addr_t* output_addrs, bool* output_masks, int64_t count) override; void Erase(const void* input_keys, bool* output_masks, int64_t count) override; int64_t GetActiveIndices(addr_t* output_indices) override; int64_t Size() const override; int64_t GetBucketCount() const override; std::vector<int64_t> BucketSizes() const override; float LoadFactor() const override; std::shared_ptr<tbb::concurrent_unordered_map<Key, addr_t, Hash>> GetImpl() const { return impl_; } protected: std::shared_ptr<tbb::concurrent_unordered_map<Key, addr_t, Hash>> impl_; std::shared_ptr<CPUHashmapBufferAccessor> buffer_ctx_; void InsertImpl(const void* input_keys, const void* input_values, addr_t* output_addrs, bool* output_masks, int64_t count); void Allocate(int64_t capacity); }; template <typename Key, typename Hash> TBBHashmap<Key, Hash>::TBBHashmap(int64_t init_capacity, int64_t dsize_key, int64_t dsize_value, const Device& device) : DeviceHashmap(init_capacity, dsize_key, dsize_value, device) { Allocate(init_capacity); } template <typename Key, typename Hash> TBBHashmap<Key, Hash>::~TBBHashmap() {} template <typename Key, typename Hash> int64_t TBBHashmap<Key, Hash>::Size() const { return impl_->size(); } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Insert(const void* input_keys, const void* input_values, addr_t* output_addrs, bool* output_masks, int64_t count) { int64_t 
new_size = Size() + count; if (new_size > this->capacity_) { int64_t bucket_count = GetBucketCount(); float avg_capacity_per_bucket = float(this->capacity_) / float(bucket_count); int64_t expected_buckets = std::max( bucket_count * 2, int64_t(std::ceil(new_size / avg_capacity_per_bucket))); Rehash(expected_buckets); } InsertImpl(input_keys, input_values, output_addrs, output_masks, count); } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Activate(const void* input_keys, addr_t* output_addrs, bool* output_masks, int64_t count) { Insert(input_keys, nullptr, output_addrs, output_masks, count); } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Find(const void* input_keys, addr_t* output_addrs, bool* output_masks, int64_t count) { const Key* input_keys_templated = static_cast<const Key*>(input_keys); #pragma omp parallel for for (int64_t i = 0; i < count; ++i) { const Key& key = input_keys_templated[i]; auto iter = impl_->find(key); bool flag = (iter != impl_->end()); output_masks[i] = flag; output_addrs[i] = flag ? iter->second : 0; } } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Erase(const void* input_keys, bool* output_masks, int64_t count) { const Key* input_keys_templated = static_cast<const Key*>(input_keys); for (int64_t i = 0; i < count; ++i) { const Key& key = input_keys_templated[i]; auto iter = impl_->find(key); bool flag = (iter != impl_->end()); output_masks[i] = flag; if (flag) { buffer_ctx_->DeviceFree(iter->second); impl_->unsafe_erase(iter); } } } template <typename Key, typename Hash> int64_t TBBHashmap<Key, Hash>::GetActiveIndices(addr_t* output_indices) { int64_t count = impl_->size(); int64_t i = 0; for (auto iter = impl_->begin(); iter != impl_->end(); ++iter, ++i) { output_indices[i] = static_cast<int64_t>(iter->second); } return count; } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Rehash(int64_t buckets) { int64_t iterator_count = Size(); Tensor active_keys; Tensor active_values; if (iterator_count > 0) { Tensor active_addrs({iterator_count}, Dtype::Int32, this->device_); GetActiveIndices(static_cast<addr_t*>(active_addrs.GetDataPtr())); Tensor active_indices = active_addrs.To(Dtype::Int64); active_keys = this->GetKeyBuffer().IndexGet({active_indices}); active_values = this->GetValueBuffer().IndexGet({active_indices}); } float avg_capacity_per_bucket = float(this->capacity_) / float(GetBucketCount()); int64_t new_capacity = int64_t(std::ceil(buckets * avg_capacity_per_bucket)); Allocate(new_capacity); if (iterator_count > 0) { Tensor output_addrs({iterator_count}, Dtype::Int32, this->device_); Tensor output_masks({iterator_count}, Dtype::Bool, this->device_); InsertImpl(active_keys.GetDataPtr(), active_values.GetDataPtr(), static_cast<addr_t*>(output_addrs.GetDataPtr()), output_masks.GetDataPtr<bool>(), iterator_count); } impl_->rehash(buckets); } template <typename Key, typename Hash> int64_t TBBHashmap<Key, Hash>::GetBucketCount() const { return impl_->unsafe_bucket_count(); } template <typename Key, typename Hash> std::vector<int64_t> TBBHashmap<Key, Hash>::BucketSizes() const { int64_t bucket_count = impl_->unsafe_bucket_count(); std::vector<int64_t> ret; for (int64_t i = 0; i < bucket_count; ++i) { ret.push_back(impl_->unsafe_bucket_size(i)); } return ret; } template <typename Key, typename Hash> float TBBHashmap<Key, Hash>::LoadFactor() const { return impl_->load_factor(); } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::InsertImpl(const void* input_keys, const 
void* input_values, addr_t* output_addrs, bool* output_masks, int64_t count) { const Key* input_keys_templated = static_cast<const Key*>(input_keys); #pragma omp parallel for for (int64_t i = 0; i < count; ++i) { output_addrs[i] = 0; output_masks[i] = false; const Key& key = input_keys_templated[i]; // Try to insert a dummy address. auto res = impl_->insert({key, 0}); // Lazy copy key value pair to buffer only if succeeded if (res.second) { addr_t dst_kv_addr = buffer_ctx_->DeviceAllocate(); auto dst_kv_iter = buffer_ctx_->ExtractIterator(dst_kv_addr); // Copy templated key to buffer *static_cast<Key*>(dst_kv_iter.first) = key; // Copy/reset non-templated value in buffer uint8_t* dst_value = static_cast<uint8_t*>(dst_kv_iter.second); if (input_values != nullptr) { const uint8_t* src_value = static_cast<const uint8_t*>(input_values) + this->dsize_value_ * i; std::memcpy(dst_value, src_value, this->dsize_value_); } else { std::memset(dst_value, 0, this->dsize_value_); } // Update from dummy 0 res.first->second = dst_kv_addr; // Write to return variables output_addrs[i] = dst_kv_addr; output_masks[i] = true; } } } template <typename Key, typename Hash> void TBBHashmap<Key, Hash>::Allocate(int64_t capacity) { this->capacity_ = capacity; this->buffer_ = std::make_shared<HashmapBuffer>(this->capacity_, this->dsize_key_, this->dsize_value_, this->device_); buffer_ctx_ = std::make_shared<CPUHashmapBufferAccessor>( this->capacity_, this->dsize_key_, this->dsize_value_, this->buffer_->GetKeyBuffer(), this->buffer_->GetValueBuffer(), this->buffer_->GetHeap()); buffer_ctx_->Reset(); impl_ = std::make_shared<tbb::concurrent_unordered_map<Key, addr_t, Hash>>( capacity, Hash()); } } // namespace core } // namespace open3d
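Aside: the only difference between the two TBBHashmap variants above is the #pragma omp parallel for annotation on Find and InsertImpl. A minimal sketch of why the parallel Find is safe, assuming TBB is available; ParallelFind is a hypothetical name, not part of Open3D. tbb::concurrent_unordered_map permits concurrent find() and insert(), and each loop iteration writes only its own output slot. Erase, by contrast, stays serial in both variants because unsafe_erase() is not safe to call concurrently.

#include <tbb/concurrent_unordered_map.h>

#include <cstdint>

// Sketch of a batched lookup in the style of TBBHashmap::Find. Raw output
// pointers are used (as in the original) so each iteration touches a
// distinct slot; no two threads ever write to the same element.
void ParallelFind(const tbb::concurrent_unordered_map<int64_t, uint32_t>& map,
                  const int64_t* keys, uint32_t* addrs, bool* masks,
                  int64_t count) {
#pragma omp parallel for
    for (int64_t i = 0; i < count; ++i) {
        auto it = map.find(keys[i]);
        masks[i] = (it != map.end());
        addrs[i] = masks[i] ? it->second : 0;
    }
}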
viterbi_decode_op.h
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <algorithm> #include <memory> #include <string> #include <vector> #include "paddle/fluid/operators/controlflow/compare_op.h" #include "paddle/fluid/operators/elementwise/elementwise_functor.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/operators/transpose_op.h" #include "paddle/fluid/operators/unique_op.h" #include "paddle/phi/kernels/funcs/gather.h" #ifdef PADDLE_WITH_MKLML #include <omp.h> #endif namespace paddle { namespace operators { template <typename DeviceContext, typename T, typename IndType> struct Argmax { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor& input, framework::Tensor* out_idx, framework::Tensor* out, int axis) { framework::DDim input_dims = input.dims(); int64_t pre = 1; int64_t post = 1; int64_t n = input_dims[axis]; for (int i = 0; i < axis; i++) { pre *= input_dims[i]; } for (int i = axis + 1; i < input_dims.size(); i++) { post *= input_dims[i]; } int64_t height = pre * post; int64_t width = n; const T* in_data = input.data<T>(); IndType* out_idx_data = out_idx->data<IndType>(); T* out_data = out->data<T>(); // Reduce #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int64_t i = 0; i < height; ++i) { int64_t h = i / post; int64_t w = i % post; IndType max_idx = -1; T max_value = (std::numeric_limits<T>::lowest)(); // for windows compile for (int64_t j = 0; j < width; ++j) { if (in_data[h * width * post + j * post + w] > max_value) { max_value = in_data[h * width * post + j * post + w]; max_idx = j; } } out_data[i] = max_value; out_idx_data[i] = max_idx; } } }; template <typename DeviceContext> struct ARange { void operator()(const DeviceContext& dev_ctx, int64_t* data, int end, int64_t scale) { for (int i = 0; i < end; ++i) { data[i] = i * scale; } } }; template <typename DeviceContext, typename T> struct GetMaxValue { void operator()(const DeviceContext& dev_ctx, const framework::Tensor& input, T* max_value) { auto input_ptr = input.data<T>(); auto num = input.numel(); *max_value = *std::max_element(input_ptr, input_ptr + num); } }; template <typename DeviceContext, typename T, typename IndexT = int> struct Gather { void operator()(const DeviceContext& ctx, const framework::Tensor& src, const framework::Tensor& index, framework::Tensor* output) { phi::funcs::CPUGather<T, IndexT>(ctx, src, index, output); } }; template <typename T, typename Functor, typename OutT = T> void SameDimsBinaryOP(const framework::Tensor& lhs, const framework::Tensor& rhs, framework::Tensor* out) { const T* lhs_ptr = lhs.data<T>(); const T* rhs_ptr = rhs.data<T>(); OutT* out_ptr = out->data<OutT>(); Functor functor; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int i = 0; i < out->numel(); ++i) { out_ptr[i] = functor(lhs_ptr[i], rhs_ptr[i]); } } template <typename DeviceContext, template <typename InT, typename OutT> typename 
CompareFunctor, typename T> struct GetMask { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor& lhs, const framework::Tensor& rhs, framework::Tensor* mask) { SameDimsBinaryOP<int64_t, CompareFunctor<int64_t, T>, T>(lhs, rhs, mask); } }; template <bool is_multi_threads> struct GetInputIndex { void operator()(const std::vector<int>& lhs_dims, const std::vector<int>& rhs_dims, const std::vector<int>& output_dims, const std::vector<int>& lhs_strides, const std::vector<int>& rhs_strides, const std::vector<int>& output_strides, int output_idx, int* index_array, int* lhs_idx, int* rhs_idx) { int out_dims_size = output_strides.size(); for (int j = 0; j < out_dims_size; ++j) { int curr_idx = output_idx / output_strides[j]; output_idx %= output_strides[j]; *lhs_idx += (lhs_dims[j] > 1) ? curr_idx * lhs_strides[j] : 0; *rhs_idx += (rhs_dims[j] > 1) ? curr_idx * rhs_strides[j] : 0; } } }; template <> struct GetInputIndex<false> { void operator()(const std::vector<int>& lhs_dims, const std::vector<int>& rhs_dims, const std::vector<int>& output_dims, const std::vector<int>& lhs_strides, const std::vector<int>& rhs_strides, const std::vector<int>& output_strides, int output_idx, int* index_array, int* lhs_idx, int* rhs_idx) { int out_dims_size = output_strides.size(); *lhs_idx = phi::funcs::GetElementwiseIndex(lhs_dims.data(), out_dims_size, index_array); *rhs_idx = phi::funcs::GetElementwiseIndex(rhs_dims.data(), out_dims_size, index_array); phi::funcs::UpdateElementwiseIndexArray(output_dims.data(), out_dims_size, index_array); } }; template <typename T, typename Functor, bool is_multi_threads = false> void SimpleBroadcastBinaryOP(const framework::Tensor& lhs, const framework::Tensor& rhs, framework::Tensor* out) { const T* lhs_ptr = lhs.data<T>(); const T* rhs_ptr = rhs.data<T>(); T* out_ptr = out->data<T>(); int out_size = static_cast<int>(out->dims().size()); std::vector<int> out_dims(out_size); std::vector<int> lhs_dims(out_size); std::vector<int> rhs_dims(out_size); std::copy(lhs.dims().Get(), lhs.dims().Get() + out_size, lhs_dims.data()); std::copy(rhs.dims().Get(), rhs.dims().Get() + out_size, rhs_dims.data()); std::copy(out->dims().Get(), out->dims().Get() + out_size, out_dims.data()); std::vector<int> output_strides(out_size, 1); std::vector<int> lhs_strides(out_size, 1); std::vector<int> rhs_strides(out_size, 1); std::vector<int> index_array(out_size, 0); // calculate strides for (int i = out_size - 2; i >= 0; --i) { output_strides[i] = output_strides[i + 1] * out_dims[i + 1]; lhs_strides[i] = lhs_strides[i + 1] * lhs_dims[i + 1]; rhs_strides[i] = rhs_strides[i + 1] * rhs_dims[i + 1]; } Functor functor; GetInputIndex<is_multi_threads> get_input_index; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int i = 0; i < out->numel(); ++i) { int lhs_idx = 0; int rhs_idx = 0; get_input_index(lhs_dims, rhs_dims, out_dims, lhs_strides, rhs_strides, output_strides, i, index_array.data(), &lhs_idx, &rhs_idx); out_ptr[i] = functor(lhs_ptr[lhs_idx], rhs_ptr[rhs_idx]); } } template <typename DeviceContext, template <typename T> typename BinaryFunctor, typename T> struct BinaryOperation { void operator()(const DeviceContext& dev_ctx, const framework::Tensor& lhs, const framework::Tensor& rhs, framework::Tensor* output) { if (lhs.dims() == rhs.dims()) { SameDimsBinaryOP<T, BinaryFunctor<T>>(lhs, rhs, output); } else { bool is_multi_threads = false; #ifdef PADDLE_WITH_MKLML if (omp_get_max_threads() > 1) { is_multi_threads = true; } #endif if (is_multi_threads) { 
SimpleBroadcastBinaryOP<T, BinaryFunctor<T>, true>(lhs, rhs, output); } else { SimpleBroadcastBinaryOP<T, BinaryFunctor<T>, false>(lhs, rhs, output); } } } }; class TensorBuffer { public: explicit TensorBuffer(const framework::LoDTensor& in) : buffer_(in), offset_(0) { buffer_.Resize({buffer_.numel()}); } framework::Tensor GetBufferBlock(std::initializer_list<int64_t> shape) { int64_t size = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int64_t>()); framework::Tensor block = buffer_.Slice(offset_, offset_ + size); offset_ += size; block.Resize(shape); return block; } private: framework::LoDTensor buffer_; // need to resize 1-D Tensor int offset_; }; template <typename DeviceContext, typename T> class ViterbiDecodeKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { bool include_bos_eos_tag = ctx.Attr<bool>("include_bos_eos_tag"); auto& dev_ctx = ctx.template device_context<DeviceContext>(); auto curr_place = ctx.GetPlace(); auto* input = ctx.Input<framework::Tensor>("Input"); auto batch_size = static_cast<int>(input->dims()[0]); auto seq_len = static_cast<int>(input->dims()[1]); auto n_labels = static_cast<int>(input->dims()[2]); phi::funcs::SetConstant<DeviceContext, T> float_functor; phi::funcs::SetConstant<DeviceContext, int64_t> int_functor; std::vector<framework::Tensor> historys; // We create tensor buffer in order to avoid allocating memory frequently // 10 means allocate 10*batch_size bytes memory, such as int_mask, zero... int buffer_size = batch_size * (n_labels + 1) * seq_len + 10 * batch_size; framework::LoDTensor int_buffer; int_buffer.Resize(phi::make_ddim({buffer_size})); int_buffer.mutable_data<int64_t>(ctx.GetPlace()); TensorBuffer int_tensor_buffer(int_buffer); // create float tensor buffer // 10 means allocate 10*batch_size*n_labels bytes, such as alpha, alpha_max buffer_size = batch_size * (seq_len + 10) * n_labels + (batch_size + 2) * n_labels * n_labels; framework::LoDTensor float_buffer; float_buffer.Resize(phi::make_ddim({buffer_size})); float_buffer.mutable_data<T>(ctx.GetPlace()); TensorBuffer float_tensor_buffer(float_buffer); auto* length = ctx.Input<framework::Tensor>("Length"); framework::Tensor left_length = int_tensor_buffer.GetBufferBlock({batch_size, 1}); framework::TensorCopy(*length, curr_place, dev_ctx, &left_length); int64_t max_seq_len = 0; GetMaxValue<DeviceContext, int64_t> get_max_value; get_max_value(dev_ctx, left_length, &max_seq_len); auto* scores = ctx.Output<framework::Tensor>("Scores"); scores->mutable_data<T>(curr_place); auto* path = ctx.Output<framework::Tensor>("Path"); path->Resize({batch_size, max_seq_len}); path->mutable_data<int64_t>(curr_place); framework::Tensor tpath = int_tensor_buffer.GetBufferBlock({max_seq_len, batch_size}); auto batch_path = Unbind(tpath); for (auto it = batch_path.begin(); it != batch_path.end(); ++it) { it->Resize({batch_size}); } // create and init required tensor framework::Tensor input_exp = float_tensor_buffer.GetBufferBlock({seq_len, batch_size, n_labels}); TransCompute<DeviceContext, T>(3, dev_ctx, *input, &input_exp, {1, 0, 2}); auto* transition = ctx.Input<framework::Tensor>("Transition"); framework::Tensor trans_exp = float_tensor_buffer.GetBufferBlock({n_labels, n_labels}); framework::TensorCopy(*transition, curr_place, dev_ctx, &trans_exp); trans_exp.Resize({1, n_labels, n_labels}); framework::Tensor alpha = float_tensor_buffer.GetBufferBlock({batch_size, n_labels}); framework::Tensor zero = 
int_tensor_buffer.GetBufferBlock({batch_size, 1}); int_functor(dev_ctx, &zero, 0); framework::Tensor one = int_tensor_buffer.GetBufferBlock({batch_size, 1}); int_functor(dev_ctx, &one, 1); framework::Tensor float_one = float_tensor_buffer.GetBufferBlock({batch_size, 1}); float_functor(dev_ctx, &float_one, static_cast<T>(1.0)); framework::Tensor alpha_trn_sum = float_tensor_buffer.GetBufferBlock({batch_size, n_labels, n_labels}); framework::Tensor alpha_max = float_tensor_buffer.GetBufferBlock({batch_size, n_labels}); framework::Tensor alpha_argmax = int_tensor_buffer.GetBufferBlock({seq_len, batch_size, n_labels}); auto alpha_argmax_unbind = Unbind(alpha_argmax); framework::Tensor alpha_nxt = float_tensor_buffer.GetBufferBlock({batch_size, n_labels}); framework::Tensor int_mask = int_tensor_buffer.GetBufferBlock({batch_size}); framework::Tensor zero_len_mask = int_tensor_buffer.GetBufferBlock({batch_size}); framework::Tensor float_mask = float_tensor_buffer.GetBufferBlock({batch_size, 1}); framework::Tensor stop_trans = float_tensor_buffer.GetBufferBlock({1, 1, n_labels}); framework::Tensor start_trans = float_tensor_buffer.GetBufferBlock({1, 1, n_labels}); framework::Tensor rest_trans = float_tensor_buffer.GetBufferBlock({1, n_labels - 2, n_labels}); framework::Tensor last_ids = int_tensor_buffer.GetBufferBlock({batch_size}); framework::Tensor last_ids_tmp = int_tensor_buffer.GetBufferBlock({batch_size}); framework::Tensor batch_offset = int_tensor_buffer.GetBufferBlock({batch_size}); framework::Tensor gather_idx = int_tensor_buffer.GetBufferBlock({batch_size}); std::vector<const framework::Tensor*> shape{&rest_trans, &stop_trans, &start_trans}; std::vector<framework::Tensor*> outputs{&rest_trans, &stop_trans, &start_trans}; math::SplitFunctor<DeviceContext, T> split_functor; split_functor(dev_ctx, trans_exp, shape, 1, &outputs); stop_trans.Resize({1, n_labels}); start_trans.Resize({1, n_labels}); auto logit0 = input_exp.Slice(0, 1); logit0.Resize({batch_size, n_labels}); BinaryOperation<DeviceContext, AddFunctor, T> AddFloat; BinaryOperation<DeviceContext, AddFunctor, int64_t> AddInt; BinaryOperation<DeviceContext, MulFunctor, T> MulFloat; BinaryOperation<DeviceContext, MulFunctor, int64_t> MulInt; BinaryOperation<DeviceContext, SubFunctor, T> SubFloat; BinaryOperation<DeviceContext, SubFunctor, int64_t> SubInt; if (include_bos_eos_tag) { AddFloat(dev_ctx, logit0, start_trans, &alpha); GetMask<DeviceContext, EqualFunctor, T>()(ctx, left_length, one, &float_mask); MulFloat(dev_ctx, stop_trans, float_mask, &alpha_nxt); AddFloat(dev_ctx, alpha, alpha_nxt, &alpha); } else { alpha = logit0; } SubInt(dev_ctx, left_length, one, &left_length); Argmax<DeviceContext, T, int64_t> argmax; for (int64_t i = 1; i < max_seq_len; ++i) { framework::Tensor logit = input_exp.Slice(i, i + 1); logit.Resize({batch_size, n_labels}); framework::Tensor& alpha_exp = alpha.Resize({batch_size, n_labels, 1}); AddFloat(dev_ctx, alpha_exp, trans_exp, &alpha_trn_sum); auto alpha_argmax_temp = alpha_argmax_unbind[i - 1]; alpha_argmax_temp.Resize({batch_size, n_labels}); argmax(ctx, alpha_trn_sum, &alpha_argmax_temp, &alpha_max, 1); historys.emplace_back(alpha_argmax_temp); AddFloat(dev_ctx, alpha_max, logit, &alpha_nxt); alpha.Resize({batch_size, n_labels}); // mask = paddle.cast((left_length > 0), dtype='float32') // alpha = mask * alpha_nxt + (1 - mask) * alpha GetMask<DeviceContext, GreaterThanFunctor, T>()(ctx, left_length, zero, &float_mask); // alpha_nxt = mask * alpha_nxt MulFloat(dev_ctx, alpha_nxt, float_mask, 
&alpha_nxt); // inv_mask = 1 - mask SubFloat(dev_ctx, float_one, float_mask, &float_mask); // alpha = (1 - mask) * alpha MulFloat(dev_ctx, alpha, float_mask, &alpha); // alpha += alpha_nxt AddFloat(dev_ctx, alpha, alpha_nxt, &alpha); if (include_bos_eos_tag) { GetMask<DeviceContext, EqualFunctor, T>()(ctx, left_length, one, &float_mask); // alpha += mask * trans_exp[:, self.stop_idx] MulFloat(dev_ctx, stop_trans, float_mask, &alpha_nxt); AddFloat(dev_ctx, alpha, alpha_nxt, &alpha); } SubInt(dev_ctx, left_length, one, &left_length); } argmax(ctx, alpha, &last_ids, scores, 1); left_length.Resize({batch_size}); GetMask<DeviceContext, GreaterEqualFunctor, int64_t>()(ctx, left_length, zero, &int_mask); // last_ids_update = last_ids * tag_mask int last_ids_index = 1; int actual_len = (std::min)(seq_len, static_cast<int>(max_seq_len)); MulInt(dev_ctx, last_ids, int_mask, &batch_path[actual_len - last_ids_index]); // The algorithm below can refer to // https://github.com/PaddlePaddle/PaddleNLP/blob/develop/paddlenlp/layers/crf.py#L438 ARange<DeviceContext> arange; arange(dev_ctx, batch_offset.data<int64_t>(), batch_size, n_labels); Gather<DeviceContext, int64_t, int64_t> gather; for (auto hist = historys.rbegin(); hist != historys.rend(); ++hist) { ++last_ids_index; AddInt(dev_ctx, left_length, one, &left_length); AddInt(dev_ctx, batch_offset, last_ids, &gather_idx); framework::Tensor& last_ids_update = batch_path[actual_len - last_ids_index]; hist->Resize({batch_size * n_labels}); gather(dev_ctx, *hist, gather_idx, &last_ids_update); GetMask<DeviceContext, GreaterThanFunctor, int64_t>()(ctx, left_length, zero, &int_mask); MulInt(dev_ctx, last_ids_update, int_mask, &last_ids_update); GetMask<DeviceContext, EqualFunctor, int64_t>()(ctx, left_length, zero, &zero_len_mask); MulInt(dev_ctx, last_ids, zero_len_mask, &last_ids_tmp); SubInt(dev_ctx, one, zero_len_mask, &zero_len_mask); MulInt(dev_ctx, last_ids_update, zero_len_mask, &last_ids_update); AddInt(dev_ctx, last_ids_update, last_ids_tmp, &last_ids_update); GetMask<DeviceContext, LessThanFunctor, int64_t>()(ctx, left_length, zero, &int_mask); MulInt(dev_ctx, last_ids, int_mask, &last_ids); AddInt(dev_ctx, last_ids_update, last_ids, &last_ids); } TransCompute<DeviceContext, int64_t>(2, dev_ctx, tpath, path, {1, 0}); } }; } // namespace operators } // namespace paddle
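Aside: the main loop of ViterbiDecodeKernel above composes MulFloat/SubFloat/AddFloat to realize the masked update written in its comments, alpha = mask * alpha_nxt + (1 - mask) * alpha. A minimal scalar sketch of that identity; MaskedBlend is a hypothetical name, not part of Paddle.

#include <cstdint>

// Elementwise masked blend: where mask[i] == 1 take the new value,
// where mask[i] == 0 keep the old one.
void MaskedBlend(const float* mask, const float* alpha_nxt, float* alpha,
                 int64_t n) {
    for (int64_t i = 0; i < n; ++i) {
        alpha[i] = mask[i] * alpha_nxt[i] + (1.0f - mask[i]) * alpha[i];
    }
}

The same mask / one-minus-mask trick is reused with integer tensors later in the kernel to update last_ids without branching on per-batch sequence lengths.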
#pragma once #include <algorithm> #include <memory> #include <string> #include <vector> #include "paddle/fluid/operators/controlflow/compare_op.h" #include "paddle/fluid/operators/elementwise/elementwise_functor.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/operators/transpose_op.h" #include "paddle/fluid/operators/unique_op.h" #include "paddle/phi/kernels/funcs/gather.h" #ifdef PADDLE_WITH_MKLML #include <omp.h> #endif namespace paddle { namespace operators { template <typename DeviceContext, typename T, typename IndType> struct Argmax { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor& input, framework::Tensor* out_idx, framework::Tensor* out, int axis) { framework::DDim input_dims = input.dims(); int64_t pre = 1; int64_t post = 1; int64_t n = input_dims[axis]; for (int i = 0; i < axis; i++) { pre *= input_dims[i]; } for (int i = axis + 1; i < input_dims.size(); i++) { post *= input_dims[i]; } int64_t height = pre * post; int64_t width = n; const T* in_data = input.data<T>(); IndType* out_idx_data = out_idx->data<IndType>(); T* out_data = out->data<T>(); // Reduce #ifdef PADDLE_WITH_MKLML #endif for (int64_t i = 0; i < height; ++i) { int64_t h = i / post; int64_t w = i % post; IndType max_idx = -1; T max_value = (std::numeric_limits<T>::lowest)(); // for windows compile for (int64_t j = 0; j < width; ++j) { if (in_data[h * width * post + j * post + w] > max_value) { max_value = in_data[h * width * post + j * post + w]; max_idx = j; } } out_data[i] = max_value; out_idx_data[i] = max_idx; } } }; template <typename DeviceContext> struct ARange { void operator()(const DeviceContext& dev_ctx, int64_t* data, int end, int64_t scale) { for (int i = 0; i < end; ++i) { data[i] = i * scale; } } }; template <typename DeviceContext, typename T> struct GetMaxValue { void operator()(const DeviceContext& dev_ctx, const framework::Tensor& input, T* max_value) { auto input_ptr = input.data<T>(); auto num = input.numel(); *max_value = *std::max_element(input_ptr, input_ptr + num); } }; template <typename DeviceContext, typename T, typename IndexT = int> struct Gather { void operator()(const DeviceContext& ctx, const framework::Tensor& src, const framework::Tensor& index, framework::Tensor* output) { phi::funcs::CPUGather<T, IndexT>(ctx, src, index, output); } }; template <typename T, typename Functor, typename OutT = T> void SameDimsBinaryOP(const framework::Tensor& lhs, const framework::Tensor& rhs, framework::Tensor* out) { const T* lhs_ptr = lhs.data<T>(); const T* rhs_ptr = rhs.data<T>(); OutT* out_ptr = out->data<OutT>(); Functor functor; #ifdef PADDLE_WITH_MKLML #endif for (int i = 0; i < out->numel(); ++i) { out_ptr[i] = functor(lhs_ptr[i], rhs_ptr[i]); } } template <typename DeviceContext, template <typename InT, typename OutT> typename CompareFunctor, typename T> struct GetMask { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor& lhs, const framework::Tensor& rhs, framework::Tensor* mask) { SameDimsBinaryOP<int64_t, CompareFunctor<int64_t, T>, T>(lhs, rhs, mask); } }; template <bool is_multi_threads> struct GetInputIndex { void operator()(const std::vector<int>& lhs_dims, const std::vector<int>& rhs_dims, const std::vector<int>& output_dims, const std::vector<int>& lhs_strides, const std::vector<int>& rhs_strides, const std::vector<int>& output_strides, int output_idx, int* index_array, int* lhs_idx, int* rhs_idx) { int 
out_dims_size = output_strides.size(); for (int j = 0; j < out_dims_size; ++j) { int curr_idx = output_idx / output_strides[j]; output_idx %= output_strides[j]; *lhs_idx += (lhs_dims[j] > 1) ? curr_idx * lhs_strides[j] : 0; *rhs_idx += (rhs_dims[j] > 1) ? curr_idx * rhs_strides[j] : 0; } } }; template <> struct GetInputIndex<false> { void operator()(const std::vector<int>& lhs_dims, const std::vector<int>& rhs_dims, const std::vector<int>& output_dims, const std::vector<int>& lhs_strides, const std::vector<int>& rhs_strides, const std::vector<int>& output_strides, int output_idx, int* index_array, int* lhs_idx, int* rhs_idx) { int out_dims_size = output_strides.size(); *lhs_idx = phi::funcs::GetElementwiseIndex(lhs_dims.data(), out_dims_size, index_array); *rhs_idx = phi::funcs::GetElementwiseIndex(rhs_dims.data(), out_dims_size, index_array); phi::funcs::UpdateElementwiseIndexArray(output_dims.data(), out_dims_size, index_array); } }; template <typename T, typename Functor, bool is_multi_threads = false> void SimpleBroadcastBinaryOP(const framework::Tensor& lhs, const framework::Tensor& rhs, framework::Tensor* out) { const T* lhs_ptr = lhs.data<T>(); const T* rhs_ptr = rhs.data<T>(); T* out_ptr = out->data<T>(); int out_size = static_cast<int>(out->dims().size()); std::vector<int> out_dims(out_size); std::vector<int> lhs_dims(out_size); std::vector<int> rhs_dims(out_size); std::copy(lhs.dims().Get(), lhs.dims().Get() + out_size, lhs_dims.data()); std::copy(rhs.dims().Get(), rhs.dims().Get() + out_size, rhs_dims.data()); std::copy(out->dims().Get(), out->dims().Get() + out_size, out_dims.data()); std::vector<int> output_strides(out_size, 1); std::vector<int> lhs_strides(out_size, 1); std::vector<int> rhs_strides(out_size, 1); std::vector<int> index_array(out_size, 0); // calculate strides for (int i = out_size - 2; i >= 0; --i) { output_strides[i] = output_strides[i + 1] * out_dims[i + 1]; lhs_strides[i] = lhs_strides[i + 1] * lhs_dims[i + 1]; rhs_strides[i] = rhs_strides[i + 1] * rhs_dims[i + 1]; } Functor functor; GetInputIndex<is_multi_threads> get_input_index; #ifdef PADDLE_WITH_MKLML #endif for (int i = 0; i < out->numel(); ++i) { int lhs_idx = 0; int rhs_idx = 0; get_input_index(lhs_dims, rhs_dims, out_dims, lhs_strides, rhs_strides, output_strides, i, index_array.data(), &lhs_idx, &rhs_idx); out_ptr[i] = functor(lhs_ptr[lhs_idx], rhs_ptr[rhs_idx]); } } template <typename DeviceContext, template <typename T> typename BinaryFunctor, typename T> struct BinaryOperation { void operator()(const DeviceContext& dev_ctx, const framework::Tensor& lhs, const framework::Tensor& rhs, framework::Tensor* output) { if (lhs.dims() == rhs.dims()) { SameDimsBinaryOP<T, BinaryFunctor<T>>(lhs, rhs, output); } else { bool is_multi_threads = false; #ifdef PADDLE_WITH_MKLML if (omp_get_max_threads() > 1) { is_multi_threads = true; } #endif if (is_multi_threads) { SimpleBroadcastBinaryOP<T, BinaryFunctor<T>, true>(lhs, rhs, output); } else { SimpleBroadcastBinaryOP<T, BinaryFunctor<T>, false>(lhs, rhs, output); } } } }; class TensorBuffer { public: explicit TensorBuffer(const framework::LoDTensor& in) : buffer_(in), offset_(0) { buffer_.Resize({buffer_.numel()}); } framework::Tensor GetBufferBlock(std::initializer_list<int64_t> shape) { int64_t size = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int64_t>()); framework::Tensor block = buffer_.Slice(offset_, offset_ + size); offset_ += size; block.Resize(shape); return block; } private: framework::LoDTensor buffer_; // need to resize 1-D 
Tensor int offset_; }; template <typename DeviceContext, typename T> class ViterbiDecodeKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { bool include_bos_eos_tag = ctx.Attr<bool>("include_bos_eos_tag"); auto& dev_ctx = ctx.template device_context<DeviceContext>(); auto curr_place = ctx.GetPlace(); auto* input = ctx.Input<framework::Tensor>("Input"); auto batch_size = static_cast<int>(input->dims()[0]); auto seq_len = static_cast<int>(input->dims()[1]); auto n_labels = static_cast<int>(input->dims()[2]); phi::funcs::SetConstant<DeviceContext, T> float_functor; phi::funcs::SetConstant<DeviceContext, int64_t> int_functor; std::vector<framework::Tensor> historys; // We create tensor buffer in order to avoid allocating memory frequently // 10 means allocate 10*batch_size bytes memory, such as int_mask, zero... int buffer_size = batch_size * (n_labels + 1) * seq_len + 10 * batch_size; framework::LoDTensor int_buffer; int_buffer.Resize(phi::make_ddim({buffer_size})); int_buffer.mutable_data<int64_t>(ctx.GetPlace()); TensorBuffer int_tensor_buffer(int_buffer); // create float tensor buffer // 10 means allocate 10*batch_size*n_labels bytes, such as alpha, alpha_max buffer_size = batch_size * (seq_len + 10) * n_labels + (batch_size + 2) * n_labels * n_labels; framework::LoDTensor float_buffer; float_buffer.Resize(phi::make_ddim({buffer_size})); float_buffer.mutable_data<T>(ctx.GetPlace()); TensorBuffer float_tensor_buffer(float_buffer); auto* length = ctx.Input<framework::Tensor>("Length"); framework::Tensor left_length = int_tensor_buffer.GetBufferBlock({batch_size, 1}); framework::TensorCopy(*length, curr_place, dev_ctx, &left_length); int64_t max_seq_len = 0; GetMaxValue<DeviceContext, int64_t> get_max_value; get_max_value(dev_ctx, left_length, &max_seq_len); auto* scores = ctx.Output<framework::Tensor>("Scores"); scores->mutable_data<T>(curr_place); auto* path = ctx.Output<framework::Tensor>("Path"); path->Resize({batch_size, max_seq_len}); path->mutable_data<int64_t>(curr_place); framework::Tensor tpath = int_tensor_buffer.GetBufferBlock({max_seq_len, batch_size}); auto batch_path = Unbind(tpath); for (auto it = batch_path.begin(); it != batch_path.end(); ++it) { it->Resize({batch_size}); } // create and init required tensor framework::Tensor input_exp = float_tensor_buffer.GetBufferBlock({seq_len, batch_size, n_labels}); TransCompute<DeviceContext, T>(3, dev_ctx, *input, &input_exp, {1, 0, 2}); auto* transition = ctx.Input<framework::Tensor>("Transition"); framework::Tensor trans_exp = float_tensor_buffer.GetBufferBlock({n_labels, n_labels}); framework::TensorCopy(*transition, curr_place, dev_ctx, &trans_exp); trans_exp.Resize({1, n_labels, n_labels}); framework::Tensor alpha = float_tensor_buffer.GetBufferBlock({batch_size, n_labels}); framework::Tensor zero = int_tensor_buffer.GetBufferBlock({batch_size, 1}); int_functor(dev_ctx, &zero, 0); framework::Tensor one = int_tensor_buffer.GetBufferBlock({batch_size, 1}); int_functor(dev_ctx, &one, 1); framework::Tensor float_one = float_tensor_buffer.GetBufferBlock({batch_size, 1}); float_functor(dev_ctx, &float_one, static_cast<T>(1.0)); framework::Tensor alpha_trn_sum = float_tensor_buffer.GetBufferBlock({batch_size, n_labels, n_labels}); framework::Tensor alpha_max = float_tensor_buffer.GetBufferBlock({batch_size, n_labels}); framework::Tensor alpha_argmax = int_tensor_buffer.GetBufferBlock({seq_len, batch_size, n_labels}); auto alpha_argmax_unbind = Unbind(alpha_argmax); 
framework::Tensor alpha_nxt = float_tensor_buffer.GetBufferBlock({batch_size, n_labels}); framework::Tensor int_mask = int_tensor_buffer.GetBufferBlock({batch_size}); framework::Tensor zero_len_mask = int_tensor_buffer.GetBufferBlock({batch_size}); framework::Tensor float_mask = float_tensor_buffer.GetBufferBlock({batch_size, 1}); framework::Tensor stop_trans = float_tensor_buffer.GetBufferBlock({1, 1, n_labels}); framework::Tensor start_trans = float_tensor_buffer.GetBufferBlock({1, 1, n_labels}); framework::Tensor rest_trans = float_tensor_buffer.GetBufferBlock({1, n_labels - 2, n_labels}); framework::Tensor last_ids = int_tensor_buffer.GetBufferBlock({batch_size}); framework::Tensor last_ids_tmp = int_tensor_buffer.GetBufferBlock({batch_size}); framework::Tensor batch_offset = int_tensor_buffer.GetBufferBlock({batch_size}); framework::Tensor gather_idx = int_tensor_buffer.GetBufferBlock({batch_size}); std::vector<const framework::Tensor*> shape{&rest_trans, &stop_trans, &start_trans}; std::vector<framework::Tensor*> outputs{&rest_trans, &stop_trans, &start_trans}; math::SplitFunctor<DeviceContext, T> split_functor; split_functor(dev_ctx, trans_exp, shape, 1, &outputs); stop_trans.Resize({1, n_labels}); start_trans.Resize({1, n_labels}); auto logit0 = input_exp.Slice(0, 1); logit0.Resize({batch_size, n_labels}); BinaryOperation<DeviceContext, AddFunctor, T> AddFloat; BinaryOperation<DeviceContext, AddFunctor, int64_t> AddInt; BinaryOperation<DeviceContext, MulFunctor, T> MulFloat; BinaryOperation<DeviceContext, MulFunctor, int64_t> MulInt; BinaryOperation<DeviceContext, SubFunctor, T> SubFloat; BinaryOperation<DeviceContext, SubFunctor, int64_t> SubInt; if (include_bos_eos_tag) { AddFloat(dev_ctx, logit0, start_trans, &alpha); GetMask<DeviceContext, EqualFunctor, T>()(ctx, left_length, one, &float_mask); MulFloat(dev_ctx, stop_trans, float_mask, &alpha_nxt); AddFloat(dev_ctx, alpha, alpha_nxt, &alpha); } else { alpha = logit0; } SubInt(dev_ctx, left_length, one, &left_length); Argmax<DeviceContext, T, int64_t> argmax; for (int64_t i = 1; i < max_seq_len; ++i) { framework::Tensor logit = input_exp.Slice(i, i + 1); logit.Resize({batch_size, n_labels}); framework::Tensor& alpha_exp = alpha.Resize({batch_size, n_labels, 1}); AddFloat(dev_ctx, alpha_exp, trans_exp, &alpha_trn_sum); auto alpha_argmax_temp = alpha_argmax_unbind[i - 1]; alpha_argmax_temp.Resize({batch_size, n_labels}); argmax(ctx, alpha_trn_sum, &alpha_argmax_temp, &alpha_max, 1); historys.emplace_back(alpha_argmax_temp); AddFloat(dev_ctx, alpha_max, logit, &alpha_nxt); alpha.Resize({batch_size, n_labels}); // mask = paddle.cast((left_length > 0), dtype='float32') // alpha = mask * alpha_nxt + (1 - mask) * alpha GetMask<DeviceContext, GreaterThanFunctor, T>()(ctx, left_length, zero, &float_mask); // alpha_nxt = mask * alpha_nxt MulFloat(dev_ctx, alpha_nxt, float_mask, &alpha_nxt); // inv_mask = 1 - mask SubFloat(dev_ctx, float_one, float_mask, &float_mask); // alpha = (1 - mask) * alpha MulFloat(dev_ctx, alpha, float_mask, &alpha); // alpha += alpha_nxt AddFloat(dev_ctx, alpha, alpha_nxt, &alpha); if (include_bos_eos_tag) { GetMask<DeviceContext, EqualFunctor, T>()(ctx, left_length, one, &float_mask); // alpha += mask * trans_exp[:, self.stop_idx] MulFloat(dev_ctx, stop_trans, float_mask, &alpha_nxt); AddFloat(dev_ctx, alpha, alpha_nxt, &alpha); } SubInt(dev_ctx, left_length, one, &left_length); } argmax(ctx, alpha, &last_ids, scores, 1); left_length.Resize({batch_size}); GetMask<DeviceContext, GreaterEqualFunctor, 
int64_t>()(ctx, left_length, zero, &int_mask); // last_ids_update = last_ids * tag_mask int last_ids_index = 1; int actual_len = (std::min)(seq_len, static_cast<int>(max_seq_len)); MulInt(dev_ctx, last_ids, int_mask, &batch_path[actual_len - last_ids_index]); // The algorithm below can refer to // https://github.com/PaddlePaddle/PaddleNLP/blob/develop/paddlenlp/layers/crf.py#L438 ARange<DeviceContext> arange; arange(dev_ctx, batch_offset.data<int64_t>(), batch_size, n_labels); Gather<DeviceContext, int64_t, int64_t> gather; for (auto hist = historys.rbegin(); hist != historys.rend(); ++hist) { ++last_ids_index; AddInt(dev_ctx, left_length, one, &left_length); AddInt(dev_ctx, batch_offset, last_ids, &gather_idx); framework::Tensor& last_ids_update = batch_path[actual_len - last_ids_index]; hist->Resize({batch_size * n_labels}); gather(dev_ctx, *hist, gather_idx, &last_ids_update); GetMask<DeviceContext, GreaterThanFunctor, int64_t>()(ctx, left_length, zero, &int_mask); MulInt(dev_ctx, last_ids_update, int_mask, &last_ids_update); GetMask<DeviceContext, EqualFunctor, int64_t>()(ctx, left_length, zero, &zero_len_mask); MulInt(dev_ctx, last_ids, zero_len_mask, &last_ids_tmp); SubInt(dev_ctx, one, zero_len_mask, &zero_len_mask); MulInt(dev_ctx, last_ids_update, zero_len_mask, &last_ids_update); AddInt(dev_ctx, last_ids_update, last_ids_tmp, &last_ids_update); GetMask<DeviceContext, LessThanFunctor, int64_t>()(ctx, left_length, zero, &int_mask); MulInt(dev_ctx, last_ids, int_mask, &last_ids); AddInt(dev_ctx, last_ids_update, last_ids, &last_ids); } TransCompute<DeviceContext, int64_t>(2, dev_ctx, tpath, path, {1, 0}); } }; } // namespace operators } // namespace paddle
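Aside: in this no-OpenMP variant the #pragma omp parallel for lines have been stripped, but the run-time dispatch in BinaryOperation is kept. A minimal sketch of that dispatch, assuming only that PADDLE_WITH_MKLML gates OpenMP as in the headers above; UseMultiThreads is a hypothetical name, not part of Paddle.

#ifdef PADDLE_WITH_MKLML
#include <omp.h>
#endif

// The broadcast kernel is instantiated twice (is_multi_threads = true/false)
// and the instantiation is chosen from the OpenMP thread budget at run time.
static bool UseMultiThreads() {
#ifdef PADDLE_WITH_MKLML
    return omp_get_max_threads() > 1;
#else
    return false;
#endif
}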
#pragma once #include <algorithm> #include <memory> #include <string> #include <vector> #include "paddle/fluid/operators/controlflow/compare_op.h" #include "paddle/fluid/operators/elementwise/elementwise_functor.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/operators/transpose_op.h" #include "paddle/fluid/operators/unique_op.h" #include "paddle/phi/kernels/funcs/gather.h" #ifdef PADDLE_WITH_MKLML #include <omp.h> #endif namespace paddle { namespace operators { template <typename DeviceContext, typename T, typename IndType> struct Argmax { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor& input, framework::Tensor* out_idx, framework::Tensor* out, int axis) { framework::DDim input_dims = input.dims(); int64_t pre = 1; int64_t post = 1; int64_t n = input_dims[axis]; for (int i = 0; i < axis; i++) { pre *= input_dims[i]; } for (int i = axis + 1; i < input_dims.size(); i++) { post *= input_dims[i]; } int64_t height = pre * post; int64_t width = n; const T* in_data = input.data<T>(); IndType* out_idx_data = out_idx->data<IndType>(); T* out_data = out->data<T>(); // Reduce #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int64_t i = 0; i < height; ++i) { int64_t h = i / post; int64_t w = i % post; IndType max_idx = -1; T max_value = (std::numeric_limits<T>::lowest)(); // for windows compile for (int64_t j = 0; j < width; ++j) { if (in_data[h * width * post + j * post + w] > max_value) { max_value = in_data[h * width * post + j * post + w]; max_idx = j; } } out_data[i] = max_value; out_idx_data[i] = max_idx; } } }; template <typename DeviceContext> struct ARange { void operator()(const DeviceContext& dev_ctx, int64_t* data, int end, int64_t scale) { for (int i = 0; i < end; ++i) { data[i] = i * scale; } } }; template <typename DeviceContext, typename T> struct GetMaxValue { void operator()(const DeviceContext& dev_ctx, const framework::Tensor& input, T* max_value) { auto input_ptr = input.data<T>(); auto num = input.numel(); *max_value = *std::max_element(input_ptr, input_ptr + num); } }; template <typename DeviceContext, typename T, typename IndexT = int> struct Gather { void operator()(const DeviceContext& ctx, const framework::Tensor& src, const framework::Tensor& index, framework::Tensor* output) { phi::funcs::CPUGather<T, IndexT>(ctx, src, index, output); } }; template <typename T, typename Functor, typename OutT = T> void SameDimsBinaryOP(const framework::Tensor& lhs, const framework::Tensor& rhs, framework::Tensor* out) { const T* lhs_ptr = lhs.data<T>(); const T* rhs_ptr = rhs.data<T>(); OutT* out_ptr = out->data<OutT>(); Functor functor; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int i = 0; i < out->numel(); ++i) { out_ptr[i] = functor(lhs_ptr[i], rhs_ptr[i]); } } template <typename DeviceContext, template <typename InT, typename OutT> typename CompareFunctor, typename T> struct GetMask { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor& lhs, const framework::Tensor& rhs, framework::Tensor* mask) { SameDimsBinaryOP<int64_t, CompareFunctor<int64_t, T>, T>(lhs, rhs, mask); } }; template <bool is_multi_threads> struct GetInputIndex { void operator()(const std::vector<int>& lhs_dims, const std::vector<int>& rhs_dims, const std::vector<int>& output_dims, const std::vector<int>& lhs_strides, const std::vector<int>& rhs_strides, const std::vector<int>& output_strides, int output_idx, int* 
index_array, int* lhs_idx, int* rhs_idx) { int out_dims_size = output_strides.size(); for (int j = 0; j < out_dims_size; ++j) { int curr_idx = output_idx / output_strides[j]; output_idx %= output_strides[j]; *lhs_idx += (lhs_dims[j] > 1) ? curr_idx * lhs_strides[j] : 0; *rhs_idx += (rhs_dims[j] > 1) ? curr_idx * rhs_strides[j] : 0; } } }; template <> struct GetInputIndex<false> { void operator()(const std::vector<int>& lhs_dims, const std::vector<int>& rhs_dims, const std::vector<int>& output_dims, const std::vector<int>& lhs_strides, const std::vector<int>& rhs_strides, const std::vector<int>& output_strides, int output_idx, int* index_array, int* lhs_idx, int* rhs_idx) { int out_dims_size = output_strides.size(); *lhs_idx = phi::funcs::GetElementwiseIndex(lhs_dims.data(), out_dims_size, index_array); *rhs_idx = phi::funcs::GetElementwiseIndex(rhs_dims.data(), out_dims_size, index_array); phi::funcs::UpdateElementwiseIndexArray(output_dims.data(), out_dims_size, index_array); } }; template <typename T, typename Functor, bool is_multi_threads = false> void SimpleBroadcastBinaryOP(const framework::Tensor& lhs, const framework::Tensor& rhs, framework::Tensor* out) { const T* lhs_ptr = lhs.data<T>(); const T* rhs_ptr = rhs.data<T>(); T* out_ptr = out->data<T>(); int out_size = static_cast<int>(out->dims().size()); std::vector<int> out_dims(out_size); std::vector<int> lhs_dims(out_size); std::vector<int> rhs_dims(out_size); std::copy(lhs.dims().Get(), lhs.dims().Get() + out_size, lhs_dims.data()); std::copy(rhs.dims().Get(), rhs.dims().Get() + out_size, rhs_dims.data()); std::copy(out->dims().Get(), out->dims().Get() + out_size, out_dims.data()); std::vector<int> output_strides(out_size, 1); std::vector<int> lhs_strides(out_size, 1); std::vector<int> rhs_strides(out_size, 1); std::vector<int> index_array(out_size, 0); // calculate strides for (int i = out_size - 2; i >= 0; --i) { output_strides[i] = output_strides[i + 1] * out_dims[i + 1]; lhs_strides[i] = lhs_strides[i + 1] * lhs_dims[i + 1]; rhs_strides[i] = rhs_strides[i + 1] * rhs_dims[i + 1]; } Functor functor; GetInputIndex<is_multi_threads> get_input_index; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int i = 0; i < out->numel(); ++i) { int lhs_idx = 0; int rhs_idx = 0; get_input_index(lhs_dims, rhs_dims, out_dims, lhs_strides, rhs_strides, output_strides, i, index_array.data(), &lhs_idx, &rhs_idx); out_ptr[i] = functor(lhs_ptr[lhs_idx], rhs_ptr[rhs_idx]); } } template <typename DeviceContext, template <typename T> typename BinaryFunctor, typename T> struct BinaryOperation { void operator()(const DeviceContext& dev_ctx, const framework::Tensor& lhs, const framework::Tensor& rhs, framework::Tensor* output) { if (lhs.dims() == rhs.dims()) { SameDimsBinaryOP<T, BinaryFunctor<T>>(lhs, rhs, output); } else { bool is_multi_threads = false; #ifdef PADDLE_WITH_MKLML if (omp_get_max_threads() > 1) { is_multi_threads = true; } #endif if (is_multi_threads) { SimpleBroadcastBinaryOP<T, BinaryFunctor<T>, true>(lhs, rhs, output); } else { SimpleBroadcastBinaryOP<T, BinaryFunctor<T>, false>(lhs, rhs, output); } } } }; class TensorBuffer { public: explicit TensorBuffer(const framework::LoDTensor& in) : buffer_(in), offset_(0) { buffer_.Resize({buffer_.numel()}); } framework::Tensor GetBufferBlock(std::initializer_list<int64_t> shape) { int64_t size = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int64_t>()); framework::Tensor block = buffer_.Slice(offset_, offset_ + size); offset_ += size; block.Resize(shape); return 
block;
  }

 private:
  framework::LoDTensor buffer_;  // need to resize 1-D Tensor
  int offset_;
};

template <typename DeviceContext, typename T>
class ViterbiDecodeKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    bool include_bos_eos_tag = ctx.Attr<bool>("include_bos_eos_tag");
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    auto curr_place = ctx.GetPlace();
    auto* input = ctx.Input<framework::Tensor>("Input");
    auto batch_size = static_cast<int>(input->dims()[0]);
    auto seq_len = static_cast<int>(input->dims()[1]);
    auto n_labels = static_cast<int>(input->dims()[2]);
    phi::funcs::SetConstant<DeviceContext, T> float_functor;
    phi::funcs::SetConstant<DeviceContext, int64_t> int_functor;
    std::vector<framework::Tensor> historys;
    // Create tensor buffers up front to avoid allocating memory frequently;
    // the extra 10 * batch_size elements cover small tensors such as
    // int_mask, zero...
    int buffer_size = batch_size * (n_labels + 1) * seq_len + 10 * batch_size;
    framework::LoDTensor int_buffer;
    int_buffer.Resize(phi::make_ddim({buffer_size}));
    int_buffer.mutable_data<int64_t>(ctx.GetPlace());
    TensorBuffer int_tensor_buffer(int_buffer);
    // Create the float tensor buffer; the extra 10 * batch_size * n_labels
    // elements cover tensors such as alpha and alpha_max.
    buffer_size = batch_size * (seq_len + 10) * n_labels +
                  (batch_size + 2) * n_labels * n_labels;
    framework::LoDTensor float_buffer;
    float_buffer.Resize(phi::make_ddim({buffer_size}));
    float_buffer.mutable_data<T>(ctx.GetPlace());
    TensorBuffer float_tensor_buffer(float_buffer);
    auto* length = ctx.Input<framework::Tensor>("Length");
    framework::Tensor left_length =
        int_tensor_buffer.GetBufferBlock({batch_size, 1});
    framework::TensorCopy(*length, curr_place, dev_ctx, &left_length);
    int64_t max_seq_len = 0;
    GetMaxValue<DeviceContext, int64_t> get_max_value;
    get_max_value(dev_ctx, left_length, &max_seq_len);
    auto* scores = ctx.Output<framework::Tensor>("Scores");
    scores->mutable_data<T>(curr_place);
    auto* path = ctx.Output<framework::Tensor>("Path");
    path->Resize({batch_size, max_seq_len});
    path->mutable_data<int64_t>(curr_place);
    framework::Tensor tpath =
        int_tensor_buffer.GetBufferBlock({max_seq_len, batch_size});
    auto batch_path = Unbind(tpath);
    for (auto it = batch_path.begin(); it != batch_path.end(); ++it) {
      it->Resize({batch_size});
    }
    // create and init required tensors
    framework::Tensor input_exp =
        float_tensor_buffer.GetBufferBlock({seq_len, batch_size, n_labels});
    TransCompute<DeviceContext, T>(3, dev_ctx, *input, &input_exp, {1, 0, 2});
    auto* transition = ctx.Input<framework::Tensor>("Transition");
    framework::Tensor trans_exp =
        float_tensor_buffer.GetBufferBlock({n_labels, n_labels});
    framework::TensorCopy(*transition, curr_place, dev_ctx, &trans_exp);
    trans_exp.Resize({1, n_labels, n_labels});
    framework::Tensor alpha =
        float_tensor_buffer.GetBufferBlock({batch_size, n_labels});
    framework::Tensor zero = int_tensor_buffer.GetBufferBlock({batch_size, 1});
    int_functor(dev_ctx, &zero, 0);
    framework::Tensor one = int_tensor_buffer.GetBufferBlock({batch_size, 1});
    int_functor(dev_ctx, &one, 1);
    framework::Tensor float_one =
        float_tensor_buffer.GetBufferBlock({batch_size, 1});
    float_functor(dev_ctx, &float_one, static_cast<T>(1.0));
    framework::Tensor alpha_trn_sum =
        float_tensor_buffer.GetBufferBlock({batch_size, n_labels, n_labels});
    framework::Tensor alpha_max =
        float_tensor_buffer.GetBufferBlock({batch_size, n_labels});
    framework::Tensor alpha_argmax =
        int_tensor_buffer.GetBufferBlock({seq_len, batch_size, n_labels});
    auto alpha_argmax_unbind = Unbind(alpha_argmax);
    framework::Tensor alpha_nxt =
        float_tensor_buffer.GetBufferBlock({batch_size, n_labels});
    framework::Tensor int_mask = int_tensor_buffer.GetBufferBlock({batch_size});
    framework::Tensor zero_len_mask =
        int_tensor_buffer.GetBufferBlock({batch_size});
    framework::Tensor float_mask =
        float_tensor_buffer.GetBufferBlock({batch_size, 1});
    framework::Tensor stop_trans =
        float_tensor_buffer.GetBufferBlock({1, 1, n_labels});
    framework::Tensor start_trans =
        float_tensor_buffer.GetBufferBlock({1, 1, n_labels});
    framework::Tensor rest_trans =
        float_tensor_buffer.GetBufferBlock({1, n_labels - 2, n_labels});
    framework::Tensor last_ids = int_tensor_buffer.GetBufferBlock({batch_size});
    framework::Tensor last_ids_tmp =
        int_tensor_buffer.GetBufferBlock({batch_size});
    framework::Tensor batch_offset =
        int_tensor_buffer.GetBufferBlock({batch_size});
    framework::Tensor gather_idx =
        int_tensor_buffer.GetBufferBlock({batch_size});
    std::vector<const framework::Tensor*> shape{&rest_trans, &stop_trans,
                                                &start_trans};
    std::vector<framework::Tensor*> outputs{&rest_trans, &stop_trans,
                                            &start_trans};
    math::SplitFunctor<DeviceContext, T> split_functor;
    split_functor(dev_ctx, trans_exp, shape, 1, &outputs);
    stop_trans.Resize({1, n_labels});
    start_trans.Resize({1, n_labels});
    auto logit0 = input_exp.Slice(0, 1);
    logit0.Resize({batch_size, n_labels});
    BinaryOperation<DeviceContext, AddFunctor, T> AddFloat;
    BinaryOperation<DeviceContext, AddFunctor, int64_t> AddInt;
    BinaryOperation<DeviceContext, MulFunctor, T> MulFloat;
    BinaryOperation<DeviceContext, MulFunctor, int64_t> MulInt;
    BinaryOperation<DeviceContext, SubFunctor, T> SubFloat;
    BinaryOperation<DeviceContext, SubFunctor, int64_t> SubInt;
    if (include_bos_eos_tag) {
      AddFloat(dev_ctx, logit0, start_trans, &alpha);
      GetMask<DeviceContext, EqualFunctor, T>()(ctx, left_length, one,
                                                &float_mask);
      MulFloat(dev_ctx, stop_trans, float_mask, &alpha_nxt);
      AddFloat(dev_ctx, alpha, alpha_nxt, &alpha);
    } else {
      alpha = logit0;
    }
    SubInt(dev_ctx, left_length, one, &left_length);
    Argmax<DeviceContext, T, int64_t> argmax;
    for (int64_t i = 1; i < max_seq_len; ++i) {
      framework::Tensor logit = input_exp.Slice(i, i + 1);
      logit.Resize({batch_size, n_labels});
      framework::Tensor& alpha_exp = alpha.Resize({batch_size, n_labels, 1});
      AddFloat(dev_ctx, alpha_exp, trans_exp, &alpha_trn_sum);
      auto alpha_argmax_temp = alpha_argmax_unbind[i - 1];
      alpha_argmax_temp.Resize({batch_size, n_labels});
      argmax(ctx, alpha_trn_sum, &alpha_argmax_temp, &alpha_max, 1);
      historys.emplace_back(alpha_argmax_temp);
      AddFloat(dev_ctx, alpha_max, logit, &alpha_nxt);
      alpha.Resize({batch_size, n_labels});
      // mask = paddle.cast((left_length > 0), dtype='float32')
      // alpha = mask * alpha_nxt + (1 - mask) * alpha
      GetMask<DeviceContext, GreaterThanFunctor, T>()(ctx, left_length, zero,
                                                      &float_mask);
      // alpha_nxt = mask * alpha_nxt
      MulFloat(dev_ctx, alpha_nxt, float_mask, &alpha_nxt);
      // inv_mask = 1 - mask
      SubFloat(dev_ctx, float_one, float_mask, &float_mask);
      // alpha = (1 - mask) * alpha
      MulFloat(dev_ctx, alpha, float_mask, &alpha);
      // alpha += alpha_nxt
      AddFloat(dev_ctx, alpha, alpha_nxt, &alpha);
      if (include_bos_eos_tag) {
        GetMask<DeviceContext, EqualFunctor, T>()(ctx, left_length, one,
                                                  &float_mask);
        // alpha += mask * trans_exp[:, self.stop_idx]
        MulFloat(dev_ctx, stop_trans, float_mask, &alpha_nxt);
        AddFloat(dev_ctx, alpha, alpha_nxt, &alpha);
      }
      SubInt(dev_ctx, left_length, one, &left_length);
    }
    argmax(ctx, alpha, &last_ids, scores, 1);
    left_length.Resize({batch_size});
    GetMask<DeviceContext, GreaterEqualFunctor, int64_t>()(ctx, left_length,
                                                           zero, &int_mask);
    // last_ids_update = last_ids * tag_mask
    int last_ids_index = 1;
    int actual_len = (std::min)(seq_len, static_cast<int>(max_seq_len));
    MulInt(dev_ctx, last_ids, int_mask,
           &batch_path[actual_len - last_ids_index]);
    // The algorithm below follows
    // https://github.com/PaddlePaddle/PaddleNLP/blob/develop/paddlenlp/layers/crf.py#L438
    ARange<DeviceContext> arange;
    arange(dev_ctx, batch_offset.data<int64_t>(), batch_size, n_labels);
    Gather<DeviceContext, int64_t, int64_t> gather;
    for (auto hist = historys.rbegin(); hist != historys.rend(); ++hist) {
      ++last_ids_index;
      AddInt(dev_ctx, left_length, one, &left_length);
      AddInt(dev_ctx, batch_offset, last_ids, &gather_idx);
      framework::Tensor& last_ids_update =
          batch_path[actual_len - last_ids_index];
      hist->Resize({batch_size * n_labels});
      gather(dev_ctx, *hist, gather_idx, &last_ids_update);
      GetMask<DeviceContext, GreaterThanFunctor, int64_t>()(ctx, left_length,
                                                            zero, &int_mask);
      MulInt(dev_ctx, last_ids_update, int_mask, &last_ids_update);
      GetMask<DeviceContext, EqualFunctor, int64_t>()(ctx, left_length, zero,
                                                      &zero_len_mask);
      MulInt(dev_ctx, last_ids, zero_len_mask, &last_ids_tmp);
      SubInt(dev_ctx, one, zero_len_mask, &zero_len_mask);
      MulInt(dev_ctx, last_ids_update, zero_len_mask, &last_ids_update);
      AddInt(dev_ctx, last_ids_update, last_ids_tmp, &last_ids_update);
      GetMask<DeviceContext, LessThanFunctor, int64_t>()(ctx, left_length,
                                                         zero, &int_mask);
      MulInt(dev_ctx, last_ids, int_mask, &last_ids);
      AddInt(dev_ctx, last_ids_update, last_ids, &last_ids);
    }
    TransCompute<DeviceContext, int64_t>(2, dev_ctx, tpath, path, {1, 0});
  }
};
}  // namespace operators
}  // namespace paddle
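/*
 * Editor's sketch: the tensorized kernel above is the classic Viterbi
 * recursion, alpha[t][j] = max_i(alpha[t-1][i] + trans[i][j]) + emit[t][j],
 * with length masks standing in for per-sequence control flow. A minimal,
 * self-contained plain-C version of the same recursion and backtrace
 * follows; the scores and sizes are hypothetical, purely for illustration.
 */
#include <stdio.h>

#define SEQ_LEN 4
#define N_LABELS 3

int main(void)
{
	float emit[SEQ_LEN][N_LABELS] = {
		{0.1f, 0.4f, 0.5f}, {0.6f, 0.1f, 0.3f},
		{0.3f, 0.3f, 0.4f}, {0.2f, 0.7f, 0.1f}
	};
	float trans[N_LABELS][N_LABELS] = {
		{0.5f, 0.3f, 0.2f}, {0.2f, 0.5f, 0.3f}, {0.3f, 0.2f, 0.5f}
	};
	float alpha[N_LABELS], next[N_LABELS];
	int history[SEQ_LEN][N_LABELS];	/* argmax table, mirrors `historys` */
	int path[SEQ_LEN];
	int t, i, j, last;

	for (j = 0; j < N_LABELS; j++)
		alpha[j] = emit[0][j];	/* init from the first emission */
	for (t = 1; t < SEQ_LEN; t++) {
		for (j = 0; j < N_LABELS; j++) {
			int best = 0;
			float best_score = alpha[0] + trans[0][j];
			for (i = 1; i < N_LABELS; i++) {
				float s = alpha[i] + trans[i][j];
				if (s > best_score) { best_score = s; best = i; }
			}
			history[t][j] = best;	/* kept for the backward pass */
			next[j] = best_score + emit[t][j];
		}
		for (j = 0; j < N_LABELS; j++)
			alpha[j] = next[j];
	}
	/* final argmax, then walk the history backwards (cf. the rbegin loop) */
	last = 0;
	for (j = 1; j < N_LABELS; j++)
		if (alpha[j] > alpha[last]) last = j;
	for (t = SEQ_LEN - 1; t >= 0; t--) {
		path[t] = last;
		if (t) last = history[t][last];
	}
	for (t = 0; t < SEQ_LEN; t++)
		printf("%d ", path[t]);
	printf("\n");
	return 0;
}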
opencl_sxc_fmt_plug.c
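/*
 * Editor's sketch: the plugin below cracks StarOffice/OpenOffice .sxc
 * hashes. For orientation, this is how a "$sxc$*" input line decomposes
 * into the twelve '*'-separated fields that valid() and get_salt() further
 * down consume. Plain C, standard library only; the sample string is
 * shortened and hypothetical apart from the tag and the field order.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[] = "$sxc$*0*0*1024*16*aa11*8*bb22*16*cc33*860*864*dd44";
	const char *names[12] = {
		"cipher type", "checksum type", "iterations", "key size",
		"checksum", "iv length", "iv", "salt length", "salt",
		"original length", "length", "content"
	};
	char *tok = strtok(line + strlen("$sxc$*"), "*");
	int i;

	for (i = 0; tok != NULL && i < 12; i++, tok = strtok(NULL, "*"))
		printf("%-16s %s\n", names[i], tok);
	return 0;
}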
/*
 * Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format.
 *
 * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */
#ifdef HAVE_OPENCL

#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_sxc;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_sxc);
#else

#include <string.h>
#include "sha.h"
#include <openssl/blowfish.h>
#include "aes.h"
#ifdef _OPENMP
#include <omp.h>
#endif

#include "arch.h"
#include "formats.h"
#include "common.h"
#include "stdint.h"
#include "misc.h"
#include "options.h"
#include "common-opencl.h"

#define FORMAT_LABEL		"sxc-opencl"
#define FORMAT_NAME		"StarOffice .sxc"
#define FORMAT_TAG		"$sxc$*"
#define FORMAT_TAG_LEN		(sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME		"PBKDF2-SHA1 OpenCL Blowfish"
#define BENCHMARK_COMMENT	""
#define BENCHMARK_LENGTH	-1
#define MIN_KEYS_PER_CRYPT	1
#define MAX_KEYS_PER_CRYPT	1
#define BINARY_SIZE		20
#define PLAINTEXT_LENGTH	64
#define SALT_SIZE		sizeof(sxc_cpu_salt)
#define BINARY_ALIGN		MEM_ALIGN_WORD
#define SALT_ALIGN		4

typedef struct {
	uint32_t length;
	uint8_t v[20];	// hash of password
} sxc_password;

typedef struct {
	uint32_t v[16/4];
} sxc_hash;

typedef struct {
	uint32_t iterations;
	uint32_t outlen;
	uint32_t skip_bytes;
	uint8_t length;
	uint8_t salt[64];
} sxc_salt;

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)];

typedef struct {
	int cipher_type;
	int checksum_type;
	int iterations;
	int key_size;
	int iv_length;
	int salt_length;
	int original_length;
	int length;
	unsigned char iv[16];
	unsigned char salt[32];
	unsigned char content[1024];
} sxc_cpu_salt;

static sxc_cpu_salt *cur_salt;

static struct fmt_tests sxc_tests[] = {
{"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a7937b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdccb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e0248fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"}, {NULL} }; static cl_int cl_error; static sxc_password *inbuffer; static sxc_hash *outbuffer; static sxc_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; static struct fmt_main *self; static size_t insize, outsize, settingsize; #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. 
#include "opencl-autotune.h" #include "memdbg.h" static const char * warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(sxc_password) * gws; outsize = sizeof(sxc_hash) * gws; settingsize = sizeof(sxc_salt); inbuffer = mem_calloc(1, insize); outbuffer = mem_alloc(outsize); saved_key = mem_calloc(gws, sizeof(*saved_key)); crypt_out = mem_calloc(gws, sizeof(*crypt_out)); /// Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); } static void release_clobj(void) { if (crypt_out) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); MEM_FREE(saved_key); MEM_FREE(crypt_out); } } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static void init(struct fmt_main *_self) { self = _self; opencl_prepare_dev(gpu_id); } static void reset(struct db_main *db) { if (!autotuned) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d", (int)sizeof(inbuffer->v), (int)sizeof(currentsalt.salt), (int)sizeof(outbuffer->v)); opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(sxc_password), 0, db); // Auto tune execution from shared/included code. 
		autotune_run(self, 1, 0, 1000);
	}
}

static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	int res, extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* cipher type */
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* checksum type */
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iterations */
		goto err;
	res = atoi(p);
	if (res <= 0)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* key size */
		goto err;
	res = atoi(p);
	if (res != 16 && res != 32)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* checksum field (skipped) */
		goto err;
	if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra)
		goto err;
	if (!ishex(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iv length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 16)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iv */
		goto err;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto err;
	if (!ishex(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 32)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
		goto err;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto err;
	if (!ishex(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* original length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 1024)	/* 1024 because of "unsigned char output[1024];" in crypt_all */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 1024)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* content */
		goto err;
	if (hexlenl(p, &extra) != res * 2 || extra)
		goto err;
	if (strtokm(NULL, "*") != NULL)	/* the end */
		goto err;

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static sxc_cpu_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += 6;	/* skip over "$sxc$*" */
	p = strtokm(ctcopy, "*");
	cs.cipher_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.checksum_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.iterations = atoi(p);
	p = strtokm(NULL, "*");
	cs.key_size = atoi(p);
	strtokm(NULL, "*");	/* skip checksum field */
	p = strtokm(NULL, "*");
	cs.iv_length = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.iv_length; i++)
		cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.salt_length = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.salt_length; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.original_length = atoi(p);
	p = strtokm(NULL, "*");
	cs.length = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.length; i++)
		cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;

	ctcopy += FORMAT_TAG_LEN;	/* skip over "$sxc$*" */
	strtokm(ctcopy, "*");
	strtokm(NULL, "*");
	strtokm(NULL, "*");
	strtokm(NULL, "*");
	p = strtokm(NULL, "*");
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	MEM_FREE(keeptr);
	return out;
}

static void set_salt(void *salt)
{
	cur_salt = (sxc_cpu_salt*)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->salt_length);
	currentsalt.length = cur_salt->salt_length;
	currentsalt.iterations = cur_salt->iterations;
	currentsalt.outlen = cur_salt->key_size;
	currentsalt.skip_bytes = 0;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
		"Copy salt to gpu");
}

#undef set_key
static void set_key(char *key, int index)
{
	int saved_len = strlen(key);

	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		unsigned char hash[20];
		SHA_CTX ctx;

		SHA1_Init(&ctx);
		SHA1_Update(&ctx, (unsigned char *)saved_key[index],
		            strlen(saved_key[index]));
		SHA1_Final((unsigned char *)hash, &ctx);
		memcpy(inbuffer[index].v, hash, 20);
		inbuffer[index].length = 20;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
		"Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]),
		"Run kernel");

	/// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]),
		"Copy result back");

	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		BF_KEY bf_key;
		SHA_CTX ctx;
		int bf_ivec_pos;
		unsigned char ivec[8];
		unsigned char output[1024];

		bf_ivec_pos = 0;
		memcpy(ivec, cur_salt->iv, 8);
		BF_set_key(&bf_key, cur_salt->key_size,
		           (const unsigned char*)outbuffer[index].v);
		BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length,
		                 &bf_key, ivec, &bf_ivec_pos, 0);
		SHA1_Init(&ctx);
		SHA1_Update(&ctx, output, cur_salt->original_length);
		SHA1_Final((unsigned char*)crypt_out[index], &ctx);
	}

	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

static unsigned int iteration_count(void *salt)
{
	sxc_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->iterations;
}

struct fmt_main fmt_opencl_sxc = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		sxc_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
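/*
 * Editor's sketch: the CPU half of crypt_all() above. The PBKDF2 key
 * derived on the GPU decrypts the stored content with Blowfish in CFB64
 * mode, and the SHA-1 of the recovered plaintext is what gets compared
 * against the stored checksum. Restated below as a self-contained helper
 * using the same OpenSSL calls; sxc_check is a hypothetical name, not part
 * of the plugin.
 */
#include <string.h>
#include <openssl/blowfish.h>
#include <openssl/sha.h>

static void sxc_check(const unsigned char *derived_key, int key_size,
                      const unsigned char iv[8],
                      const unsigned char *content, long length,
                      int original_length, unsigned char digest[20])
{
	BF_KEY bf_key;
	unsigned char ivec[8], output[1024];
	int num = 0;

	memcpy(ivec, iv, 8);
	BF_set_key(&bf_key, key_size, derived_key);
	/* enc=0: decrypt the ciphertext into output */
	BF_cfb64_encrypt(content, output, length, &bf_key, ivec, &num, 0);
	/* hash only the original (pre-padding) length, as crypt_all() does */
	SHA1(output, original_length, digest);
}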
#ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_sxc; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_sxc); #else #include <string.h> #include "sha.h" #include <openssl/blowfish.h> #include "aes.h" #include "arch.h" #include "formats.h" #include "common.h" #include "stdint.h" #include "misc.h" #include "options.h" #include "common.h" #include "formats.h" #include "common-opencl.h" #define FORMAT_LABEL "sxc-opencl" #define FORMAT_NAME "StarOffice .sxc" #define FORMAT_TAG "$sxc$*" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL Blowfish" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BINARY_SIZE 20 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(sxc_cpu_salt) #define BINARY_ALIGN MEM_ALIGN_WORD #define SALT_ALIGN 4 typedef struct { uint32_t length; uint8_t v[20]; //hash of password } sxc_password; typedef struct { uint32_t v[16 / 4]; } sxc_hash; typedef struct { uint32_t iterations; uint32_t outlen; uint32_t skip_bytes; uint8_t length; uint8_t salt[64]; } sxc_salt; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32(*crypt_out)[32 / sizeof(ARCH_WORD_32)]; typedef struct { int cipher_type; int checksum_type; int iterations; int key_size; int iv_length; int salt_length; int original_length; int length; unsigned char iv[16]; unsigned char salt[32]; unsigned char content[1024]; } sxc_cpu_salt; static sxc_cpu_salt *cur_salt; static struct fmt_tests sxc_tests[] = { {"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a7937b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdccb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e0248fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"}, {NULL} }; static cl_int cl_error; static sxc_password *inbuffer; static sxc_hash *outbuffer; static sxc_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; static struct fmt_main *self; static size_t insize, outsize, settingsize; #define STEP 0 #define SEED 256 //This file contains auto - tuning routine(s).Has to be included after formats definitions. 
#include "opencl-autotune.h" #include "memdbg.h" static const char *warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(sxc_password) * gws; outsize = sizeof(sxc_hash) * gws; settingsize = sizeof(sxc_salt); inbuffer = mem_calloc(1, insize); outbuffer = mem_alloc(outsize); saved_key = mem_calloc(gws, sizeof(*saved_key)); crypt_out = mem_calloc(gws, sizeof(*crypt_out)); ///Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); } static void release_clobj(void) { if (crypt_out) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); MEM_FREE(saved_key); MEM_FREE(crypt_out); } } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static void init(struct fmt_main *_self) { self = _self; opencl_prepare_dev(gpu_id); } static void reset(struct db_main *db) { if (!autotuned) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d", (int)sizeof(inbuffer->v), (int)sizeof(currentsalt.salt), (int)sizeof(outbuffer->v)); opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); //Initialize openCL tuning(library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(sxc_password), 0, db); //Auto tune execution from shared / included code. 
autotune_run(self, 1, 0, 1000); } } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p; int res, extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; if ((p = strtokm(ctcopy, "*")) == NULL) /* cipher type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* checksum type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iterations */ goto err; res = atoi(p); if (res <= 0) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* key size */ goto err; res = atoi(p); if (res != 16 && res != 32) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* checksum field (skipped) */ goto err; if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra) goto err; if (!ishex(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iv length */ goto err; res = atoi(p); if (res <= 0 || res > 16) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iv */ goto err; if (hexlenl(p, &extra) != res * 2 || extra) goto err; if (!ishex(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt length */ goto err; res = atoi(p); if (res <= 0 || res > 32) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt */ goto err; if (hexlenl(p, &extra) != res * 2 || extra) goto err; if (!ishex(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* original length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) /* 1024 because of "unsigned char * output[1024];" in crypt_all */ goto err; if ((p = strtokm(NULL, "*")) == NULL) /* length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* content */ goto err; if (hexlenl(p, &extra) != res * 2 || extra) goto err; if (strtokm(NULL, "*") != NULL) /* the end */ goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void * get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static sxc_cpu_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += 6; /* skip over "$sxc$*" */ p = strtokm(ctcopy, "*"); cs.cipher_type = atoi(p); p = strtokm(NULL, "*"); cs.checksum_type = atoi(p); p = strtokm(NULL, "*"); cs.iterations = atoi(p); p = strtokm(NULL, "*"); cs.key_size = atoi(p); strtokm(NULL, "*"); /* skip checksum field */ p = strtokm(NULL, "*"); cs.iv_length = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.iv_length; i++) cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.salt_length = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.salt_length; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.original_length = atoi(p); p = strtokm(NULL, "*"); cs.length = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.length; i++) cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void * get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE + 1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; /* skip over "$sxc$*" */ strtokm(ctcopy, "*"); strtokm(NULL, "*"); strtokm(NULL, "*"); strtokm(NULL, "*"); p = strtokm(NULL, "*"); for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 
2; } MEM_FREE(keeptr); return out; } static void set_salt(void *salt) { cur_salt = (sxc_cpu_salt *) salt; memcpy((char *)currentsalt.salt, cur_salt->salt, cur_salt->salt_length); currentsalt.length = cur_salt->salt_length; currentsalt.iterations = cur_salt->iterations; currentsalt.outlen = cur_salt->key_size; currentsalt.skip_bytes = 0; HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy salt to gpu"); } #undef set_key static void set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char * get_key(int index) { return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; size_t *lws = local_work_size ? &local_work_size : NULL; global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size); for (index = 0; index < count; index++) { unsigned char hash[20]; SHA_CTX ctx; SHA1_Init(&ctx); SHA1_Update(&ctx, (unsigned char *)saved_key[index], strlen(saved_key[index])); SHA1_Final((unsigned char *)hash, &ctx); memcpy(inbuffer[index].v, hash, 20); inbuffer[index].length = 20; } ///Copy data to gpu BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); ///Run kernel BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run kernel"); ///Read the result back BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back"); if (ocl_autotune_running) return count; for (index = 0; index < count; index++) { BF_KEY bf_key; SHA_CTX ctx; int bf_ivec_pos; unsigned char ivec[8]; unsigned char output[1024]; bf_ivec_pos = 0; memcpy(ivec, cur_salt->iv, 8); BF_set_key(&bf_key, cur_salt->key_size, (const unsigned char *)outbuffer[index].v); BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0); SHA1_Init(&ctx); SHA1_Update(&ctx, output, cur_salt->original_length); SHA1_Final((unsigned char *)crypt_out[index], &ctx); } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static unsigned int iteration_count(void *salt) { sxc_salt *my_salt; my_salt = salt; return (unsigned int)my_salt->iterations; } struct fmt_main fmt_opencl_sxc = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, {FORMAT_TAG}, sxc_tests }, { init, done, reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */ }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash/* Not usable with $SOURCE_HASH$ */ }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
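/*
 * Editor's note: the OpenMP-formatted variant below is identical to the
 * no-OpenMP variant above except for the guarded omp.h include and the
 * "#pragma omp parallel for" around the two independent per-candidate loops
 * in crypt_all(). A minimal, self-contained sketch of that guard pattern
 * (illustrative loop only, not from the plugin):
 */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

int main(void)
{
	double sum = 0.0;
	int i;

#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
	for (i = 0; i < 1000000; i++)
		sum += 1.0 / (i + 1.0);	/* iterations are independent */
	printf("%f\n", sum);
	return 0;
}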
#ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_sxc; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_sxc); #else #include <string.h> #include "sha.h" #include <openssl/blowfish.h> #include "aes.h" #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "formats.h" #include "common.h" #include "stdint.h" #include "misc.h" #include "options.h" #include "common.h" #include "formats.h" #include "common-opencl.h" #define FORMAT_LABEL "sxc-opencl" #define FORMAT_NAME "StarOffice .sxc" #define FORMAT_TAG "$sxc$*" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL Blowfish" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BINARY_SIZE 20 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(sxc_cpu_salt) #define BINARY_ALIGN MEM_ALIGN_WORD #define SALT_ALIGN 4 typedef struct { uint32_t length; uint8_t v[20]; //hash of password } sxc_password; typedef struct { uint32_t v[16 / 4]; } sxc_hash; typedef struct { uint32_t iterations; uint32_t outlen; uint32_t skip_bytes; uint8_t length; uint8_t salt[64]; } sxc_salt; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32(*crypt_out)[32 / sizeof(ARCH_WORD_32)]; typedef struct { int cipher_type; int checksum_type; int iterations; int key_size; int iv_length; int salt_length; int original_length; int length; unsigned char iv[16]; unsigned char salt[32]; unsigned char content[1024]; } sxc_cpu_salt; static sxc_cpu_salt *cur_salt; static struct fmt_tests sxc_tests[] = { {"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a7937b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdccb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"}, 
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e0248fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"}, {NULL} }; static cl_int cl_error; static sxc_password *inbuffer; static sxc_hash *outbuffer; static sxc_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; static struct fmt_main *self; static size_t insize, outsize, settingsize; #define STEP 0 #define SEED 256 //This file contains auto - tuning routine(s).Has to be included after formats definitions. 
#include "opencl-autotune.h" #include "memdbg.h" static const char *warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(sxc_password) * gws; outsize = sizeof(sxc_hash) * gws; settingsize = sizeof(sxc_salt); inbuffer = mem_calloc(1, insize); outbuffer = mem_alloc(outsize); saved_key = mem_calloc(gws, sizeof(*saved_key)); crypt_out = mem_calloc(gws, sizeof(*crypt_out)); ///Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); } static void release_clobj(void) { if (crypt_out) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); MEM_FREE(saved_key); MEM_FREE(crypt_out); } } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static void init(struct fmt_main *_self) { self = _self; opencl_prepare_dev(gpu_id); } static void reset(struct db_main *db) { if (!autotuned) { char build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d", (int)sizeof(inbuffer->v), (int)sizeof(currentsalt.salt), (int)sizeof(outbuffer->v)); opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); //Initialize openCL tuning(library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(sxc_password), 0, db); //Auto tune execution from shared / included code. 
autotune_run(self, 1, 0, 1000); } } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy; char *keeptr; char *p; int res, extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; if ((p = strtokm(ctcopy, "*")) == NULL) /* cipher type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* checksum type */ goto err; res = atoi(p); if (res != 0 && res != 1) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iterations */ goto err; res = atoi(p); if (res <= 0) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* key size */ goto err; res = atoi(p); if (res != 16 && res != 32) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* checksum field (skipped) */ goto err; if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra) goto err; if (!ishex(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iv length */ goto err; res = atoi(p); if (res <= 0 || res > 16) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* iv */ goto err; if (hexlenl(p, &extra) != res * 2 || extra) goto err; if (!ishex(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt length */ goto err; res = atoi(p); if (res <= 0 || res > 32) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* salt */ goto err; if (hexlenl(p, &extra) != res * 2 || extra) goto err; if (!ishex(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* original length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) /* 1024 because of "unsigned char * output[1024];" in crypt_all */ goto err; if ((p = strtokm(NULL, "*")) == NULL) /* length */ goto err; res = atoi(p); if (res <= 0 || res > 1024) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* content */ goto err; if (hexlenl(p, &extra) != res * 2 || extra) goto err; if (strtokm(NULL, "*") != NULL) /* the end */ goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void * get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; int i; char *p; static sxc_cpu_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += 6; /* skip over "$sxc$*" */ p = strtokm(ctcopy, "*"); cs.cipher_type = atoi(p); p = strtokm(NULL, "*"); cs.checksum_type = atoi(p); p = strtokm(NULL, "*"); cs.iterations = atoi(p); p = strtokm(NULL, "*"); cs.key_size = atoi(p); strtokm(NULL, "*"); /* skip checksum field */ p = strtokm(NULL, "*"); cs.iv_length = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.iv_length; i++) cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.salt_length = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.salt_length; i++) cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.original_length = atoi(p); p = strtokm(NULL, "*"); cs.length = atoi(p); p = strtokm(NULL, "*"); for (i = 0; i < cs.length; i++) cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; MEM_FREE(keeptr); return (void *)&cs; } static void * get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE + 1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; /* skip over "$sxc$*" */ strtokm(ctcopy, "*"); strtokm(NULL, "*"); strtokm(NULL, "*"); strtokm(NULL, "*"); p = strtokm(NULL, "*"); for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 
2; } MEM_FREE(keeptr); return out; } static void set_salt(void *salt) { cur_salt = (sxc_cpu_salt *) salt; memcpy((char *)currentsalt.salt, cur_salt->salt, cur_salt->salt_length); currentsalt.length = cur_salt->salt_length; currentsalt.iterations = cur_salt->iterations; currentsalt.outlen = cur_salt->key_size; currentsalt.skip_bytes = 0; HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy salt to gpu"); } #undef set_key static void set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char * get_key(int index) { return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; size_t *lws = local_work_size ? &local_work_size : NULL; global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size); #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { unsigned char hash[20]; SHA_CTX ctx; SHA1_Init(&ctx); SHA1_Update(&ctx, (unsigned char *)saved_key[index], strlen(saved_key[index])); SHA1_Final((unsigned char *)hash, &ctx); memcpy(inbuffer[index].v, hash, 20); inbuffer[index].length = 20; } ///Copy data to gpu BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); ///Run kernel BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run kernel"); ///Read the result back BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back"); if (ocl_autotune_running) return count; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { BF_KEY bf_key; SHA_CTX ctx; int bf_ivec_pos; unsigned char ivec[8]; unsigned char output[1024]; bf_ivec_pos = 0; memcpy(ivec, cur_salt->iv, 8); BF_set_key(&bf_key, cur_salt->key_size, (const unsigned char *)outbuffer[index].v); BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0); SHA1_Init(&ctx); SHA1_Update(&ctx, output, cur_salt->original_length); SHA1_Final((unsigned char *)crypt_out[index], &ctx); } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static unsigned int iteration_count(void *salt) { sxc_salt *my_salt; my_salt = salt; return (unsigned int)my_salt->iterations; } struct fmt_main fmt_opencl_sxc = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, {FORMAT_TAG}, sxc_tests }, { init, done, reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */ }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash/* Not usable with $SOURCE_HASH$ */ }, cmp_all, cmp_one, 
cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
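The valid()/get_salt() pair above walks the "$sxc$*"-delimited field list twice: once to reject malformed input, once to decode the hex fields with the atoi16[] lookup table. A minimal, self-contained sketch of that hex-field decoding step in plain C (hexval/hex2bin are illustrative names, not John the Ripper APIs):

/* Minimal sketch (illustrative, not John the Ripper code): decode one
 * hex-encoded field the way get_salt() does with atoi16[], using plain C.
 * Assumes the field length was already checked by a valid()-style routine. */
#include <stddef.h>

static int hexval(char c)
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    if (c >= 'A' && c <= 'F') return c - 'A' + 10;
    return -1; /* valid() should have rejected non-hex input already */
}

/* Decodes 2*out_len hex digits into out; returns 0 on success, -1 on error. */
static int hex2bin(const char *hex, unsigned char *out, size_t out_len)
{
    size_t i;
    for (i = 0; i < out_len; i++) {
        int hi = hexval(hex[2 * i]);
        int lo = hexval(hex[2 * i + 1]);
        if (hi < 0 || lo < 0)
            return -1;
        out[i] = (unsigned char) ((hi << 4) | lo);
    }
    return 0;
}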
polybench.c
/** * polybench.c: This file is part of the PolyBench/C 3.2 test suite. * * * Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://polybench.sourceforge.net */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <unistd.h> #include <assert.h> #include <time.h> #include <sys/time.h> #include <sys/resource.h> #include <sched.h> #include <math.h> #ifdef _OPENMP # include <omp.h> #endif /* By default, collect PAPI counters on thread 0. */ #ifndef POLYBENCH_THREAD_MONITOR # define POLYBENCH_THREAD_MONITOR 0 #endif /* Total LLC cache size. By default 32+MB.. */ #ifndef POLYBENCH_CACHE_SIZE_KB # define POLYBENCH_CACHE_SIZE_KB 32770 #endif int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR; double polybench_program_total_flops = 0; #ifdef POLYBENCH_PAPI # include <papi.h> # define POLYBENCH_MAX_NB_PAPI_COUNTERS 96 char* _polybench_papi_eventlist[] = { #include "papi_counters.list" NULL }; int polybench_papi_eventset; int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS]; long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS]; #endif /* Timer code (gettimeofday). */ double polybench_t_start, polybench_t_end; /* Timer code (RDTSC). */ unsigned long long int polybench_c_start, polybench_c_end; static double rtclock() { #ifdef POLYBENCH_TIME struct timeval Tp; int stat; stat = gettimeofday (&Tp, NULL); if (stat != 0) printf ("Error return from gettimeofday: %d", stat); return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); #else return 0; #endif } #ifdef POLYBENCH_CYCLE_ACCURATE_TIMER static unsigned long long int rdtsc() { unsigned long long int ret = 0; unsigned int cycles_lo; unsigned int cycles_hi; __asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi)); ret = (unsigned long long int)cycles_hi << 32 | cycles_lo; return ret; } #endif void polybench_flush_cache() { int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double); double* flush = (double*) calloc (cs, sizeof(double)); int i; double tmp = 0.0; #ifdef _OPENMP #pragma omp parallel for #endif for (i = 0; i < cs; i++) tmp += flush[i]; assert (tmp <= 10.0); free (flush); } #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER void polybench_linux_fifo_scheduler() { /* Use FIFO scheduler to limit OS interference. Program must be run as root, and this works only for Linux kernels. */ struct sched_param schedParam; schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO); sched_setscheduler (0, SCHED_FIFO, &schedParam); } void polybench_linux_standard_scheduler() { /* Restore to standard scheduler policy. 
*/ struct sched_param schedParam; schedParam.sched_priority = sched_get_priority_max (SCHED_OTHER); sched_setscheduler (0, SCHED_OTHER, &schedParam); } #endif #ifdef POLYBENCH_PAPI static void test_fail(char *file, int line, char *call, int retval) { char buf[128]; memset(buf, '\0', sizeof(buf)); if (retval != 0) fprintf (stdout,"%-40s FAILED\nLine # %d\n", file, line); else { fprintf (stdout,"%-40s SKIPPED\n", file); fprintf (stdout,"Line # %d\n", line); } if (retval == PAPI_ESYS) { sprintf (buf, "System error in %s", call); perror (buf); } else if (retval > 0) fprintf (stdout,"Error: %s\n", call); else if (retval == 0) fprintf (stdout,"Error: %s\n", call); else { char errstring[PAPI_MAX_STR_LEN]; PAPI_perror (retval, errstring, PAPI_MAX_STR_LEN); fprintf (stdout,"Error in %s: %s\n", call, errstring); } fprintf (stdout,"\n"); if (PAPI_is_initialized ()) PAPI_shutdown (); exit (1); } void polybench_papi_init() { # ifdef _OPENMP #pragma omp parallel { #pragma omp master { if (omp_get_max_threads () < polybench_papi_counters_threadid) polybench_papi_counters_threadid = omp_get_max_threads () - 1; } #pragma omp barrier if (omp_get_thread_num () == polybench_papi_counters_threadid) { # endif int retval; polybench_papi_eventset = PAPI_NULL; if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT) test_fail (__FILE__, __LINE__, "PAPI_library_init", retval); if ((retval = PAPI_create_eventset (&polybench_papi_eventset)) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval); int k; for (k = 0; _polybench_papi_eventlist[k]; ++k) { if ((retval = PAPI_event_name_to_code (_polybench_papi_eventlist[k], &(polybench_papi_eventlist[k]))) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_event_name_to_code", retval); } polybench_papi_eventlist[k] = 0; # ifdef _OPENMP } } #pragma omp barrier # endif } void polybench_papi_close() { # ifdef _OPENMP #pragma omp parallel { if (omp_get_thread_num () == polybench_papi_counters_threadid) { # endif int retval; if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset)) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval); if (PAPI_is_initialized ()) PAPI_shutdown (); # ifdef _OPENMP } } #pragma omp barrier # endif } int polybench_papi_start_counter(int evid) { # ifndef POLYBENCH_NO_FLUSH_CACHE polybench_flush_cache(); # endif # ifdef _OPENMP # pragma omp parallel { if (omp_get_thread_num () == polybench_papi_counters_threadid) { # endif int retval = 1; char descr[PAPI_MAX_STR_LEN]; PAPI_event_info_t evinfo; PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr); if (PAPI_add_event (polybench_papi_eventset, polybench_papi_eventlist[evid]) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_add_event", 1); if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval); if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_start", retval); # ifdef _OPENMP } } #pragma omp barrier # endif return 0; } void polybench_papi_stop_counter(int evid) { # ifdef _OPENMP # pragma omp parallel { if (omp_get_thread_num () == polybench_papi_counters_threadid) { # endif int retval; long_long values[1]; values[0] = 0; if ((retval = PAPI_read (polybench_papi_eventset, &values[0])) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_read", retval); if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_stop", retval); polybench_papi_values[evid] = 
values[0]; if ((retval = PAPI_remove_event (polybench_papi_eventset, polybench_papi_eventlist[evid])) != PAPI_OK) test_fail (__FILE__, __LINE__, "PAPI_remove_event", retval); # ifdef _OPENMP } } #pragma omp barrier # endif } void polybench_papi_print() { int verbose = 0; # ifdef _OPENMP # pragma omp parallel { if (omp_get_thread_num() == polybench_papi_counters_threadid) { #ifdef POLYBENCH_PAPI_VERBOSE verbose = 1; #endif if (verbose) printf ("On thread %d:\n", polybench_papi_counters_threadid); #endif int evid; for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid) { if (verbose) printf ("%s=", _polybench_papi_eventlist[evid]); printf ("%llu ", polybench_papi_values[evid]); if (verbose) printf ("\n"); } printf ("\n"); # ifdef _OPENMP } } #pragma omp barrier # endif } #endif /* ! POLYBENCH_PAPI */ void polybench_prepare_instruments() { #ifndef POLYBENCH_NO_FLUSH_CACHE polybench_flush_cache (); #endif #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER polybench_linux_fifo_scheduler (); #endif } void polybench_timer_start() { //printf("In polybench timer start\n"); polybench_prepare_instruments (); #ifndef POLYBENCH_CYCLE_ACCURATE_TIMER polybench_t_start = rtclock (); #else polybench_c_start = rdtsc (); #endif } void polybench_timer_stop() { //printf("In polybench timer stop\n"); #ifndef POLYBENCH_CYCLE_ACCURATE_TIMER polybench_t_end = rtclock (); #else polybench_c_end = rdtsc (); #endif #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER polybench_linux_standard_scheduler (); #endif } void polybench_timer_print() { #ifdef POLYBENCH_GFLOPS if (__polybench_program_total_flops == 0) { printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n"); printf ("%0.6lf\n", polybench_t_end - polybench_t_start); } else printf ("%0.2lf\n", (__polybench_program_total_flops / (double)(polybench_t_end - polybench_t_start)) / 1000000000); #else # ifndef POLYBENCH_CYCLE_ACCURATE_TIMER printf ("%0.6f\n", polybench_t_end - polybench_t_start); # else printf ("%Ld\n", polybench_c_end - polybench_c_start); # endif #endif } static void * xmalloc (size_t num) { void* new = NULL; int ret = posix_memalign (&new, 32, num); if (! new || ret) { fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory"); exit (1); } return new; } void* polybench_alloc_data(unsigned long long int n, int elt_size) { /// FIXME: detect overflow! size_t val = n; val *= elt_size; void* ret = xmalloc (val); return ret; }
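One detail worth flagging in polybench_flush_cache() above: its "#pragma omp parallel for" accumulates into the shared variable tmp without a reduction clause, which is a data race under OpenMP (benign in practice only because the calloc'd buffer sums to zero). A race-free sketch of the same flush; the _fixed suffix is chosen here for illustration:

#include <assert.h>
#include <stdlib.h>

#ifndef POLYBENCH_CACHE_SIZE_KB
#define POLYBENCH_CACHE_SIZE_KB 32770
#endif

void polybench_flush_cache_fixed(void)
{
    int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / (int) sizeof(double);
    double *flush = (double *) calloc(cs, sizeof(double));
    double tmp = 0.0;
    int i;
#ifdef _OPENMP
#pragma omp parallel for reduction(+ : tmp) /* each thread gets a private tmp */
#endif
    for (i = 0; i < cs; i++)
        tmp += flush[i];
    assert(tmp <= 10.0); /* calloc() zero-fills, so the sum is exactly 0.0 */
    free(flush);
}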
/** * polybench.c: This file is part of the PolyBench/C 3.2 test suite. * * * Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://polybench.sourceforge.net */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <unistd.h> #include <assert.h> #include <time.h> #include <sys/time.h> #include <sys/resource.h> #include <sched.h> #include <math.h> /* By default, collect PAPI counters on thread 0. */ #ifndef POLYBENCH_THREAD_MONITOR #define POLYBENCH_THREAD_MONITOR 0 #endif /* Total LLC cache size. By default 32+MB.. */ #ifndef POLYBENCH_CACHE_SIZE_KB #define POLYBENCH_CACHE_SIZE_KB 32770 #endif int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR; double polybench_program_total_flops = 0; #ifdef POLYBENCH_PAPI #include <papi.h> #define POLYBENCH_MAX_NB_PAPI_COUNTERS 96 char *_polybench_papi_eventlist[] = { #include "papi_counters.list" NULL }; int polybench_papi_eventset; int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS]; long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS]; #endif /* Timer code (gettimeofday). */ double polybench_t_start, polybench_t_end; /* Timer code (RDTSC). */ unsigned long long int polybench_c_start, polybench_c_end; static double rtclock() { #ifdef POLYBENCH_TIME struct timeval Tp; int stat; stat = gettimeofday(&Tp, NULL); if (stat != 0) printf("Error return from gettimeofday: %d", stat); return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); #else return 0; #endif } #ifdef POLYBENCH_CYCLE_ACCURATE_TIMER static unsigned long long int rdtsc() { unsigned long long int ret = 0; unsigned int cycles_lo; unsigned int cycles_hi; __asm__ volatile ("RDTSC":"=a" (cycles_lo), "=d"(cycles_hi)); ret = (unsigned long long int)cycles_hi << 32 | cycles_lo; return ret; } #endif void polybench_flush_cache() { int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double); double *flush = (double *)calloc(cs, sizeof(double)); int i; double tmp = 0.0; for (i = 0; i < cs; i++) tmp += flush[i]; assert(tmp <= 10.0); free(flush); } #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER void polybench_linux_fifo_scheduler() { /* * Use FIFO scheduler to limit OS interference. Program must be run as * root, and this works only for Linux kernels. */ struct sched_param schedParam; schedParam.sched_priority = sched_get_priority_max(SCHED_FIFO); sched_setscheduler(0, SCHED_FIFO, &schedParam); } void polybench_linux_standard_scheduler() { /* Restore to standard scheduler policy. 
*/ struct sched_param schedParam; schedParam.sched_priority = sched_get_priority_max(SCHED_OTHER); sched_setscheduler(0, SCHED_OTHER, &schedParam); } #endif #ifdef POLYBENCH_PAPI static void test_fail(char *file, int line, char *call, int retval) { char buf[128]; memset(buf, '\0', sizeof(buf)); if (retval != 0) fprintf(stdout, "%-40s FAILED\nLine # %d\n", file, line); else { fprintf(stdout, "%-40s SKIPPED\n", file); fprintf(stdout, "Line # %d\n", line); } if (retval == PAPI_ESYS) { sprintf(buf, "System error in %s", call); perror(buf); } else if (retval > 0) fprintf(stdout, "Error: %s\n", call); else if (retval == 0) fprintf(stdout, "Error: %s\n", call); else { char errstring[PAPI_MAX_STR_LEN]; PAPI_perror(retval, errstring, PAPI_MAX_STR_LEN); fprintf(stdout, "Error in %s: %s\n", call, errstring); } fprintf(stdout, "\n"); if (PAPI_is_initialized()) PAPI_shutdown(); exit(1); } void polybench_papi_init() { #ifdef _OPENMP #pragma omp master { if (omp_get_max_threads() < polybench_papi_counters_threadid) polybench_papi_counters_threadid = omp_get_max_threads() - 1; } if (omp_get_thread_num() == polybench_papi_counters_threadid) { #endif int retval; polybench_papi_eventset = PAPI_NULL; if ((retval = PAPI_library_init(PAPI_VER_CURRENT)) != PAPI_VER_CURRENT) test_fail(__FILE__, __LINE__, "PAPI_library_init", retval); if ((retval = PAPI_create_eventset(&polybench_papi_eventset)) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_create_eventset", retval); int k; for (k = 0; _polybench_papi_eventlist[k]; ++k) { if ((retval = PAPI_event_name_to_code(_polybench_papi_eventlist[k], &(polybench_papi_eventlist[k]))) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_event_name_to_code", retval); } polybench_papi_eventlist[k] = 0; #ifdef _OPENMP } #endif } void polybench_papi_close() { #ifdef _OPENMP if (omp_get_thread_num() == polybench_papi_counters_threadid) { #endif int retval; if ((retval = PAPI_destroy_eventset(&polybench_papi_eventset)) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_destroy_eventset", retval); if (PAPI_is_initialized()) PAPI_shutdown(); #ifdef _OPENMP } #endif } int polybench_papi_start_counter(int evid) { #ifndef POLYBENCH_NO_FLUSH_CACHE polybench_flush_cache(); #endif #ifdef _OPENMP if (omp_get_thread_num() == polybench_papi_counters_threadid) { #endif int retval = 1; char descr[PAPI_MAX_STR_LEN]; PAPI_event_info_t evinfo; PAPI_event_code_to_name(polybench_papi_eventlist[evid], descr); if (PAPI_add_event(polybench_papi_eventset, polybench_papi_eventlist[evid]) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_add_event", 1); if (PAPI_get_event_info(polybench_papi_eventlist[evid], &evinfo) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_get_event_info", retval); if ((retval = PAPI_start(polybench_papi_eventset)) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_start", retval); #ifdef _OPENMP } #endif return 0; } void polybench_papi_stop_counter(int evid) { #ifdef _OPENMP if (omp_get_thread_num() == polybench_papi_counters_threadid) { #endif int retval; long_long values[1]; values[0] = 0; if ((retval = PAPI_read(polybench_papi_eventset, &values[0])) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_read", retval); if ((retval = PAPI_stop(polybench_papi_eventset, NULL)) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_stop", retval); polybench_papi_values[evid] = values[0]; if ((retval = PAPI_remove_event (polybench_papi_eventset, polybench_papi_eventlist[evid])) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_remove_event", retval); #ifdef _OPENMP } #endif } void polybench_papi_print() { int 
verbose = 0; #ifdef _OPENMP if (omp_get_thread_num() == polybench_papi_counters_threadid) { #ifdef POLYBENCH_PAPI_VERBOSE verbose = 1; #endif if (verbose) printf("On thread %d:\n", polybench_papi_counters_threadid); #endif int evid; for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid) { if (verbose) printf("%s=", _polybench_papi_eventlist[evid]); printf("%llu ", polybench_papi_values[evid]); if (verbose) printf("\n"); } printf("\n"); #ifdef _OPENMP } #endif } #endif /* ! POLYBENCH_PAPI */ void polybench_prepare_instruments() { #ifndef POLYBENCH_NO_FLUSH_CACHE polybench_flush_cache(); #endif #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER polybench_linux_fifo_scheduler(); #endif } void polybench_timer_start() { //printf("In polybench timer start\n"); polybench_prepare_instruments(); #ifndef POLYBENCH_CYCLE_ACCURATE_TIMER polybench_t_start = rtclock(); #else polybench_c_start = rdtsc(); #endif } void polybench_timer_stop() { //printf("In polybench timer stop\n"); #ifndef POLYBENCH_CYCLE_ACCURATE_TIMER polybench_t_end = rtclock(); #else polybench_c_end = rdtsc(); #endif #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER polybench_linux_standard_scheduler(); #endif } void polybench_timer_print() { #ifdef POLYBENCH_GFLOPS if (__polybench_program_total_flops == 0) { printf("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n"); printf("%0.6lf\n", polybench_t_end - polybench_t_start); } else printf("%0.2lf\n", (__polybench_program_total_flops / (double)(polybench_t_end - polybench_t_start)) / 1000000000); #else #ifndef POLYBENCH_CYCLE_ACCURATE_TIMER printf("%0.6f\n", polybench_t_end - polybench_t_start); #else printf("%Ld\n", polybench_c_end - polybench_c_start); #endif #endif } static void * xmalloc(size_t num) { void *new = NULL; int ret = posix_memalign(&new, 32, num); if (!new || ret) { fprintf(stderr, "[PolyBench] posix_memalign: cannot allocate memory"); exit(1); } return new; } void * polybench_alloc_data(unsigned long long int n, int elt_size) { ///FIXME:detect overflow ! size_t val = n; val *= elt_size; void *ret = xmalloc(val); return ret; }
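The "FIXME: detect overflow" note in polybench_alloc_data() can be addressed by rejecting products that would wrap size_t before calling the allocator. A hedged sketch (the _checked suffix is illustrative, not part of PolyBench):

#define _POSIX_C_SOURCE 200112L /* for posix_memalign() */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

void *polybench_alloc_data_checked(unsigned long long n, int elt_size)
{
    void *p = NULL;

    /* reject n * elt_size products that would overflow size_t */
    if (elt_size <= 0 || n > SIZE_MAX / (size_t) elt_size) {
        fprintf(stderr, "[PolyBench] allocation size overflows size_t\n");
        exit(1);
    }
    /* same 32-byte-aligned allocation as xmalloc() above */
    if (posix_memalign(&p, 32, (size_t) n * (size_t) elt_size) || !p) {
        fprintf(stderr, "[PolyBench] posix_memalign: cannot allocate memory\n");
        exit(1);
    }
    return p;
}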
/** * polybench.c: This file is part of the PolyBench/C 3.2 test suite. * * * Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://polybench.sourceforge.net */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <unistd.h> #include <assert.h> #include <time.h> #include <sys/time.h> #include <sys/resource.h> #include <sched.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif /* By default, collect PAPI counters on thread 0. */ #ifndef POLYBENCH_THREAD_MONITOR #define POLYBENCH_THREAD_MONITOR 0 #endif /* Total LLC cache size. By default 32+MB.. */ #ifndef POLYBENCH_CACHE_SIZE_KB #define POLYBENCH_CACHE_SIZE_KB 32770 #endif int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR; double polybench_program_total_flops = 0; #ifdef POLYBENCH_PAPI #include <papi.h> #define POLYBENCH_MAX_NB_PAPI_COUNTERS 96 char *_polybench_papi_eventlist[] = { #include "papi_counters.list" NULL }; int polybench_papi_eventset; int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS]; long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS]; #endif /* Timer code (gettimeofday). */ double polybench_t_start, polybench_t_end; /* Timer code (RDTSC). */ unsigned long long int polybench_c_start, polybench_c_end; static double rtclock() { #ifdef POLYBENCH_TIME struct timeval Tp; int stat; stat = gettimeofday(&Tp, NULL); if (stat != 0) printf("Error return from gettimeofday: %d", stat); return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); #else return 0; #endif } #ifdef POLYBENCH_CYCLE_ACCURATE_TIMER static unsigned long long int rdtsc() { unsigned long long int ret = 0; unsigned int cycles_lo; unsigned int cycles_hi; __asm__ volatile ("RDTSC":"=a" (cycles_lo), "=d"(cycles_hi)); ret = (unsigned long long int)cycles_hi << 32 | cycles_lo; return ret; } #endif void polybench_flush_cache() { int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double); double *flush = (double *)calloc(cs, sizeof(double)); int i; double tmp = 0.0; #ifdef _OPENMP #pragma omp parallel for #endif for (i = 0; i < cs; i++) tmp += flush[i]; assert(tmp <= 10.0); free(flush); } #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER void polybench_linux_fifo_scheduler() { /* * Use FIFO scheduler to limit OS interference. Program must be run as * root, and this works only for Linux kernels. */ struct sched_param schedParam; schedParam.sched_priority = sched_get_priority_max(SCHED_FIFO); sched_setscheduler(0, SCHED_FIFO, &schedParam); } void polybench_linux_standard_scheduler() { /* Restore to standard scheduler policy. 
*/ struct sched_param schedParam; schedParam.sched_priority = sched_get_priority_max(SCHED_OTHER); sched_setscheduler(0, SCHED_OTHER, &schedParam); } #endif #ifdef POLYBENCH_PAPI static void test_fail(char *file, int line, char *call, int retval) { char buf[128]; memset(buf, '\0', sizeof(buf)); if (retval != 0) fprintf(stdout, "%-40s FAILED\nLine # %d\n", file, line); else { fprintf(stdout, "%-40s SKIPPED\n", file); fprintf(stdout, "Line # %d\n", line); } if (retval == PAPI_ESYS) { sprintf(buf, "System error in %s", call); perror(buf); } else if (retval > 0) fprintf(stdout, "Error: %s\n", call); else if (retval == 0) fprintf(stdout, "Error: %s\n", call); else { char errstring[PAPI_MAX_STR_LEN]; PAPI_perror(retval, errstring, PAPI_MAX_STR_LEN); fprintf(stdout, "Error in %s: %s\n", call, errstring); } fprintf(stdout, "\n"); if (PAPI_is_initialized()) PAPI_shutdown(); exit(1); } void polybench_papi_init() { #ifdef _OPENMP #pragma omp parallel { #pragma omp master { if (omp_get_max_threads() < polybench_papi_counters_threadid) polybench_papi_counters_threadid = omp_get_max_threads() - 1; } #pragma omp barrier if (omp_get_thread_num() == polybench_papi_counters_threadid) { #endif int retval; polybench_papi_eventset = PAPI_NULL; if ((retval = PAPI_library_init(PAPI_VER_CURRENT)) != PAPI_VER_CURRENT) test_fail(__FILE__, __LINE__, "PAPI_library_init", retval); if ((retval = PAPI_create_eventset(&polybench_papi_eventset)) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_create_eventset", retval); int k; for (k = 0; _polybench_papi_eventlist[k]; ++k) { if ((retval = PAPI_event_name_to_code(_polybench_papi_eventlist[k], &(polybench_papi_eventlist[k]))) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_event_name_to_code", retval); } polybench_papi_eventlist[k] = 0; #ifdef _OPENMP } } #pragma omp barrier #endif } void polybench_papi_close() { #ifdef _OPENMP #pragma omp parallel { if (omp_get_thread_num() == polybench_papi_counters_threadid) { #endif int retval; if ((retval = PAPI_destroy_eventset(&polybench_papi_eventset)) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_destroy_eventset", retval); if (PAPI_is_initialized()) PAPI_shutdown(); #ifdef _OPENMP } } #pragma omp barrier #endif } int polybench_papi_start_counter(int evid) { #ifndef POLYBENCH_NO_FLUSH_CACHE polybench_flush_cache(); #endif #ifdef _OPENMP #pragma omp parallel { if (omp_get_thread_num() == polybench_papi_counters_threadid) { #endif int retval = 1; char descr[PAPI_MAX_STR_LEN]; PAPI_event_info_t evinfo; PAPI_event_code_to_name(polybench_papi_eventlist[evid], descr); if (PAPI_add_event(polybench_papi_eventset, polybench_papi_eventlist[evid]) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_add_event", 1); if (PAPI_get_event_info(polybench_papi_eventlist[evid], &evinfo) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_get_event_info", retval); if ((retval = PAPI_start(polybench_papi_eventset)) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_start", retval); #ifdef _OPENMP } } #pragma omp barrier #endif return 0; } void polybench_papi_stop_counter(int evid) { #ifdef _OPENMP #pragma omp parallel { if (omp_get_thread_num() == polybench_papi_counters_threadid) { #endif int retval; long_long values[1]; values[0] = 0; if ((retval = PAPI_read(polybench_papi_eventset, &values[0])) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_read", retval); if ((retval = PAPI_stop(polybench_papi_eventset, NULL)) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_stop", retval); polybench_papi_values[evid] = values[0]; if ((retval = PAPI_remove_event 
(polybench_papi_eventset, polybench_papi_eventlist[evid])) != PAPI_OK) test_fail(__FILE__, __LINE__, "PAPI_remove_event", retval); #ifdef _OPENMP } } #pragma omp barrier #endif } void polybench_papi_print() { int verbose = 0; #ifdef _OPENMP #pragma omp parallel { if (omp_get_thread_num() == polybench_papi_counters_threadid) { #ifdef POLYBENCH_PAPI_VERBOSE verbose = 1; #endif if (verbose) printf("On thread %d:\n", polybench_papi_counters_threadid); #endif int evid; for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid) { if (verbose) printf("%s=", _polybench_papi_eventlist[evid]); printf("%llu ", polybench_papi_values[evid]); if (verbose) printf("\n"); } printf("\n"); #ifdef _OPENMP } } #pragma omp barrier #endif } #endif /* ! POLYBENCH_PAPI */ void polybench_prepare_instruments() { #ifndef POLYBENCH_NO_FLUSH_CACHE polybench_flush_cache(); #endif #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER polybench_linux_fifo_scheduler(); #endif } void polybench_timer_start() { //printf("In polybench timer start\n"); polybench_prepare_instruments(); #ifndef POLYBENCH_CYCLE_ACCURATE_TIMER polybench_t_start = rtclock(); #else polybench_c_start = rdtsc(); #endif } void polybench_timer_stop() { //printf("In polybench timer stop\n"); #ifndef POLYBENCH_CYCLE_ACCURATE_TIMER polybench_t_end = rtclock(); #else polybench_c_end = rdtsc(); #endif #ifdef POLYBENCH_LINUX_FIFO_SCHEDULER polybench_linux_standard_scheduler(); #endif } void polybench_timer_print() { #ifdef POLYBENCH_GFLOPS if (__polybench_program_total_flops == 0) { printf("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n"); printf("%0.6lf\n", polybench_t_end - polybench_t_start); } else printf("%0.2lf\n", (__polybench_program_total_flops / (double)(polybench_t_end - polybench_t_start)) / 1000000000); #else #ifndef POLYBENCH_CYCLE_ACCURATE_TIMER printf("%0.6f\n", polybench_t_end - polybench_t_start); #else printf("%Ld\n", polybench_c_end - polybench_c_start); #endif #endif } static void * xmalloc(size_t num) { void *new = NULL; int ret = posix_memalign(&new, 32, num); if (!new || ret) { fprintf(stderr, "[PolyBench] posix_memalign: cannot allocate memory"); exit(1); } return new; } void * polybench_alloc_data(unsigned long long int n, int elt_size) { ///FIXME:detect overflow ! size_t val = n; val *= elt_size; void *ret = xmalloc(val); return ret; }
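For context, a minimal sketch of how the timing hooks above are driven from a benchmark, assuming the translation unit is compiled with -DPOLYBENCH_TIME (so rtclock() is live) and linked against polybench.c:

#include <stdio.h>
#include <stdlib.h>

void polybench_timer_start(void);
void polybench_timer_stop(void);
void polybench_timer_print(void);
void *polybench_alloc_data(unsigned long long int n, int elt_size);

#define N 1500

int main(void)
{
    double *a = (double *) polybench_alloc_data((unsigned long long) N * N,
                                                (int) sizeof(double));
    int i;

    polybench_timer_start();      /* flushes the cache, then samples the clock */
    for (i = 0; i < N * N; i++)   /* stand-in for a benchmark kernel */
        a[i] = (double) i / N;
    polybench_timer_stop();
    polybench_timer_print();      /* elapsed wall-clock seconds on stdout */

    free(a);
    return 0;
}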
ompt-signal.h
/* * ompt-signal.h -- Header providing low-level synchronization for tests */ //===----------------------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is a copy from runtime/test/ompt/ // //===----------------------------------------------------------------------===// #if defined(WIN32) || defined(_WIN32) #include <windows.h> #define delay() Sleep(1); #else #include <unistd.h> #define delay(t) usleep(t); #endif // These functions are used to provide a signal-wait mechanism to enforce // expected scheduling for the test cases. // Conditional variable (s) needs to be shared! Initialize to 0 #define OMPT_SIGNAL(s) ompt_signal(&s) // inline void ompt_signal(int *s) { #pragma omp atomic (*s)++; } #define OMPT_WAIT(s, v) ompt_wait(&s, v) // wait for s >= v // inline void ompt_wait(int *s, int v) { int wait = 0; do { delay(10); #pragma omp atomic read wait = (*s); } while (wait < v); }
/* * ompt-signal.h -- Header providing low-level synchronization for tests */ //===----------------------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is a copy from runtime/test/ompt/ // //===----------------------------------------------------------------------===// #if defined(WIN32) || defined(_WIN32) #include <windows.h> #define delay() Sleep(1); #else #include <unistd.h> #define delay(t) usleep(t); #endif // These functions are used to provide a signal-wait mechanism to enforce // expected scheduling for the test cases. // Conditional variable (s) needs to be shared! Initialize to 0 #define OMPT_SIGNAL(s) ompt_signal(&s) // inline void ompt_signal(int *s) { (*s)++; } #define OMPT_WAIT(s, v) ompt_wait(&s, v) // wait for s >= v // inline void ompt_wait(int *s, int v) { int wait = 0; do { delay(10); wait = (*s); } while (wait < v); }
/* * ompt-signal.h -- Header providing low-level synchronization for tests */ //===----------------------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is a copy from runtime/test/ompt/ // //===----------------------------------------------------------------------===// #if defined(WIN32) || defined(_WIN32) #include <windows.h> #define delay() Sleep(1); #else #include <unistd.h> #define delay(t) usleep(t); #endif // These functions are used to provide a signal-wait mechanism to enforce // expected scheduling for the test cases. // Conditional variable (s) needs to be shared! Initialize to 0 #define OMPT_SIGNAL(s) ompt_signal(&s) // inline void ompt_signal(int *s) { #pragma omp atomic (*s)++; } #define OMPT_WAIT(s, v) ompt_wait(&s, v) // wait for s >= v // inline void ompt_wait(int *s, int v) { int wait = 0; do { delay(10); #pragma omp atomic read wait = (*s); } while (wait < v); }
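A minimal usage sketch of the signal/wait pair, assuming ompt-signal.h is on the include path and the program is built with OpenMP enabled: thread 1 blocks until thread 0 has passed a known point, with the shared counter initialized to 0 as the header's comment requires.

#include <omp.h>
#include <stdio.h>
#include "ompt-signal.h"

int main(void)
{
    int sync = 0; /* shared conditional variable, initialized to 0 */
#pragma omp parallel num_threads(2) shared(sync)
    {
        if (omp_get_thread_num() == 0) {
            printf("thread 0: reached checkpoint\n");
            OMPT_SIGNAL(sync);  /* atomically increments sync */
        } else {
            OMPT_WAIT(sync, 1); /* spins (with delay) until sync >= 1 */
            printf("thread 1: observed the signal\n");
        }
    }
    return 0;
}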
matching_coefficients.h
// Copyright (c) 2013-2015 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file matching_coefficients.h * * \brief Contains definition and partial implementation of sirius::Matching_coefficients class. */ #ifndef __MATCHING_COEFFICIENTS_H__ #define __MATCHING_COEFFICIENTS_H__ namespace sirius { /** The following matching conditions must be fulfilled: * \f[ * \frac{\partial^j}{\partial r^j} \sum_{L \nu} A_{L \nu}^{\bf k}({\bf G})u_{\ell \nu}(r) * Y_{L}(\hat {\bf r}) \bigg|_{R^{MT}} = \frac{\partial^j}{\partial r^j} \frac{4 \pi}{\sqrt \Omega} * e^{i{\bf (G+k)\tau}} \sum_{L}i^{\ell} j_{\ell}(|{\bf G+k}|r) Y_{L}^{*}(\widehat {\bf G+k}) Y_{L}(\hat {\bf r}) \bigg|_{R^{MT}} * \f] * where \f$ L = \{ \ell, m \} \f$. Dropping sum over L we arrive to the following system of linear equations: * \f[ * \sum_{\nu} \frac{\partial^j u_{\ell \nu}(r)}{\partial r^j} \bigg|_{R^{MT}} A_{L \nu}^{\bf k}({\bf G}) = * \frac{4 \pi}{\sqrt \Omega} e^{i{\bf (G+k)\tau}} i^{\ell} \frac{\partial^j j_{\ell}(|{\bf G+k}|r)}{\partial r^j} * \bigg|_{R^{MT}} Y_{L}^{*}(\widehat {\bf G+k}) * \f] * The matching coefficients are then equal to: * \f[ * A_{L \nu}^{\bf k}({\bf G}) = \sum_{j} \bigg[ \frac{\partial^j u_{\ell \nu}(r)}{\partial r^j} \bigg|_{R^{MT}} \bigg]_{\nu j}^{-1} * \frac{\partial^j j_{\ell}(|{\bf G+k}|r)}{\partial r^j} \bigg|_{R^{MT}} \frac{4 \pi}{\sqrt \Omega} i^{\ell} * e^{i{\bf (G+k)\tau}} Y_{L}^{*}(\widehat {\bf G+k}) * \f] */ class Matching_coefficients { private: Unit_cell const& unit_cell_; int num_gkvec_; //std::vector<gklo_basis_descriptor> const& gklo_basis_descriptors_; std::vector<int>& igk_; Gvec const& gkvec_; mdarray<double_complex, 2> gkvec_ylm_; std::vector<double> gkvec_len_; /// Precomputed values for the linear equations for matching coefficients. mdarray<double_complex, 4> alm_b_; /// Generate matching coefficients for a specific \f$ \ell \f$ and order. /** \param [in] ngk Number of G+k vectors. * \param [in] ia Index of atom. * \param [in] iat Index of atom type. * \param [in] l Orbital quantum nuber. * \param [in] lm Composite l,m index. * \param [in] nu Order of radial function \f$ u_{\ell \nu}(r) \f$ for which coefficients are generated. * \param [inout] A Matrix of radial derivatives. 
* \param [out] alm Pointer to alm coefficients. */ template <int N> inline void generate(int ngk, std::vector<double_complex> const& phase_factors__, int iat, int l, int lm, int nu, matrix3d<double>& A, double_complex* alm) const { /* invert matrix of radial derivatives */ switch (N) { case 1: { #if (__VERIFICATION > 0) if (std::abs(A(0, 0)) < 1.0 / std::sqrt(unit_cell_.omega())) { std::stringstream s; s << "Ill defined plane wave matching problem for atom type " << iat << ", l = " << l << std::endl << " radial function value at the MT boundary : " << A(0, 0); WARNING(s.str()); } #endif A(0, 0) = 1.0 / A(0, 0); break; } case 2: { double det = A(0, 0) * A(1, 1) - A(0, 1) * A(1, 0); #if (__VERIFICATION > 0) if (std::abs(det) < 1.0 / std::sqrt(unit_cell_.omega())) { std::stringstream s; s << "Ill defined plane wave matching problem for atom type " << iat << ", l = " << l << std::endl << " radial function value at the MT boundary : " << A(0 ,0); WARNING(s.str()); } #endif std::swap(A(0, 0), A(1, 1)); A(0, 0) /= det; A(1, 1) /= det; A(0, 1) = -A(0, 1) / det; A(1, 0) = -A(1, 0) / det; break; } case 3: { A = inverse(A); break; } } double_complex zt; for (int igk = 0; igk < ngk; igk++) { switch (N) { case 1: { zt = alm_b_(0, igk, l, iat) * A(0, 0); break; } case 2: { zt = alm_b_(0, igk, l, iat) * A(nu, 0) + alm_b_(1, igk, l, iat) * A(nu, 1); break; } case 3: { zt = alm_b_(0, igk, l, iat) * A(nu, 0) + alm_b_(1, igk, l, iat) * A(nu, 1) + alm_b_(2, igk, l, iat) * A(nu, 2); break; } } alm[igk] = phase_factors__[igk] * std::conj(gkvec_ylm_(igk, lm)) * zt; } } public: /// Constructor Matching_coefficients(Unit_cell const& unit_cell__, int lmax_apw__, int num_gkvec__, std::vector<int>& igk__, Gvec const& gkvec__) : unit_cell_(unit_cell__), num_gkvec_(num_gkvec__), igk_(igk__), gkvec_(gkvec__) { int lmmax_apw = Utils::lmmax(lmax_apw__); gkvec_ylm_ = mdarray<double_complex, 2>(num_gkvec_, lmmax_apw); gkvec_len_.resize(num_gkvec_); /* get length and Ylm harmonics of G+k vectors */ #pragma omp parallel { std::vector<double_complex> ylm(lmmax_apw); #pragma omp for for (int i = 0; i < num_gkvec_; i++) { auto gkvec_cart = gkvec_.gkvec_cart(igk_[i]); /* get r, theta, phi */ auto vs = SHT::spherical_coordinates(gkvec_cart); /* get spherical harmonics */ SHT::spherical_harmonics(lmax_apw__, vs[1], vs[2], &ylm[0]); gkvec_len_[i] = vs[0]; for (int lm = 0; lm < lmmax_apw; lm++) { gkvec_ylm_(i, lm) = ylm[lm]; } } } alm_b_ = mdarray<double_complex, 4>(3, num_gkvec_, lmax_apw__ + 1, unit_cell_.num_atom_types()); alm_b_.zero(); /* value and first two derivatives of spherical Bessel functions */ mdarray<double, 2> sbessel_mt(lmax_apw__ + 2, 3); for (int igk = 0; igk < num_gkvec_; igk++) { for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) { double R = unit_cell_.atom_type(iat).mt_radius(); double RGk = R * gkvec_len_[igk]; /* compute values and first and second derivatives of the spherical Bessel functions at the MT boundary */ gsl_sf_bessel_jl_array(lmax_apw__ + 1, RGk, &sbessel_mt(0, 0)); /* Bessel function derivative: f_{{n}}^{{\prime}}(z)=-f_{{n+1}}(z)+(n/z)f_{{n}}(z) * * In[]:= FullSimplify[D[SphericalBesselJ[n,a*x],{x,1}]] * Out[]= (n SphericalBesselJ[n,a x])/x-a SphericalBesselJ[1+n,a x] * * In[]:= FullSimplify[D[SphericalBesselJ[n,a*x],{x,2}]] * Out[]= (((-1+n) n-a^2 x^2) SphericalBesselJ[n,a x]+2 a x SphericalBesselJ[1+n,a x])/x^2 */ for (int l = 0; l <= lmax_apw__; l++) { sbessel_mt(l, 1) = -sbessel_mt(l + 1, 0) * gkvec_len_[igk] + (l / R) * sbessel_mt(l, 0); sbessel_mt(l, 2) = 2 * gkvec_len_[igk] * 
sbessel_mt(l + 1, 0) / R + ((l - 1) * l - std::pow(RGk, 2)) * sbessel_mt(l, 0) / std::pow(R, 2); } for (int l = 0; l <= lmax_apw__; l++) { double_complex z = std::pow(double_complex(0, 1), l); double f = fourpi / std::sqrt(unit_cell_.omega()); alm_b_(0, igk, l, iat) = z * f * sbessel_mt(l, 0); alm_b_(1, igk, l, iat) = z * f * sbessel_mt(l, 1); alm_b_(2, igk, l, iat) = z * f * sbessel_mt(l, 2); } } } } /// Generate plane-wave matching coefficents for the radial solutions of a given atom. /** \param [in] ia Index of atom. * \param [out] alm Array of matching coefficients with dimension indices \f$ ({\bf G+k}, \xi) \f$. */ void generate(int ia, mdarray<double_complex, 2>& alm) const { auto& atom = unit_cell_.atom(ia); auto& type = atom.type(); assert(type.max_aw_order() <= 3); int iat = type.id(); std::vector<double_complex> phase_factors(num_gkvec_); for (int i = 0; i < num_gkvec_; i++) { double phase = twopi * (gkvec_.gkvec(igk_[i]) * unit_cell_.atom(ia).position()); phase_factors[i] = std::exp(double_complex(0, phase)); } matrix3d<double> A; for (int xi = 0; xi < type.mt_aw_basis_size(); xi++) { int l = type.indexb(xi).l; int lm = type.indexb(xi).lm; int nu = type.indexb(xi).order; /* order of augmentation for a given orbital quantum number */ int num_aw = static_cast<int>(type.aw_descriptor(l).size()); /* create matrix of radial derivatives */ for (int order = 0; order < num_aw; order++) { for (int dm = 0; dm < num_aw; dm++) { A(dm, order) = atom.symmetry_class().aw_surface_dm(l, order, dm); } } switch (num_aw) { /* APW */ case 1: { generate<1>(num_gkvec_, phase_factors, iat, l, lm, nu, A, &alm(0, xi)); break; } /* LAPW */ case 2: { generate<2>(num_gkvec_, phase_factors, iat, l, lm, nu, A, &alm(0, xi)); break; } /* Super LAPW */ case 3: { generate<3>(num_gkvec_, phase_factors, iat, l, lm, nu, A, &alm(0, xi)); break; } default: { TERMINATE("wrong order of augmented wave"); } } } } }; } #endif // __MATCHING_COEFFICIENTS_H__
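The "case 2" (LAPW) branch of generate<N>() above inverts the 2x2 matrix of radial derivatives in closed form via its determinant. A standalone sketch of that inversion in plain C (inverse2x2 is an illustrative name, not SIRIUS code):

/* Standalone sketch (plain C, not SIRIUS code) of the closed-form 2x2
 * inversion performed in the "case 2" branch: swap the diagonal, negate
 * the off-diagonal, divide everything by the determinant. */
#include <math.h>

/* A is a row-major 2x2 matrix; returns 0 on success, -1 if singular. */
static int inverse2x2(double A[2][2])
{
    double det = A[0][0] * A[1][1] - A[0][1] * A[1][0];
    double t;

    if (fabs(det) == 0.0)
        return -1; /* the header itself only warns when __VERIFICATION > 0 */
    t = A[0][0];
    A[0][0] = A[1][1] / det; /* swap diagonal entries, then scale by 1/det */
    A[1][1] = t / det;
    A[0][1] = -A[0][1] / det;
    A[1][0] = -A[1][0] / det;
    return 0;
}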
// Copyright (c) 2013-2015 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file matching_coefficients.h * * \brief Contains definition and partial implementation of sirius::Matching_coefficients class. */ #ifndef __MATCHING_COEFFICIENTS_H__ #define __MATCHING_COEFFICIENTS_H__ namespace sirius { /** The following matching conditions must be fulfilled: * \f[ * \frac{\partial^j}{\partial r^j} \sum_{L \nu} A_{L \nu}^{\bf k}({\bf G})u_{\ell \nu}(r) * Y_{L}(\hat {\bf r}) \bigg|_{R^{MT}} = \frac{\partial^j}{\partial r^j} \frac{4 \pi}{\sqrt \Omega} * e^{i{\bf (G+k)\tau}} \sum_{L}i^{\ell} j_{\ell}(|{\bf G+k}|r) Y_{L}^{*}(\widehat {\bf G+k}) Y_{L}(\hat {\bf r}) \bigg|_{R^{MT}} * \f] * where \f$ L = \{ \ell, m \} \f$. Dropping sum over L we arrive to the following system of linear equations: * \f[ * \sum_{\nu} \frac{\partial^j u_{\ell \nu}(r)}{\partial r^j} \bigg|_{R^{MT}} A_{L \nu}^{\bf k}({\bf G}) = * \frac{4 \pi}{\sqrt \Omega} e^{i{\bf (G+k)\tau}} i^{\ell} \frac{\partial^j j_{\ell}(|{\bf G+k}|r)}{\partial r^j} * \bigg|_{R^{MT}} Y_{L}^{*}(\widehat {\bf G+k}) * \f] * The matching coefficients are then equal to: * \f[ * A_{L \nu}^{\bf k}({\bf G}) = \sum_{j} \bigg[ \frac{\partial^j u_{\ell \nu}(r)}{\partial r^j} \bigg|_{R^{MT}} \bigg]_{\nu j}^{-1} * \frac{\partial^j j_{\ell}(|{\bf G+k}|r)}{\partial r^j} \bigg|_{R^{MT}} \frac{4 \pi}{\sqrt \Omega} i^{\ell} * e^{i{\bf (G+k)\tau}} Y_{L}^{*}(\widehat {\bf G+k}) * \f] */ class Matching_coefficients { private: Unit_cell const& unit_cell_; int num_gkvec_; //std::vector<gklo_basis_descriptor> const& gklo_basis_descriptors_; std::vector<int>& igk_; Gvec const& gkvec_; mdarray<double_complex, 2> gkvec_ylm_; std::vector<double> gkvec_len_; /// Precomputed values for the linear equations for matching coefficients. mdarray<double_complex, 4> alm_b_; /// Generate matching coefficients for a specific \f$ \ell \f$ and order. /** \param [in] ngk Number of G+k vectors. * \param [in] ia Index of atom. * \param [in] iat Index of atom type. * \param [in] l Orbital quantum nuber. * \param [in] lm Composite l,m index. * \param [in] nu Order of radial function \f$ u_{\ell \nu}(r) \f$ for which coefficients are generated. * \param [inout] A Matrix of radial derivatives. 
* \param [out] alm Pointer to alm coefficients. */ template <int N> inline void generate(int ngk, std::vector<double_complex> const& phase_factors__, int iat, int l, int lm, int nu, matrix3d<double>& A, double_complex* alm) const { /* invert matrix of radial derivatives */ switch (N) { case 1: { #if (__VERIFICATION > 0) if (std::abs(A(0, 0)) < 1.0 / std::sqrt(unit_cell_.omega())) { std::stringstream s; s << "Ill defined plane wave matching problem for atom type " << iat << ", l = " << l << std::endl << " radial function value at the MT boundary : " << A(0, 0); WARNING(s.str()); } #endif A(0, 0) = 1.0 / A(0, 0); break; } case 2: { double det = A(0, 0) * A(1, 1) - A(0, 1) * A(1, 0); #if (__VERIFICATION > 0) if (std::abs(det) < 1.0 / std::sqrt(unit_cell_.omega())) { std::stringstream s; s << "Ill defined plane wave matching problem for atom type " << iat << ", l = " << l << std::endl << " radial function value at the MT boundary : " << A(0 ,0); WARNING(s.str()); } #endif std::swap(A(0, 0), A(1, 1)); A(0, 0) /= det; A(1, 1) /= det; A(0, 1) = -A(0, 1) / det; A(1, 0) = -A(1, 0) / det; break; } case 3: { A = inverse(A); break; } } double_complex zt; for (int igk = 0; igk < ngk; igk++) { switch (N) { case 1: { zt = alm_b_(0, igk, l, iat) * A(0, 0); break; } case 2: { zt = alm_b_(0, igk, l, iat) * A(nu, 0) + alm_b_(1, igk, l, iat) * A(nu, 1); break; } case 3: { zt = alm_b_(0, igk, l, iat) * A(nu, 0) + alm_b_(1, igk, l, iat) * A(nu, 1) + alm_b_(2, igk, l, iat) * A(nu, 2); break; } } alm[igk] = phase_factors__[igk] * std::conj(gkvec_ylm_(igk, lm)) * zt; } } public: /// Constructor Matching_coefficients(Unit_cell const& unit_cell__, int lmax_apw__, int num_gkvec__, std::vector<int>& igk__, Gvec const& gkvec__) : unit_cell_(unit_cell__), num_gkvec_(num_gkvec__), igk_(igk__), gkvec_(gkvec__) { int lmmax_apw = Utils::lmmax(lmax_apw__); gkvec_ylm_ = mdarray<double_complex, 2>(num_gkvec_, lmmax_apw); gkvec_len_.resize(num_gkvec_); /* get length and Ylm harmonics of G+k vectors */ std::vector<double_complex> ylm(lmmax_apw); for (int i = 0; i < num_gkvec_; i++) { auto gkvec_cart = gkvec_.gkvec_cart(igk_[i]); /* get r, theta, phi */ auto vs = SHT::spherical_coordinates(gkvec_cart); /* get spherical harmonics */ SHT::spherical_harmonics(lmax_apw__, vs[1], vs[2], &ylm[0]); gkvec_len_[i] = vs[0]; for (int lm = 0; lm < lmmax_apw; lm++) { gkvec_ylm_(i, lm) = ylm[lm]; } } alm_b_ = mdarray<double_complex, 4>(3, num_gkvec_, lmax_apw__ + 1, unit_cell_.num_atom_types()); alm_b_.zero(); /* value and first two derivatives of spherical Bessel functions */ mdarray<double, 2> sbessel_mt(lmax_apw__ + 2, 3); for (int igk = 0; igk < num_gkvec_; igk++) { for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) { double R = unit_cell_.atom_type(iat).mt_radius(); double RGk = R * gkvec_len_[igk]; /* compute values and first and second derivatives of the spherical Bessel functions at the MT boundary */ gsl_sf_bessel_jl_array(lmax_apw__ + 1, RGk, &sbessel_mt(0, 0)); /* Bessel function derivative: f_{{n}}^{{\prime}}(z)=-f_{{n+1}}(z)+(n/z)f_{{n}}(z) * * In[]:= FullSimplify[D[SphericalBesselJ[n,a*x],{x,1}]] * Out[]= (n SphericalBesselJ[n,a x])/x-a SphericalBesselJ[1+n,a x] * * In[]:= FullSimplify[D[SphericalBesselJ[n,a*x],{x,2}]] * Out[]= (((-1+n) n-a^2 x^2) SphericalBesselJ[n,a x]+2 a x SphericalBesselJ[1+n,a x])/x^2 */ for (int l = 0; l <= lmax_apw__; l++) { sbessel_mt(l, 1) = -sbessel_mt(l + 1, 0) * gkvec_len_[igk] + (l / R) * sbessel_mt(l, 0); sbessel_mt(l, 2) = 2 * gkvec_len_[igk] * sbessel_mt(l + 1, 0) / R + ((l - 1) * l - 
std::pow(RGk, 2)) * sbessel_mt(l, 0) / std::pow(R, 2); } for (int l = 0; l <= lmax_apw__; l++) { double_complex z = std::pow(double_complex(0, 1), l); double f = fourpi / std::sqrt(unit_cell_.omega()); alm_b_(0, igk, l, iat) = z * f * sbessel_mt(l, 0); alm_b_(1, igk, l, iat) = z * f * sbessel_mt(l, 1); alm_b_(2, igk, l, iat) = z * f * sbessel_mt(l, 2); } } } } /// Generate plane-wave matching coefficents for the radial solutions of a given atom. /** \param [in] ia Index of atom. * \param [out] alm Array of matching coefficients with dimension indices \f$ ({\bf G+k}, \xi) \f$. */ void generate(int ia, mdarray<double_complex, 2>& alm) const { auto& atom = unit_cell_.atom(ia); auto& type = atom.type(); assert(type.max_aw_order() <= 3); int iat = type.id(); std::vector<double_complex> phase_factors(num_gkvec_); for (int i = 0; i < num_gkvec_; i++) { double phase = twopi * (gkvec_.gkvec(igk_[i]) * unit_cell_.atom(ia).position()); phase_factors[i] = std::exp(double_complex(0, phase)); } matrix3d<double> A; for (int xi = 0; xi < type.mt_aw_basis_size(); xi++) { int l = type.indexb(xi).l; int lm = type.indexb(xi).lm; int nu = type.indexb(xi).order; /* order of augmentation for a given orbital quantum number */ int num_aw = static_cast<int>(type.aw_descriptor(l).size()); /* create matrix of radial derivatives */ for (int order = 0; order < num_aw; order++) { for (int dm = 0; dm < num_aw; dm++) { A(dm, order) = atom.symmetry_class().aw_surface_dm(l, order, dm); } } switch (num_aw) { /* APW */ case 1: { generate<1>(num_gkvec_, phase_factors, iat, l, lm, nu, A, &alm(0, xi)); break; } /* LAPW */ case 2: { generate<2>(num_gkvec_, phase_factors, iat, l, lm, nu, A, &alm(0, xi)); break; } /* Super LAPW */ case 3: { generate<3>(num_gkvec_, phase_factors, iat, l, lm, nu, A, &alm(0, xi)); break; } default: { TERMINATE("wrong order of augmented wave"); } } } } }; } #endif // __MATCHING_COEFFICIENTS_H__
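The OpenMP variant of the constructor (next entry) splits "#pragma omp parallel" from "#pragma omp for" so that the ylm scratch vector, declared inside the parallel region, is automatically private to each thread without a private(...) clause. A plain-C sketch of that idiom under assumed names (fill_rows, scratch; m >= 1):

#include <stdlib.h>

void fill_rows(double *out, int n, int m)
{
#ifdef _OPENMP
#pragma omp parallel
#endif
    {
        /* declared inside the parallel region => one private copy per thread */
        double *scratch = (double *) malloc(m * sizeof(double));
        int i, j;
#ifdef _OPENMP
#pragma omp for /* loop variable i is predetermined private */
#endif
        for (i = 0; i < n; i++) {
            for (j = 0; j < m; j++)
                scratch[j] = (double) i * j; /* stand-in for per-row work */
            out[i] = scratch[m - 1];
        }
        free(scratch);
    }
}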
// Copyright (c) 2013-2015 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file matching_coefficients.h * * \brief Contains definition and partial implementation of sirius::Matching_coefficients class. */ #ifndef __MATCHING_COEFFICIENTS_H__ #define __MATCHING_COEFFICIENTS_H__ namespace sirius { /** The following matching conditions must be fulfilled: * \f[ * \frac{\partial^j}{\partial r^j} \sum_{L \nu} A_{L \nu}^{\bf k}({\bf G})u_{\ell \nu}(r) * Y_{L}(\hat {\bf r}) \bigg|_{R^{MT}} = \frac{\partial^j}{\partial r^j} \frac{4 \pi}{\sqrt \Omega} * e^{i{\bf (G+k)\tau}} \sum_{L}i^{\ell} j_{\ell}(|{\bf G+k}|r) Y_{L}^{*}(\widehat {\bf G+k}) Y_{L}(\hat {\bf r}) \bigg|_{R^{MT}} * \f] * where \f$ L = \{ \ell, m \} \f$. Dropping sum over L we arrive to the following system of linear equations: * \f[ * \sum_{\nu} \frac{\partial^j u_{\ell \nu}(r)}{\partial r^j} \bigg|_{R^{MT}} A_{L \nu}^{\bf k}({\bf G}) = * \frac{4 \pi}{\sqrt \Omega} e^{i{\bf (G+k)\tau}} i^{\ell} \frac{\partial^j j_{\ell}(|{\bf G+k}|r)}{\partial r^j} * \bigg|_{R^{MT}} Y_{L}^{*}(\widehat {\bf G+k}) * \f] * The matching coefficients are then equal to: * \f[ * A_{L \nu}^{\bf k}({\bf G}) = \sum_{j} \bigg[ \frac{\partial^j u_{\ell \nu}(r)}{\partial r^j} \bigg|_{R^{MT}} \bigg]_{\nu j}^{-1} * \frac{\partial^j j_{\ell}(|{\bf G+k}|r)}{\partial r^j} \bigg|_{R^{MT}} \frac{4 \pi}{\sqrt \Omega} i^{\ell} * e^{i{\bf (G+k)\tau}} Y_{L}^{*}(\widehat {\bf G+k}) * \f] */ class Matching_coefficients { private: Unit_cell const& unit_cell_; int num_gkvec_; //std::vector<gklo_basis_descriptor> const& gklo_basis_descriptors_; std::vector<int>& igk_; Gvec const& gkvec_; mdarray<double_complex, 2> gkvec_ylm_; std::vector<double> gkvec_len_; /// Precomputed values for the linear equations for matching coefficients. mdarray<double_complex, 4> alm_b_; /// Generate matching coefficients for a specific \f$ \ell \f$ and order. /** \param [in] ngk Number of G+k vectors. * \param [in] ia Index of atom. * \param [in] iat Index of atom type. * \param [in] l Orbital quantum nuber. * \param [in] lm Composite l,m index. * \param [in] nu Order of radial function \f$ u_{\ell \nu}(r) \f$ for which coefficients are generated. * \param [inout] A Matrix of radial derivatives. 
* \param [out] alm Pointer to alm coefficients. */ template <int N> inline void generate(int ngk, std::vector<double_complex> const& phase_factors__, int iat, int l, int lm, int nu, matrix3d<double>& A, double_complex* alm) const { /* invert matrix of radial derivatives */ switch (N) { case 1: { #if (__VERIFICATION > 0) if (std::abs(A(0, 0)) < 1.0 / std::sqrt(unit_cell_.omega())) { std::stringstream s; s << "Ill defined plane wave matching problem for atom type " << iat << ", l = " << l << std::endl << " radial function value at the MT boundary : " << A(0, 0); WARNING(s.str()); } #endif A(0, 0) = 1.0 / A(0, 0); break; } case 2: { double det = A(0, 0) * A(1, 1) - A(0, 1) * A(1, 0); #if (__VERIFICATION > 0) if (std::abs(det) < 1.0 / std::sqrt(unit_cell_.omega())) { std::stringstream s; s << "Ill defined plane wave matching problem for atom type " << iat << ", l = " << l << std::endl << " radial function value at the MT boundary : " << A(0 ,0); WARNING(s.str()); } #endif std::swap(A(0, 0), A(1, 1)); A(0, 0) /= det; A(1, 1) /= det; A(0, 1) = -A(0, 1) / det; A(1, 0) = -A(1, 0) / det; break; } case 3: { A = inverse(A); break; } } double_complex zt; for (int igk = 0; igk < ngk; igk++) { switch (N) { case 1: { zt = alm_b_(0, igk, l, iat) * A(0, 0); break; } case 2: { zt = alm_b_(0, igk, l, iat) * A(nu, 0) + alm_b_(1, igk, l, iat) * A(nu, 1); break; } case 3: { zt = alm_b_(0, igk, l, iat) * A(nu, 0) + alm_b_(1, igk, l, iat) * A(nu, 1) + alm_b_(2, igk, l, iat) * A(nu, 2); break; } } alm[igk] = phase_factors__[igk] * std::conj(gkvec_ylm_(igk, lm)) * zt; } } public: /// Constructor Matching_coefficients(Unit_cell const& unit_cell__, int lmax_apw__, int num_gkvec__, std::vector<int>& igk__, Gvec const& gkvec__) : unit_cell_(unit_cell__), num_gkvec_(num_gkvec__), igk_(igk__), gkvec_(gkvec__) { int lmmax_apw = Utils::lmmax(lmax_apw__); gkvec_ylm_ = mdarray<double_complex, 2>(num_gkvec_, lmmax_apw); gkvec_len_.resize(num_gkvec_); /* get length and Ylm harmonics of G+k vectors */ #pragma omp parallel { std::vector<double_complex> ylm(lmmax_apw); #pragma omp for for (int i = 0; i < num_gkvec_; i++) { auto gkvec_cart = gkvec_.gkvec_cart(igk_[i]); /* get r, theta, phi */ auto vs = SHT::spherical_coordinates(gkvec_cart); /* get spherical harmonics */ SHT::spherical_harmonics(lmax_apw__, vs[1], vs[2], &ylm[0]); gkvec_len_[i] = vs[0]; for (int lm = 0; lm < lmmax_apw; lm++) { gkvec_ylm_(i, lm) = ylm[lm]; } } } alm_b_ = mdarray<double_complex, 4>(3, num_gkvec_, lmax_apw__ + 1, unit_cell_.num_atom_types()); alm_b_.zero(); /* value and first two derivatives of spherical Bessel functions */ mdarray<double, 2> sbessel_mt(lmax_apw__ + 2, 3); for (int igk = 0; igk < num_gkvec_; igk++) { for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) { double R = unit_cell_.atom_type(iat).mt_radius(); double RGk = R * gkvec_len_[igk]; /* compute values and first and second derivatives of the spherical Bessel functions at the MT boundary */ gsl_sf_bessel_jl_array(lmax_apw__ + 1, RGk, &sbessel_mt(0, 0)); /* Bessel function derivative: f_{{n}}^{{\prime}}(z)=-f_{{n+1}}(z)+(n/z)f_{{n}}(z) * * In[]:= FullSimplify[D[SphericalBesselJ[n,a*x],{x,1}]] * Out[]= (n SphericalBesselJ[n,a x])/x-a SphericalBesselJ[1+n,a x] * * In[]:= FullSimplify[D[SphericalBesselJ[n,a*x],{x,2}]] * Out[]= (((-1+n) n-a^2 x^2) SphericalBesselJ[n,a x]+2 a x SphericalBesselJ[1+n,a x])/x^2 */ for (int l = 0; l <= lmax_apw__; l++) { sbessel_mt(l, 1) = -sbessel_mt(l + 1, 0) * gkvec_len_[igk] + (l / R) * sbessel_mt(l, 0); sbessel_mt(l, 2) = 2 * gkvec_len_[igk] * 
sbessel_mt(l + 1, 0) / R + ((l - 1) * l - std::pow(RGk, 2)) * sbessel_mt(l, 0) / std::pow(R, 2); } for (int l = 0; l <= lmax_apw__; l++) { double_complex z = std::pow(double_complex(0, 1), l); double f = fourpi / std::sqrt(unit_cell_.omega()); alm_b_(0, igk, l, iat) = z * f * sbessel_mt(l, 0); alm_b_(1, igk, l, iat) = z * f * sbessel_mt(l, 1); alm_b_(2, igk, l, iat) = z * f * sbessel_mt(l, 2); } } } } /// Generate plane-wave matching coefficents for the radial solutions of a given atom. /** \param [in] ia Index of atom. * \param [out] alm Array of matching coefficients with dimension indices \f$ ({\bf G+k}, \xi) \f$. */ void generate(int ia, mdarray<double_complex, 2>& alm) const { auto& atom = unit_cell_.atom(ia); auto& type = atom.type(); assert(type.max_aw_order() <= 3); int iat = type.id(); std::vector<double_complex> phase_factors(num_gkvec_); for (int i = 0; i < num_gkvec_; i++) { double phase = twopi * (gkvec_.gkvec(igk_[i]) * unit_cell_.atom(ia).position()); phase_factors[i] = std::exp(double_complex(0, phase)); } matrix3d<double> A; for (int xi = 0; xi < type.mt_aw_basis_size(); xi++) { int l = type.indexb(xi).l; int lm = type.indexb(xi).lm; int nu = type.indexb(xi).order; /* order of augmentation for a given orbital quantum number */ int num_aw = static_cast<int>(type.aw_descriptor(l).size()); /* create matrix of radial derivatives */ for (int order = 0; order < num_aw; order++) { for (int dm = 0; dm < num_aw; dm++) { A(dm, order) = atom.symmetry_class().aw_surface_dm(l, order, dm); } } switch (num_aw) { /* APW */ case 1: { generate<1>(num_gkvec_, phase_factors, iat, l, lm, nu, A, &alm(0, xi)); break; } /* LAPW */ case 2: { generate<2>(num_gkvec_, phase_factors, iat, l, lm, nu, A, &alm(0, xi)); break; } /* Super LAPW */ case 3: { generate<3>(num_gkvec_, phase_factors, iat, l, lm, nu, A, &alm(0, xi)); break; } default: { TERMINATE("wrong order of augmented wave"); } } } } }; } #endif // __MATCHING_COEFFICIENTS_H__
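For reference, the two sbessel_mt assignments in the constructor implement the spherical Bessel derivative identities quoted in the Mathematica comment. With \f$ q = |{\bf G+k}| \f$ (stored in gkvec_len_[igk]) and muffin-tin radius \f$ R \f$ (so RGk = \f$ qR \f$) they read:

\f[
\frac{\partial}{\partial r} j_{\ell}(q r) \bigg|_{r=R} = -q\, j_{\ell+1}(qR) + \frac{\ell}{R}\, j_{\ell}(qR)
\f]
\f[
\frac{\partial^2}{\partial r^2} j_{\ell}(q r) \bigg|_{r=R} = \frac{2 q R\, j_{\ell+1}(qR) + \big(\ell(\ell-1) - q^2 R^2\big)\, j_{\ell}(qR)}{R^2}
\f]

These match sbessel_mt(l, 1) and sbessel_mt(l, 2) term for term, and agree with the FullSimplify outputs quoted in the code comment under the substitution a = \f$ q \f$, x = \f$ R \f$.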
pngquant.c
/* pngquant.c - quantize the colors in an alphamap down to a specified number ** ** © 2009-2019 by Kornel Lesiński. ** © 1989, 1991 by Jef Poskanzer. ** © 1997-2002 by Greg Roelofs; based on an idea by Stefan Schneider. ** ** See COPYRIGHT file for license. */ char *PNGQUANT_USAGE = "\ usage: pngquant [options] [ncolors] -- pngfile [pngfile ...]\n\ pngquant [options] [ncolors] - >stdout <stdin\n\n\ options:\n\ --force overwrite existing output files (synonym: -f)\n\ --skip-if-larger only save converted files if they're smaller than original\n\ --output file destination file path to use instead of --ext (synonym: -o)\n\ --ext new.png set custom suffix/extension for output filenames\n\ --quality min-max don't save below min, use fewer colors below max (0-100)\n\ --speed N speed/quality trade-off. 1=slow, 4=default, 11=fast & rough\n\ --nofs disable Floyd-Steinberg dithering\n\ --posterize N output lower-precision color (e.g. for ARGB4444 output)\n\ --strip remove optional metadata (default on Mac)\n\ --verbose print status messages (synonym: -v)\n\ \n\ Quantizes one or more 32-bit RGBA PNGs to 8-bit (or smaller) RGBA-palette.\n\ The output filename is the same as the input name except that\n\ it ends in \"-fs8.png\", \"-or8.png\" or your custom extension (unless the\n\ input is stdin, in which case the quantized image will go to stdout).\n\ If you pass the special output path \"-\" and a single input file, that file\n\ will be processed and the quantized image will go to stdout.\n\ The default behavior if the output file exists is to skip the conversion;\n\ use --force to overwrite. See man page for full list of options.\n"; #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <math.h> #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) #include <fcntl.h> /* O_BINARY */ #include <io.h> /* setmode() */ #include <locale.h> /* UTF-8 locale */ #else #include <unistd.h> #endif #ifdef _OPENMP #include <omp.h> #else #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif #include "rwpng.h" /* typedefs, common macros, public prototypes */ #include "libimagequant.h" /* if it fails here, run: git submodule update or add -Ilib to compiler flags */ #include "pngquant_opts.h" char *PNGQUANT_VERSION = LIQ_VERSION_STRING " (January 2022)"; static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform tag, png8_image *output_image); static void set_palette(liq_result *result, png8_image *output_image); static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose); static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options, liq_attr *liq); static char *add_filename_extension(const char *filename, const char *newext); static bool file_exists(const char *outname); static void verbose_printf(liq_attr *liq, struct pngquant_options *context, const char *fmt, ...) 
{ if (context->log_callback) { va_list va; va_start(va, fmt); int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0 va_end(va); #if defined(_MSC_VER) char *buf = malloc(required_space); #else char buf[required_space]; #endif va_start(va, fmt); vsnprintf(buf, required_space, fmt, va); va_end(va); context->log_callback(liq, buf, context->log_callback_user_info); #if defined(_MSC_VER) free(buf); #endif } } static void log_callback(const liq_attr *attr, const char *msg, void* user_info) { fprintf(stderr, "%s\n", msg); } #ifdef _OPENMP #define LOG_BUFFER_SIZE 1300 struct buffered_log { int buf_used; char buf[LOG_BUFFER_SIZE]; }; static void log_callback_buferred_flush(const liq_attr *attr, void *context) { struct buffered_log *log = context; if (log->buf_used) { fwrite(log->buf, 1, log->buf_used, stderr); fflush(stderr); log->buf_used = 0; } } static void log_callback_buferred(const liq_attr *attr, const char *msg, void* context) { struct buffered_log *log = context; int len = strlen(msg); if (len > LOG_BUFFER_SIZE-2) len = LOG_BUFFER_SIZE-2; if (len > LOG_BUFFER_SIZE - log->buf_used - 2) log_callback_buferred_flush(attr, log); memcpy(&log->buf[log->buf_used], msg, len); log->buf_used += len+1; log->buf[log->buf_used-1] = '\n'; log->buf[log->buf_used] = '\0'; } #endif void pngquant_internal_print_config(FILE *fd) { fputs("" #ifndef NDEBUG " WARNING: this is a DEBUG (slow) version.\n" /* NDEBUG disables assert() */ #endif #if !USE_SSE && (defined(__SSE__) || defined(__amd64__) || defined(__X86_64__) || defined(__i386__)) " SSE acceleration disabled.\n" #endif #if _OPENMP " Compiled with OpenMP (multicore support).\n" #endif , fd); fflush(fd); } FILE *pngquant_c_stderr() { return stderr; } FILE *pngquant_c_stdout() { return stdout; } static void print_full_version(FILE *fd) { fprintf(fd, "pngquant, %s, by Kornel Lesinski, Greg Roelofs.\n", PNGQUANT_VERSION); pngquant_internal_print_config(fd); rwpng_version_info(fd); fputs("\n", fd); } static void print_usage(FILE *fd) { fputs(PNGQUANT_USAGE, fd); } /** * N = automatic quality, uses limit unless force is set (N-N or 0-N) * -N = no better than N (same as 0-N) * N-M = no worse than N, no better than M * N- = no worse than N, perfect if possible (same as N-100) * * where N,M are numbers between 0 (lousy) and 100 (perfect) */ static bool parse_quality(const char *quality, liq_attr *options, bool *min_quality_limit) { long limit, target; const char *str = quality; char *end; long t1 = strtol(str, &end, 10); if (str == end) return false; str = end; if ('\0' == end[0] && t1 < 0) { // quality="-%d" target = -t1; limit = 0; } else if ('\0' == end[0]) { // quality="%d" target = t1; limit = t1*9/10; } else if ('-' == end[0] && '\0' == end[1]) { // quality="%d-" target = 100; limit = t1; } else { // quality="%d-%d" long t2 = strtol(str, &end, 10); if (str == end || t2 > 0) return false; target = -t2; limit = t1; } *min_quality_limit = (limit > 0); return LIQ_OK == liq_set_quality(options, limit, target); } pngquant_error pngquant_main_internal(struct pngquant_options *options, liq_attr *liq); static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options, liq_attr *liq); #ifndef PNGQUANT_NO_MAIN int main(int argc, char *argv[]) { struct pngquant_options options = { .floyd = 1.f, // floyd-steinberg dithering .strip = false, }; pngquant_error retval = pngquant_parse_options(argc, argv, &options); if (retval != SUCCESS) { return retval; } if (options.print_version) { puts(PNGQUANT_VERSION); return 
SUCCESS; } if (options.missing_arguments) { print_full_version(stderr); print_usage(stderr); return MISSING_ARGUMENT; } if (options.print_help) { print_full_version(stdout); print_usage(stdout); return SUCCESS; } #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) setlocale(LC_ALL, ".65001"); // issue #376; set UTF-8 for Unicode filenames #endif liq_attr *liq = liq_attr_create(); if (!liq) { fputs("SSE-capable CPU is required for this build.\n", stderr); return WRONG_ARCHITECTURE; } if (options.quality && !parse_quality(options.quality, liq, &options.min_quality_limit)) { fputs("Quality should be in format min-max where min and max are numbers in range 0-100.\n", stderr); return INVALID_ARGUMENT; } if (options.iebug) { // opacities above 238 will be rounded up to 255, because IE6 truncates <255 to 0. liq_set_min_opacity(liq, 238); fputs(" warning: the workaround for IE6 is deprecated\n", stderr); } if (options.verbose) { liq_set_log_callback(liq, log_callback, NULL); options.log_callback = log_callback; } if (options.last_index_transparent) { liq_set_last_index_transparent(liq, true); } if (options.speed >= 10) { options.fast_compression = true; if (options.speed == 11) { options.floyd = 0; options.speed = 10; } } if (options.speed && LIQ_OK != liq_set_speed(liq, options.speed)) { fputs("Speed should be between 1 (slow) and 11 (fast).\n", stderr); return INVALID_ARGUMENT; } if (options.colors && LIQ_OK != liq_set_max_colors(liq, options.colors)) { fputs("Number of colors must be between 2 and 256.\n", stderr); return INVALID_ARGUMENT; } if (options.posterize && LIQ_OK != liq_set_min_posterization(liq, options.posterize)) { fputs("Posterization should be number of bits in range 0-4.\n", stderr); return INVALID_ARGUMENT; } if (options.extension && options.output_file_path) { fputs("--ext and --output options can't be used at the same time\n", stderr); return INVALID_ARGUMENT; } // new filename extension depends on options used. Typically basename-fs8.png if (options.extension == NULL) { options.extension = options.floyd > 0 ? "-fs8.png" : "-or8.png"; } if (options.output_file_path && options.num_files != 1) { fputs(" error: Only one input file is allowed when --output is used. This error also happens when filenames with spaces are not in quotes.\n", stderr); return INVALID_ARGUMENT; } if (options.using_stdout && !options.using_stdin && options.num_files != 1) { fputs(" error: Only one input file is allowed when using the special output path \"-\" to write to stdout. This error also happens when filenames with spaces are not in quotes.\n", stderr); return INVALID_ARGUMENT; } if (!options.num_files && !options.using_stdin) { fputs("No input files specified.\n", stderr); if (options.verbose) { print_full_version(stderr); } print_usage(stderr); return MISSING_ARGUMENT; } retval = pngquant_main_internal(&options, liq); liq_attr_destroy(liq); return retval; } #endif // Don't use this. This is not a public API. 
pngquant_error pngquant_main_internal(struct pngquant_options *options, liq_attr *liq) { #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) setlocale(LC_ALL, ".65001"); // issue #376; set UTF-8 for Unicode filenames #endif if (options->map_file) { png24_image tmp = {.width=0}; if (SUCCESS != read_image(liq, options->map_file, false, &tmp, &options->fixed_palette_image, true, true, false)) { fprintf(stderr, " error: unable to load %s", options->map_file); return INVALID_ARGUMENT; } liq_result *tmp_quantize = liq_quantize_image(liq, options->fixed_palette_image); const liq_palette *pal = liq_get_palette(tmp_quantize); if (!pal) { fprintf(stderr, " error: unable to read colors from %s", options->map_file); return INVALID_ARGUMENT; } for(unsigned int i=0; i < pal->count; i++) { liq_image_add_fixed_color(options->fixed_palette_image, pal->entries[i]); } liq_result_destroy(tmp_quantize); } #ifdef _OPENMP // if there's a lot of files, coarse parallelism can be used if (options->num_files > 2*omp_get_max_threads()) { omp_set_nested(0); omp_set_dynamic(1); } else { omp_set_nested(1); } #endif unsigned int error_count=0, skipped_count=0, file_count=0; pngquant_error latest_error=SUCCESS; #pragma omp parallel for \ schedule(static, 1) reduction(+:skipped_count) reduction(+:error_count) reduction(+:file_count) shared(latest_error) for(int i=0; i < options->num_files; i++) { const char *filename = options->using_stdin ? "stdin" : options->files[i]; struct pngquant_options opts = *options; liq_attr *local_liq = liq_attr_copy(liq); #ifdef _OPENMP struct buffered_log buf = {0}; if (opts.log_callback && omp_get_num_threads() > 1 && opts.num_files > 1) { liq_set_log_callback(local_liq, log_callback_buferred, &buf); liq_set_log_flush_callback(local_liq, log_callback_buferred_flush, &buf); opts.log_callback = log_callback_buferred; opts.log_callback_user_info = &buf; } #endif pngquant_error retval = SUCCESS; const char *outname = opts.output_file_path; char *outname_free = NULL; if (!opts.using_stdout) { if (!outname) { outname = outname_free = add_filename_extension(filename, opts.extension); } if (!opts.force && file_exists(outname)) { fprintf(stderr, " error: '%s' exists; not overwriting\n", outname); retval = NOT_OVERWRITING_ERROR; } } if (SUCCESS == retval) { retval = pngquant_file_internal(filename, outname, &opts, local_liq); } free(outname_free); liq_attr_destroy(local_liq); if (retval) { #pragma omp critical { latest_error = retval; } if (retval == TOO_LOW_QUALITY || retval == TOO_LARGE_FILE) { skipped_count++; } else { error_count++; } } ++file_count; } if (error_count) { verbose_printf(liq, options, "There were errors quantizing %d file%s out of a total of %d file%s.", error_count, (error_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s"); } if (skipped_count) { verbose_printf(liq, options, "Skipped %d file%s out of a total of %d file%s.", skipped_count, (skipped_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s"); } if (!skipped_count && !error_count) { verbose_printf(liq, options, "Quantized %d image%s.", file_count, (file_count == 1)? "" : "s"); } if (options->fixed_palette_image) liq_image_destroy(options->fixed_palette_image); return latest_error; } /// Don't hack this. 
Instead use https://github.com/ImageOptim/libimagequant/blob/f54d2f1a3e1cf728e17326f4db0d45811c63f063/example.c static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options, liq_attr *liq) { pngquant_error retval = SUCCESS; verbose_printf(liq, options, "%s:", filename); liq_image *input_image = NULL; png24_image input_image_rwpng = {.width=0}; bool keep_input_pixels = options->skip_if_larger || (options->using_stdout && options->min_quality_limit); // original may need to be output to stdout if (SUCCESS == retval) { retval = read_image(liq, filename, options->using_stdin, &input_image_rwpng, &input_image, keep_input_pixels, options->strip, options->verbose); } int quality_percent = 90; // quality on 0-100 scale, updated upon successful remap png8_image output_image = {.width=0}; if (SUCCESS == retval) { verbose_printf(liq, options, " read %luKB file", (input_image_rwpng.file_size+1023UL)/1024UL); if (RWPNG_ICCP == input_image_rwpng.input_color) { verbose_printf(liq, options, " used embedded ICC profile to transform image to sRGB colorspace"); } else if (RWPNG_GAMA_CHRM == input_image_rwpng.input_color) { verbose_printf(liq, options, " used gAMA and cHRM chunks to transform image to sRGB colorspace"); } else if (RWPNG_ICCP_WARN_GRAY == input_image_rwpng.input_color) { verbose_printf(liq, options, " warning: ignored ICC profile in GRAY colorspace"); } else if (RWPNG_COCOA == input_image_rwpng.input_color) { // No comment } else if (RWPNG_SRGB == input_image_rwpng.input_color) { verbose_printf(liq, options, " passing sRGB tag from the input"); } else if (input_image_rwpng.gamma != 0.45455) { verbose_printf(liq, options, " converted image from gamma %2.1f to gamma 2.2", 1.0/input_image_rwpng.gamma); } // when using image as source of a fixed palette the palette is extracted using regular quantization liq_result *remap; liq_error remap_error = liq_image_quantize(options->fixed_palette_image ? options->fixed_palette_image : input_image, liq, &remap); if (LIQ_OK == remap_error) { // fixed gamma ~2.2 for the web. PNG can't store exact 1/2.2 // NB: can't change gamma here, because output_color is allowed to be an sRGB tag liq_set_output_gamma(remap, 0.45455); liq_set_dithering_level(remap, options->floyd); retval = prepare_output_image(remap, input_image, input_image_rwpng.output_color, &output_image); if (SUCCESS == retval) { if (LIQ_OK != liq_write_remapped_image_rows(remap, input_image, output_image.row_pointers)) { retval = OUT_OF_MEMORY_ERROR; } set_palette(remap, &output_image); double palette_error = liq_get_quantization_error(remap); if (palette_error >= 0) { quality_percent = liq_get_quantization_quality(remap); verbose_printf(liq, options, " mapped image to new colors...MSE=%.3f (Q=%d)", palette_error, quality_percent); } } liq_result_destroy(remap); } else if (LIQ_QUALITY_TOO_LOW == remap_error) { retval = TOO_LOW_QUALITY; } else { retval = INVALID_ARGUMENT; // dunno } } if (SUCCESS == retval) { if (options->skip_if_larger) { // this is very rough approximation, but generally avoid losing more quality than is gained in file size. // Quality is raised to 1.5, because even greater savings are needed to justify big quality loss. 
// but >50% savings are considered always worthwhile in order to allow low quality conversions to work at all const double quality = quality_percent/100.0; const double expected_reduced_size = pow(quality, 1.5); output_image.maximum_file_size = (input_image_rwpng.file_size-1) * (expected_reduced_size < 0.5 ? 0.5 : expected_reduced_size); } output_image.fast_compression = options->fast_compression; output_image.chunks = input_image_rwpng.chunks; input_image_rwpng.chunks = NULL; retval = write_image(&output_image, NULL, outname, options, liq); if (TOO_LARGE_FILE == retval) { verbose_printf(liq, options, " file exceeded expected size of %luKB", (unsigned long)output_image.maximum_file_size/1024UL); } if (SUCCESS == retval && output_image.metadata_size > 0) { verbose_printf(liq, options, " copied %dKB of additional PNG metadata", (int)(output_image.metadata_size+999)/1000); } } if (options->using_stdout && keep_input_pixels && (TOO_LARGE_FILE == retval || TOO_LOW_QUALITY == retval)) { // when outputting to stdout it'd be nasty to create 0-byte file // so if quality is too low, output 24-bit original pngquant_error write_retval = write_image(NULL, &input_image_rwpng, outname, options, liq); if (write_retval) { retval = write_retval; } } if (input_image) liq_image_destroy(input_image); rwpng_free_image24(&input_image_rwpng); rwpng_free_image8(&output_image); return retval; } static void set_palette(liq_result *result, png8_image *output_image) { const liq_palette *palette = liq_get_palette(result); output_image->num_palette = palette->count; for(unsigned int i=0; i < palette->count; i++) { const liq_color px = palette->entries[i]; output_image->palette[i] = (rwpng_rgba){.r=px.r, .g=px.g, .b=px.b, .a=px.a}; } } static bool file_exists(const char *outname) { FILE *outfile = fopen(outname, "rb"); if ((outfile ) != NULL) { fclose(outfile); return true; } return false; } /* build the output filename from the input name by inserting "-fs8" or * "-or8" before the ".png" extension (or by appending that plus ".png" if * there isn't any extension), then make sure it doesn't exist already */ static char *add_filename_extension(const char *filename, const char *newext) { size_t x = strlen(filename); char* outname = malloc(x+4+strlen(newext)+1); if (!outname) return NULL; strcpy(outname, filename); if (x > 4 && (strncmp(outname+x-4, ".png", 4) == 0 || strncmp(outname+x-4, ".PNG", 4) == 0)) { strcpy(outname+x-4, newext); } else { strcpy(outname+x, newext); } return outname; } static char *temp_filename(const char *basename) { size_t x = strlen(basename); char *outname = malloc(x+1+4); if (!outname) return NULL; strcpy(outname, basename); strcpy(outname+x, ".tmp"); return outname; } static void set_binary_mode(FILE *fp) { #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) setmode(fp == stdout ? 
1 : 0, O_BINARY); #endif } static const char *filename_part(const char *path) { const char *outfilename = strrchr(path, '/'); if (outfilename) { return outfilename+1; } else { return path; } } static bool replace_file(const char *from, const char *to, const bool force) { #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) if (force) { // On Windows rename doesn't replace unlink(to); } #endif return (0 == rename(from, to)); } static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options, liq_attr *liq) { FILE *outfile; char *tempname = NULL; if (options->using_stdout) { set_binary_mode(stdout); outfile = stdout; if (output_image) { verbose_printf(liq, options, " writing %d-color image to stdout", output_image->num_palette); } else { verbose_printf(liq, options, " writing truecolor image to stdout"); } } else { tempname = temp_filename(outname); if (!tempname) return OUT_OF_MEMORY_ERROR; if ((outfile = fopen(tempname, "wb")) == NULL) { fprintf(stderr, " error: cannot open '%s' for writing\n", tempname); free(tempname); return CANT_WRITE_ERROR; } if (output_image) { verbose_printf(liq, options, " writing %d-color image as %s", output_image->num_palette, filename_part(outname)); } else { verbose_printf(liq, options, " writing truecolor image as %s", filename_part(outname)); } } pngquant_error retval; #pragma omp critical (libpng) { if (output_image) { retval = rwpng_write_image8(outfile, output_image); } else { retval = rwpng_write_image24(outfile, output_image24); } } if (!options->using_stdout) { fclose(outfile); if (SUCCESS == retval) { // Image has been written to a temporary file and then moved over destination. // This makes replacement atomic and avoids damaging destination file on write error. if (!replace_file(tempname, outname, options->force)) { retval = CANT_WRITE_ERROR; } } if (retval) { unlink(tempname); } } free(tempname); if (retval && retval != TOO_LARGE_FILE) { fprintf(stderr, " error: failed writing image to %s (%d)\n", options->using_stdout ? "stdout" : outname, retval); } return retval; } static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose) { FILE *infile; if (using_stdin) { set_binary_mode(stdin); infile = stdin; } else if ((infile = fopen(filename, "rb")) == NULL) { fprintf(stderr, " error: cannot open %s for reading\n", filename); return READ_ERROR; } pngquant_error retval; #pragma omp critical (libpng) { retval = rwpng_read_image24(infile, input_image_p, strip, verbose); } if (!using_stdin) { fclose(infile); } if (retval) { fprintf(stderr, " error: cannot decode image %s\n", using_stdin ? 
"from stdin" : filename_part(filename)); return retval; } *liq_image_p = liq_image_create_rgba_rows(options, (void**)input_image_p->row_pointers, input_image_p->width, input_image_p->height, input_image_p->gamma); if (!*liq_image_p) { return OUT_OF_MEMORY_ERROR; } if (!keep_input_pixels) { if (LIQ_OK != liq_image_set_memory_ownership(*liq_image_p, LIQ_OWN_ROWS | LIQ_OWN_PIXELS)) { return OUT_OF_MEMORY_ERROR; } input_image_p->row_pointers = NULL; input_image_p->rgba_data = NULL; } return SUCCESS; } static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform output_color, png8_image *output_image) { output_image->width = liq_image_get_width(input_image); output_image->height = liq_image_get_height(input_image); output_image->gamma = liq_get_output_gamma(result); output_image->output_color = output_color; /* ** Step 3.7 [GRR]: allocate memory for the entire indexed image */ output_image->indexed_data = malloc((size_t)output_image->height * (size_t)output_image->width); output_image->row_pointers = malloc((size_t)output_image->height * sizeof(output_image->row_pointers[0])); if (!output_image->indexed_data || !output_image->row_pointers) { return OUT_OF_MEMORY_ERROR; } for(size_t row = 0; row < output_image->height; row++) { output_image->row_pointers[row] = output_image->indexed_data + row * output_image->width; } const liq_palette *palette = liq_get_palette(result); // tRNS, etc. output_image->num_palette = palette->count; return SUCCESS; }
char *PNGQUANT_USAGE = "\ usage: pngquant [options] [ncolors] -- pngfile [pngfile ...]\n\ pngquant [options] [ncolors] - >stdout <stdin\n\n\ options:\n\ --force overwrite existing output files (synonym: -f)\n\ --skip-if-larger only save converted files if they're smaller than original\n\ --output file destination file path to use instead of --ext (synonym: -o)\n\ --ext new.png set custom suffix/extension for output filenames\n\ --quality min-max don't save below min, use fewer colors below max (0-100)\n\ --speed N speed/quality trade-off. 1=slow, 4=default, 11=fast & rough\n\ --nofs disable Floyd-Steinberg dithering\n\ --posterize N output lower-precision color (e.g. for ARGB4444 output)\n\ --strip remove optional metadata (default on Mac)\n\ --verbose print status messages (synonym: -v)\n\ \n\ Quantizes one or more 32-bit RGBA PNGs to 8-bit (or smaller) RGBA-palette.\n\ The output filename is the same as the input name except that\n\ it ends in \"-fs8.png\", \"-or8.png\" or your custom extension (unless the\n\ input is stdin, in which case the quantized image will go to stdout).\n\ If you pass the special output path \"-\" and a single input file, that file\n\ will be processed and the quantized image will go to stdout.\n\ The default behavior if the output file exists is to skip the conversion;\n\ use --force to overwrite. See man page for full list of options.\n"; #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <math.h> #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) #include <fcntl.h> /* O_BINARY */ #include <io.h> /* setmode() */ #include <locale.h> /* UTF-8 locale */ #else #include <unistd.h> #endif #include "rwpng.h" /* typedefs, common macros, public prototypes */ #include "libimagequant.h" /* if it fails here, run: git submodule update or add -Ilib to compiler flags */ #include "pngquant_opts.h" char *PNGQUANT_VERSION = LIQ_VERSION_STRING " (January 2022)"; static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform tag, png8_image *output_image); static void set_palette(liq_result *result, png8_image *output_image); static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose); static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options, liq_attr *liq); static char *add_filename_extension(const char *filename, const char *newext); static bool file_exists(const char *outname); static void verbose_printf(liq_attr *liq, struct pngquant_options *context, const char *fmt, ...) 
{ if (context->log_callback) { va_list va; va_start(va, fmt); int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0 va_end(va); #if defined(_MSC_VER) char *buf = malloc(required_space); #else char buf[required_space]; #endif va_start(va, fmt); vsnprintf(buf, required_space, fmt, va); va_end(va); context->log_callback(liq, buf, context->log_callback_user_info); #if defined(_MSC_VER) free(buf); #endif } } static void log_callback(const liq_attr *attr, const char *msg, void* user_info) { fprintf(stderr, "%s\n", msg); } void pngquant_internal_print_config(FILE *fd) { fputs("" #ifndef NDEBUG " WARNING: this is a DEBUG (slow) version.\n" /* NDEBUG disables assert() */ #endif #if !USE_SSE && (defined(__SSE__) || defined(__amd64__) || defined(__X86_64__) || defined(__i386__)) " SSE acceleration disabled.\n" #endif #if _OPENMP " Compiled with OpenMP (multicore support).\n" #endif , fd); fflush(fd); } FILE *pngquant_c_stderr() { return stderr; } FILE *pngquant_c_stdout() { return stdout; } static void print_full_version(FILE *fd) { fprintf(fd, "pngquant, %s, by Kornel Lesinski, Greg Roelofs.\n", PNGQUANT_VERSION); pngquant_internal_print_config(fd); rwpng_version_info(fd); fputs("\n", fd); } static void print_usage(FILE *fd) { fputs(PNGQUANT_USAGE, fd); } /** * N = automatic quality, uses limit unless force is set (N-N or 0-N) * -N = no better than N (same as 0-N) * N-M = no worse than N, no better than M * N- = no worse than N, perfect if possible (same as N-100) * * where N,M are numbers between 0 (lousy) and 100 (perfect) */ static bool parse_quality(const char *quality, liq_attr *options, bool *min_quality_limit) { long limit, target; const char *str = quality; char *end; long t1 = strtol(str, &end, 10); if (str == end) return false; str = end; if ('\0' == end[0] && t1 < 0) { // quality="-%d" target = -t1; limit = 0; } else if ('\0' == end[0]) { // quality="%d" target = t1; limit = t1*9/10; } else if ('-' == end[0] && '\0' == end[1]) { // quality="%d-" target = 100; limit = t1; } else { // quality="%d-%d" long t2 = strtol(str, &end, 10); if (str == end || t2 > 0) return false; target = -t2; limit = t1; } *min_quality_limit = (limit > 0); return LIQ_OK == liq_set_quality(options, limit, target); } pngquant_error pngquant_main_internal(struct pngquant_options *options, liq_attr *liq); static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options, liq_attr *liq); #ifndef PNGQUANT_NO_MAIN int main(int argc, char *argv[]) { struct pngquant_options options = { .floyd = 1.f, // floyd-steinberg dithering .strip = false, }; pngquant_error retval = pngquant_parse_options(argc, argv, &options); if (retval != SUCCESS) { return retval; } if (options.print_version) { puts(PNGQUANT_VERSION); return SUCCESS; } if (options.missing_arguments) { print_full_version(stderr); print_usage(stderr); return MISSING_ARGUMENT; } if (options.print_help) { print_full_version(stdout); print_usage(stdout); return SUCCESS; } #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) setlocale(LC_ALL, ".65001"); // issue #376; set UTF-8 for Unicode filenames #endif liq_attr *liq = liq_attr_create(); if (!liq) { fputs("SSE-capable CPU is required for this build.\n", stderr); return WRONG_ARCHITECTURE; } if (options.quality && !parse_quality(options.quality, liq, &options.min_quality_limit)) { fputs("Quality should be in format min-max where min and max are numbers in range 0-100.\n", stderr); return INVALID_ARGUMENT; } if (options.iebug) { // opacities above 238 
will be rounded up to 255, because IE6 truncates <255 to 0. liq_set_min_opacity(liq, 238); fputs(" warning: the workaround for IE6 is deprecated\n", stderr); } if (options.verbose) { liq_set_log_callback(liq, log_callback, NULL); options.log_callback = log_callback; } if (options.last_index_transparent) { liq_set_last_index_transparent(liq, true); } if (options.speed >= 10) { options.fast_compression = true; if (options.speed == 11) { options.floyd = 0; options.speed = 10; } } if (options.speed && LIQ_OK != liq_set_speed(liq, options.speed)) { fputs("Speed should be between 1 (slow) and 11 (fast).\n", stderr); return INVALID_ARGUMENT; } if (options.colors && LIQ_OK != liq_set_max_colors(liq, options.colors)) { fputs("Number of colors must be between 2 and 256.\n", stderr); return INVALID_ARGUMENT; } if (options.posterize && LIQ_OK != liq_set_min_posterization(liq, options.posterize)) { fputs("Posterization should be number of bits in range 0-4.\n", stderr); return INVALID_ARGUMENT; } if (options.extension && options.output_file_path) { fputs("--ext and --output options can't be used at the same time\n", stderr); return INVALID_ARGUMENT; } // new filename extension depends on options used. Typically basename-fs8.png if (options.extension == NULL) { options.extension = options.floyd > 0 ? "-fs8.png" : "-or8.png"; } if (options.output_file_path && options.num_files != 1) { fputs(" error: Only one input file is allowed when --output is used. This error also happens when filenames with spaces are not in quotes.\n", stderr); return INVALID_ARGUMENT; } if (options.using_stdout && !options.using_stdin && options.num_files != 1) { fputs(" error: Only one input file is allowed when using the special output path \"-\" to write to stdout. This error also happens when filenames with spaces are not in quotes.\n", stderr); return INVALID_ARGUMENT; } if (!options.num_files && !options.using_stdin) { fputs("No input files specified.\n", stderr); if (options.verbose) { print_full_version(stderr); } print_usage(stderr); return MISSING_ARGUMENT; } retval = pngquant_main_internal(&options, liq); liq_attr_destroy(liq); return retval; } #endif // Don't use this. This is not a public API. pngquant_error pngquant_main_internal(struct pngquant_options *options, liq_attr *liq) { #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) setlocale(LC_ALL, ".65001"); // issue #376; set UTF-8 for Unicode filenames #endif if (options->map_file) { png24_image tmp = {.width=0}; if (SUCCESS != read_image(liq, options->map_file, false, &tmp, &options->fixed_palette_image, true, true, false)) { fprintf(stderr, " error: unable to load %s", options->map_file); return INVALID_ARGUMENT; } liq_result *tmp_quantize = liq_quantize_image(liq, options->fixed_palette_image); const liq_palette *pal = liq_get_palette(tmp_quantize); if (!pal) { fprintf(stderr, " error: unable to read colors from %s", options->map_file); return INVALID_ARGUMENT; } for(unsigned int i=0; i < pal->count; i++) { liq_image_add_fixed_color(options->fixed_palette_image, pal->entries[i]); } liq_result_destroy(tmp_quantize); } unsigned int error_count=0, skipped_count=0, file_count=0; pngquant_error latest_error=SUCCESS; for(int i=0; i < options->num_files; i++) { const char *filename = options->using_stdin ?
"stdin" : options->files[i]; struct pngquant_options opts = *options; liq_attr *local_liq = liq_attr_copy(liq); pngquant_error retval = SUCCESS; const char *outname = opts.output_file_path; char *outname_free = NULL; if (!opts.using_stdout) { if (!outname) { outname = outname_free = add_filename_extension(filename, opts.extension); } if (!opts.force && file_exists(outname)) { fprintf(stderr, " error: '%s' exists; not overwriting\n", outname); retval = NOT_OVERWRITING_ERROR; } } if (SUCCESS == retval) { retval = pngquant_file_internal(filename, outname, &opts, local_liq); } free(outname_free); liq_attr_destroy(local_liq); if (retval) { latest_error = retval; if (retval == TOO_LOW_QUALITY || retval == TOO_LARGE_FILE) { skipped_count++; } else { error_count++; } } ++file_count; } if (error_count) { verbose_printf(liq, options, "There were errors quantizing %d file%s out of a total of %d file%s.", error_count, (error_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s"); } if (skipped_count) { verbose_printf(liq, options, "Skipped %d file%s out of a total of %d file%s.", skipped_count, (skipped_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s"); } if (!skipped_count && !error_count) { verbose_printf(liq, options, "Quantized %d image%s.", file_count, (file_count == 1)? "" : "s"); } if (options->fixed_palette_image) liq_image_destroy(options->fixed_palette_image); return latest_error; } /// Don't hack this. Instead use https://github.com/ImageOptim/libimagequant/blob/f54d2f1a3e1cf728e17326f4db0d45811c63f063/example.c static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options, liq_attr *liq) { pngquant_error retval = SUCCESS; verbose_printf(liq, options, "%s:", filename); liq_image *input_image = NULL; png24_image input_image_rwpng = {.width=0}; bool keep_input_pixels = options->skip_if_larger || (options->using_stdout && options->min_quality_limit); // original may need to be output to stdout if (SUCCESS == retval) { retval = read_image(liq, filename, options->using_stdin, &input_image_rwpng, &input_image, keep_input_pixels, options->strip, options->verbose); } int quality_percent = 90; // quality on 0-100 scale, updated upon successful remap png8_image output_image = {.width=0}; if (SUCCESS == retval) { verbose_printf(liq, options, " read %luKB file", (input_image_rwpng.file_size+1023UL)/1024UL); if (RWPNG_ICCP == input_image_rwpng.input_color) { verbose_printf(liq, options, " used embedded ICC profile to transform image to sRGB colorspace"); } else if (RWPNG_GAMA_CHRM == input_image_rwpng.input_color) { verbose_printf(liq, options, " used gAMA and cHRM chunks to transform image to sRGB colorspace"); } else if (RWPNG_ICCP_WARN_GRAY == input_image_rwpng.input_color) { verbose_printf(liq, options, " warning: ignored ICC profile in GRAY colorspace"); } else if (RWPNG_COCOA == input_image_rwpng.input_color) { // No comment } else if (RWPNG_SRGB == input_image_rwpng.input_color) { verbose_printf(liq, options, " passing sRGB tag from the input"); } else if (input_image_rwpng.gamma != 0.45455) { verbose_printf(liq, options, " converted image from gamma %2.1f to gamma 2.2", 1.0/input_image_rwpng.gamma); } // when using image as source of a fixed palette the palette is extracted using regular quantization liq_result *remap; liq_error remap_error = liq_image_quantize(options->fixed_palette_image ? options->fixed_palette_image : input_image, liq, &remap); if (LIQ_OK == remap_error) { // fixed gamma ~2.2 for the web. 
PNG can't store exact 1/2.2 // NB: can't change gamma here, because output_color is allowed to be an sRGB tag liq_set_output_gamma(remap, 0.45455); liq_set_dithering_level(remap, options->floyd); retval = prepare_output_image(remap, input_image, input_image_rwpng.output_color, &output_image); if (SUCCESS == retval) { if (LIQ_OK != liq_write_remapped_image_rows(remap, input_image, output_image.row_pointers)) { retval = OUT_OF_MEMORY_ERROR; } set_palette(remap, &output_image); double palette_error = liq_get_quantization_error(remap); if (palette_error >= 0) { quality_percent = liq_get_quantization_quality(remap); verbose_printf(liq, options, " mapped image to new colors...MSE=%.3f (Q=%d)", palette_error, quality_percent); } } liq_result_destroy(remap); } else if (LIQ_QUALITY_TOO_LOW == remap_error) { retval = TOO_LOW_QUALITY; } else { retval = INVALID_ARGUMENT; // dunno } } if (SUCCESS == retval) { if (options->skip_if_larger) { // this is very rough approximation, but generally avoid losing more quality than is gained in file size. // Quality is raised to 1.5, because even greater savings are needed to justify big quality loss. // but >50% savings are considered always worthwhile in order to allow low quality conversions to work at all const double quality = quality_percent/100.0; const double expected_reduced_size = pow(quality, 1.5); output_image.maximum_file_size = (input_image_rwpng.file_size-1) * (expected_reduced_size < 0.5 ? 0.5 : expected_reduced_size); } output_image.fast_compression = options->fast_compression; output_image.chunks = input_image_rwpng.chunks; input_image_rwpng.chunks = NULL; retval = write_image(&output_image, NULL, outname, options, liq); if (TOO_LARGE_FILE == retval) { verbose_printf(liq, options, " file exceeded expected size of %luKB", (unsigned long)output_image.maximum_file_size/1024UL); } if (SUCCESS == retval && output_image.metadata_size > 0) { verbose_printf(liq, options, " copied %dKB of additional PNG metadata", (int)(output_image.metadata_size+999)/1000); } } if (options->using_stdout && keep_input_pixels && (TOO_LARGE_FILE == retval || TOO_LOW_QUALITY == retval)) { // when outputting to stdout it'd be nasty to create 0-byte file // so if quality is too low, output 24-bit original pngquant_error write_retval = write_image(NULL, &input_image_rwpng, outname, options, liq); if (write_retval) { retval = write_retval; } } if (input_image) liq_image_destroy(input_image); rwpng_free_image24(&input_image_rwpng); rwpng_free_image8(&output_image); return retval; } static void set_palette(liq_result *result, png8_image *output_image) { const liq_palette *palette = liq_get_palette(result); output_image->num_palette = palette->count; for(unsigned int i=0; i < palette->count; i++) { const liq_color px = palette->entries[i]; output_image->palette[i] = (rwpng_rgba){.r=px.r, .g=px.g, .b=px.b, .a=px.a}; } } static bool file_exists(const char *outname) { FILE *outfile = fopen(outname, "rb"); if ((outfile ) != NULL) { fclose(outfile); return true; } return false; } /* build the output filename from the input name by inserting "-fs8" or * "-or8" before the ".png" extension (or by appending that plus ".png" if * there isn't any extension), then make sure it doesn't exist already */ static char *add_filename_extension(const char *filename, const char *newext) { size_t x = strlen(filename); char* outname = malloc(x+4+strlen(newext)+1); if (!outname) return NULL; strcpy(outname, filename); if (x > 4 && (strncmp(outname+x-4, ".png", 4) == 0 || strncmp(outname+x-4, ".PNG", 4) == 
0)) { strcpy(outname+x-4, newext); } else { strcpy(outname+x, newext); } return outname; } static char *temp_filename(const char *basename) { size_t x = strlen(basename); char *outname = malloc(x+1+4); if (!outname) return NULL; strcpy(outname, basename); strcpy(outname+x, ".tmp"); return outname; } static void set_binary_mode(FILE *fp) { #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) setmode(fp == stdout ? 1 : 0, O_BINARY); #endif } static const char *filename_part(const char *path) { const char *outfilename = strrchr(path, '/'); if (outfilename) { return outfilename+1; } else { return path; } } static bool replace_file(const char *from, const char *to, const bool force) { #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) if (force) { // On Windows rename doesn't replace unlink(to); } #endif return (0 == rename(from, to)); } static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options, liq_attr *liq) { FILE *outfile; char *tempname = NULL; if (options->using_stdout) { set_binary_mode(stdout); outfile = stdout; if (output_image) { verbose_printf(liq, options, " writing %d-color image to stdout", output_image->num_palette); } else { verbose_printf(liq, options, " writing truecolor image to stdout"); } } else { tempname = temp_filename(outname); if (!tempname) return OUT_OF_MEMORY_ERROR; if ((outfile = fopen(tempname, "wb")) == NULL) { fprintf(stderr, " error: cannot open '%s' for writing\n", tempname); free(tempname); return CANT_WRITE_ERROR; } if (output_image) { verbose_printf(liq, options, " writing %d-color image as %s", output_image->num_palette, filename_part(outname)); } else { verbose_printf(liq, options, " writing truecolor image as %s", filename_part(outname)); } } pngquant_error retval; if (output_image) { retval = rwpng_write_image8(outfile, output_image); } else { retval = rwpng_write_image24(outfile, output_image24); } if (!options->using_stdout) { fclose(outfile); if (SUCCESS == retval) { // Image has been written to a temporary file and then moved over destination. // This makes replacement atomic and avoids damaging destination file on write error. if (!replace_file(tempname, outname, options->force)) { retval = CANT_WRITE_ERROR; } } if (retval) { unlink(tempname); } } free(tempname); if (retval && retval != TOO_LARGE_FILE) { fprintf(stderr, " error: failed writing image to %s (%d)\n", options->using_stdout ? "stdout" : outname, retval); } return retval; } static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose) { FILE *infile; if (using_stdin) { set_binary_mode(stdin); infile = stdin; } else if ((infile = fopen(filename, "rb")) == NULL) { fprintf(stderr, " error: cannot open %s for reading\n", filename); return READ_ERROR; } pngquant_error retval; retval = rwpng_read_image24(infile, input_image_p, strip, verbose); if (!using_stdin) { fclose(infile); } if (retval) { fprintf(stderr, " error: cannot decode image %s\n", using_stdin ? 
"from stdin" : filename_part(filename)); return retval; } *liq_image_p = liq_image_create_rgba_rows(options, (void**)input_image_p->row_pointers, input_image_p->width, input_image_p->height, input_image_p->gamma); if (!*liq_image_p) { return OUT_OF_MEMORY_ERROR; } if (!keep_input_pixels) { if (LIQ_OK != liq_image_set_memory_ownership(*liq_image_p, LIQ_OWN_ROWS | LIQ_OWN_PIXELS)) { return OUT_OF_MEMORY_ERROR; } input_image_p->row_pointers = NULL; input_image_p->rgba_data = NULL; } return SUCCESS; } static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform output_color, png8_image *output_image) { output_image->width = liq_image_get_width(input_image); output_image->height = liq_image_get_height(input_image); output_image->gamma = liq_get_output_gamma(result); output_image->output_color = output_color; /* ** Step 3.7 [GRR]: allocate memory for the entire indexed image */ output_image->indexed_data = malloc((size_t)output_image->height * (size_t)output_image->width); output_image->row_pointers = malloc((size_t)output_image->height * sizeof(output_image->row_pointers[0])); if (!output_image->indexed_data || !output_image->row_pointers) { return OUT_OF_MEMORY_ERROR; } for(size_t row = 0; row < output_image->height; row++) { output_image->row_pointers[row] = output_image->indexed_data + row * output_image->width; } const liq_palette *palette = liq_get_palette(result); // tRNS, etc. output_image->num_palette = palette->count; return SUCCESS; }
char *PNGQUANT_USAGE = "\ usage: pngquant [options] [ncolors] -- pngfile [pngfile ...]\n\ pngquant [options] [ncolors] - >stdout <stdin\n\n\ options:\n\ --force overwrite existing output files (synonym: -f)\n\ --skip-if-larger only save converted files if they're smaller than original\n\ --output file destination file path to use instead of --ext (synonym: -o)\n\ --ext new.png set custom suffix/extension for output filenames\n\ --quality min-max don't save below min, use fewer colors below max (0-100)\n\ --speed N speed/quality trade-off. 1=slow, 4=default, 11=fast & rough\n\ --nofs disable Floyd-Steinberg dithering\n\ --posterize N output lower-precision color (e.g. for ARGB4444 output)\n\ --strip remove optional metadata (default on Mac)\n\ --verbose print status messages (synonym: -v)\n\ \n\ Quantizes one or more 32-bit RGBA PNGs to 8-bit (or smaller) RGBA-palette.\n\ The output filename is the same as the input name except that\n\ it ends in \"-fs8.png\", \"-or8.png\" or your custom extension (unless the\n\ input is stdin, in which case the quantized image will go to stdout).\n\ If you pass the special output path \"-\" and a single input file, that file\n\ will be processed and the quantized image will go to stdout.\n\ The default behavior if the output file exists is to skip the conversion;\n\ use --force to overwrite. See man page for full list of options.\n"; #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <math.h> #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) #include <fcntl.h> /* O_BINARY */ #include <io.h> /* setmode() */ #include <locale.h> /* UTF-8 locale */ #else #include <unistd.h> #endif #ifdef _OPENMP #include <omp.h> #else #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif #include "rwpng.h" /* typedefs, common macros, public prototypes */ #include "libimagequant.h" /* if it fails here, run: git submodule update or add -Ilib to compiler flags */ #include "pngquant_opts.h" char *PNGQUANT_VERSION = LIQ_VERSION_STRING " (January 2022)"; static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform tag, png8_image *output_image); static void set_palette(liq_result *result, png8_image *output_image); static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose); static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options, liq_attr *liq); static char *add_filename_extension(const char *filename, const char *newext); static bool file_exists(const char *outname); static void verbose_printf(liq_attr *liq, struct pngquant_options *context, const char *fmt, ...) 
{ if (context->log_callback) { va_list va; va_start(va, fmt); int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0 va_end(va); #if defined(_MSC_VER) char *buf = malloc(required_space); #else char buf[required_space]; #endif va_start(va, fmt); vsnprintf(buf, required_space, fmt, va); va_end(va); context->log_callback(liq, buf, context->log_callback_user_info); #if defined(_MSC_VER) free(buf); #endif } } static void log_callback(const liq_attr *attr, const char *msg, void* user_info) { fprintf(stderr, "%s\n", msg); } #ifdef _OPENMP #define LOG_BUFFER_SIZE 1300 struct buffered_log { int buf_used; char buf[LOG_BUFFER_SIZE]; }; static void log_callback_buferred_flush(const liq_attr *attr, void *context) { struct buffered_log *log = context; if (log->buf_used) { fwrite(log->buf, 1, log->buf_used, stderr); fflush(stderr); log->buf_used = 0; } } static void log_callback_buferred(const liq_attr *attr, const char *msg, void* context) { struct buffered_log *log = context; int len = strlen(msg); if (len > LOG_BUFFER_SIZE-2) len = LOG_BUFFER_SIZE-2; if (len > LOG_BUFFER_SIZE - log->buf_used - 2) log_callback_buferred_flush(attr, log); memcpy(&log->buf[log->buf_used], msg, len); log->buf_used += len+1; log->buf[log->buf_used-1] = '\n'; log->buf[log->buf_used] = '\0'; } #endif void pngquant_internal_print_config(FILE *fd) { fputs("" #ifndef NDEBUG " WARNING: this is a DEBUG (slow) version.\n" /* NDEBUG disables assert() */ #endif #if !USE_SSE && (defined(__SSE__) || defined(__amd64__) || defined(__X86_64__) || defined(__i386__)) " SSE acceleration disabled.\n" #endif #if _OPENMP " Compiled with OpenMP (multicore support).\n" #endif , fd); fflush(fd); } FILE *pngquant_c_stderr() { return stderr; } FILE *pngquant_c_stdout() { return stdout; } static void print_full_version(FILE *fd) { fprintf(fd, "pngquant, %s, by Kornel Lesinski, Greg Roelofs.\n", PNGQUANT_VERSION); pngquant_internal_print_config(fd); rwpng_version_info(fd); fputs("\n", fd); } static void print_usage(FILE *fd) { fputs(PNGQUANT_USAGE, fd); } /** * N = automatic quality, uses limit unless force is set (N-N or 0-N) * -N = no better than N (same as 0-N) * N-M = no worse than N, no better than M * N- = no worse than N, perfect if possible (same as N-100) * * where N,M are numbers between 0 (lousy) and 100 (perfect) */ static bool parse_quality(const char *quality, liq_attr *options, bool *min_quality_limit) { long limit, target; const char *str = quality; char *end; long t1 = strtol(str, &end, 10); if (str == end) return false; str = end; if ('\0' == end[0] && t1 < 0) { // quality="-%d" target = -t1; limit = 0; } else if ('\0' == end[0]) { // quality="%d" target = t1; limit = t1*9/10; } else if ('-' == end[0] && '\0' == end[1]) { // quality="%d-" target = 100; limit = t1; } else { // quality="%d-%d" long t2 = strtol(str, &end, 10); if (str == end || t2 > 0) return false; target = -t2; limit = t1; } *min_quality_limit = (limit > 0); return LIQ_OK == liq_set_quality(options, limit, target); } pngquant_error pngquant_main_internal(struct pngquant_options *options, liq_attr *liq); static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options, liq_attr *liq); #ifndef PNGQUANT_NO_MAIN int main(int argc, char *argv[]) { struct pngquant_options options = { .floyd = 1.f, // floyd-steinberg dithering .strip = false, }; pngquant_error retval = pngquant_parse_options(argc, argv, &options); if (retval != SUCCESS) { return retval; } if (options.print_version) { puts(PNGQUANT_VERSION); return 
SUCCESS; } if (options.missing_arguments) { print_full_version(stderr); print_usage(stderr); return MISSING_ARGUMENT; } if (options.print_help) { print_full_version(stdout); print_usage(stdout); return SUCCESS; } #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) setlocale(LC_ALL, ".65001"); // issue #376; set UTF-8 for Unicode filenames #endif liq_attr *liq = liq_attr_create(); if (!liq) { fputs("SSE-capable CPU is required for this build.\n", stderr); return WRONG_ARCHITECTURE; } if (options.quality && !parse_quality(options.quality, liq, &options.min_quality_limit)) { fputs("Quality should be in format min-max where min and max are numbers in range 0-100.\n", stderr); return INVALID_ARGUMENT; } if (options.iebug) { // opacities above 238 will be rounded up to 255, because IE6 truncates <255 to 0. liq_set_min_opacity(liq, 238); fputs(" warning: the workaround for IE6 is deprecated\n", stderr); } if (options.verbose) { liq_set_log_callback(liq, log_callback, NULL); options.log_callback = log_callback; } if (options.last_index_transparent) { liq_set_last_index_transparent(liq, true); } if (options.speed >= 10) { options.fast_compression = true; if (options.speed == 11) { options.floyd = 0; options.speed = 10; } } if (options.speed && LIQ_OK != liq_set_speed(liq, options.speed)) { fputs("Speed should be between 1 (slow) and 11 (fast).\n", stderr); return INVALID_ARGUMENT; } if (options.colors && LIQ_OK != liq_set_max_colors(liq, options.colors)) { fputs("Number of colors must be between 2 and 256.\n", stderr); return INVALID_ARGUMENT; } if (options.posterize && LIQ_OK != liq_set_min_posterization(liq, options.posterize)) { fputs("Posterization should be number of bits in range 0-4.\n", stderr); return INVALID_ARGUMENT; } if (options.extension && options.output_file_path) { fputs("--ext and --output options can't be used at the same time\n", stderr); return INVALID_ARGUMENT; } // new filename extension depends on options used. Typically basename-fs8.png if (options.extension == NULL) { options.extension = options.floyd > 0 ? "-fs8.png" : "-or8.png"; } if (options.output_file_path && options.num_files != 1) { fputs(" error: Only one input file is allowed when --output is used. This error also happens when filenames with spaces are not in quotes.\n", stderr); return INVALID_ARGUMENT; } if (options.using_stdout && !options.using_stdin && options.num_files != 1) { fputs(" error: Only one input file is allowed when using the special output path \"-\" to write to stdout. This error also happens when filenames with spaces are not in quotes.\n", stderr); return INVALID_ARGUMENT; } if (!options.num_files && !options.using_stdin) { fputs("No input files specified.\n", stderr); if (options.verbose) { print_full_version(stderr); } print_usage(stderr); return MISSING_ARGUMENT; } retval = pngquant_main_internal(&options, liq); liq_attr_destroy(liq); return retval; } #endif // Don't use this. This is not a public API. 
pngquant_error pngquant_main_internal(struct pngquant_options *options, liq_attr *liq) { #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) setlocale(LC_ALL, ".65001"); // issue #376; set UTF-8 for Unicode filenames #endif if (options->map_file) { png24_image tmp = {.width=0}; if (SUCCESS != read_image(liq, options->map_file, false, &tmp, &options->fixed_palette_image, true, true, false)) { fprintf(stderr, " error: unable to load %s", options->map_file); return INVALID_ARGUMENT; } liq_result *tmp_quantize = liq_quantize_image(liq, options->fixed_palette_image); const liq_palette *pal = liq_get_palette(tmp_quantize); if (!pal) { fprintf(stderr, " error: unable to read colors from %s", options->map_file); return INVALID_ARGUMENT; } for(unsigned int i=0; i < pal->count; i++) { liq_image_add_fixed_color(options->fixed_palette_image, pal->entries[i]); } liq_result_destroy(tmp_quantize); } #ifdef _OPENMP // if there's a lot of files, coarse parallelism can be used if (options->num_files > 2*omp_get_max_threads()) { omp_set_nested(0); omp_set_dynamic(1); } else { omp_set_nested(1); } #endif unsigned int error_count=0, skipped_count=0, file_count=0; pngquant_error latest_error=SUCCESS; #pragma omp parallel for \ schedule(static, 1) reduction(+:skipped_count) reduction(+:error_count) reduction(+:file_count) shared(latest_error) for(int i=0; i < options->num_files; i++) { const char *filename = options->using_stdin ? "stdin" : options->files[i]; struct pngquant_options opts = *options; liq_attr *local_liq = liq_attr_copy(liq); #ifdef _OPENMP struct buffered_log buf = {0}; if (opts.log_callback && omp_get_num_threads() > 1 && opts.num_files > 1) { liq_set_log_callback(local_liq, log_callback_buferred, &buf); liq_set_log_flush_callback(local_liq, log_callback_buferred_flush, &buf); opts.log_callback = log_callback_buferred; opts.log_callback_user_info = &buf; } #endif pngquant_error retval = SUCCESS; const char *outname = opts.output_file_path; char *outname_free = NULL; if (!opts.using_stdout) { if (!outname) { outname = outname_free = add_filename_extension(filename, opts.extension); } if (!opts.force && file_exists(outname)) { fprintf(stderr, " error: '%s' exists; not overwriting\n", outname); retval = NOT_OVERWRITING_ERROR; } } if (SUCCESS == retval) { retval = pngquant_file_internal(filename, outname, &opts, local_liq); } free(outname_free); liq_attr_destroy(local_liq); if (retval) { #pragma omp critical { latest_error = retval; } if (retval == TOO_LOW_QUALITY || retval == TOO_LARGE_FILE) { skipped_count++; } else { error_count++; } } ++file_count; } if (error_count) { verbose_printf(liq, options, "There were errors quantizing %d file%s out of a total of %d file%s.", error_count, (error_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s"); } if (skipped_count) { verbose_printf(liq, options, "Skipped %d file%s out of a total of %d file%s.", skipped_count, (skipped_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s"); } if (!skipped_count && !error_count) { verbose_printf(liq, options, "Quantized %d image%s.", file_count, (file_count == 1)? "" : "s"); } if (options->fixed_palette_image) liq_image_destroy(options->fixed_palette_image); return latest_error; } /// Don't hack this. 
Instead use https://github.com/ImageOptim/libimagequant/blob/f54d2f1a3e1cf728e17326f4db0d45811c63f063/example.c static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options, liq_attr *liq) { pngquant_error retval = SUCCESS; verbose_printf(liq, options, "%s:", filename); liq_image *input_image = NULL; png24_image input_image_rwpng = {.width=0}; bool keep_input_pixels = options->skip_if_larger || (options->using_stdout && options->min_quality_limit); // original may need to be output to stdout if (SUCCESS == retval) { retval = read_image(liq, filename, options->using_stdin, &input_image_rwpng, &input_image, keep_input_pixels, options->strip, options->verbose); } int quality_percent = 90; // quality on 0-100 scale, updated upon successful remap png8_image output_image = {.width=0}; if (SUCCESS == retval) { verbose_printf(liq, options, " read %luKB file", (input_image_rwpng.file_size+1023UL)/1024UL); if (RWPNG_ICCP == input_image_rwpng.input_color) { verbose_printf(liq, options, " used embedded ICC profile to transform image to sRGB colorspace"); } else if (RWPNG_GAMA_CHRM == input_image_rwpng.input_color) { verbose_printf(liq, options, " used gAMA and cHRM chunks to transform image to sRGB colorspace"); } else if (RWPNG_ICCP_WARN_GRAY == input_image_rwpng.input_color) { verbose_printf(liq, options, " warning: ignored ICC profile in GRAY colorspace"); } else if (RWPNG_COCOA == input_image_rwpng.input_color) { // No comment } else if (RWPNG_SRGB == input_image_rwpng.input_color) { verbose_printf(liq, options, " passing sRGB tag from the input"); } else if (input_image_rwpng.gamma != 0.45455) { verbose_printf(liq, options, " converted image from gamma %2.1f to gamma 2.2", 1.0/input_image_rwpng.gamma); } // when using image as source of a fixed palette the palette is extracted using regular quantization liq_result *remap; liq_error remap_error = liq_image_quantize(options->fixed_palette_image ? options->fixed_palette_image : input_image, liq, &remap); if (LIQ_OK == remap_error) { // fixed gamma ~2.2 for the web. PNG can't store exact 1/2.2 // NB: can't change gamma here, because output_color is allowed to be an sRGB tag liq_set_output_gamma(remap, 0.45455); liq_set_dithering_level(remap, options->floyd); retval = prepare_output_image(remap, input_image, input_image_rwpng.output_color, &output_image); if (SUCCESS == retval) { if (LIQ_OK != liq_write_remapped_image_rows(remap, input_image, output_image.row_pointers)) { retval = OUT_OF_MEMORY_ERROR; } set_palette(remap, &output_image); double palette_error = liq_get_quantization_error(remap); if (palette_error >= 0) { quality_percent = liq_get_quantization_quality(remap); verbose_printf(liq, options, " mapped image to new colors...MSE=%.3f (Q=%d)", palette_error, quality_percent); } } liq_result_destroy(remap); } else if (LIQ_QUALITY_TOO_LOW == remap_error) { retval = TOO_LOW_QUALITY; } else { retval = INVALID_ARGUMENT; // dunno } } if (SUCCESS == retval) { if (options->skip_if_larger) { // this is very rough approximation, but generally avoid losing more quality than is gained in file size. // Quality is raised to 1.5, because even greater savings are needed to justify big quality loss. 
// but >50% savings are considered always worthwhile in order to allow low quality conversions to work at all const double quality = quality_percent/100.0; const double expected_reduced_size = pow(quality, 1.5); output_image.maximum_file_size = (input_image_rwpng.file_size-1) * (expected_reduced_size < 0.5 ? 0.5 : expected_reduced_size); } output_image.fast_compression = options->fast_compression; output_image.chunks = input_image_rwpng.chunks; input_image_rwpng.chunks = NULL; retval = write_image(&output_image, NULL, outname, options, liq); if (TOO_LARGE_FILE == retval) { verbose_printf(liq, options, " file exceeded expected size of %luKB", (unsigned long)output_image.maximum_file_size/1024UL); } if (SUCCESS == retval && output_image.metadata_size > 0) { verbose_printf(liq, options, " copied %dKB of additional PNG metadata", (int)(output_image.metadata_size+999)/1000); } } if (options->using_stdout && keep_input_pixels && (TOO_LARGE_FILE == retval || TOO_LOW_QUALITY == retval)) { // when outputting to stdout it'd be nasty to create 0-byte file // so if quality is too low, output 24-bit original pngquant_error write_retval = write_image(NULL, &input_image_rwpng, outname, options, liq); if (write_retval) { retval = write_retval; } } if (input_image) liq_image_destroy(input_image); rwpng_free_image24(&input_image_rwpng); rwpng_free_image8(&output_image); return retval; } static void set_palette(liq_result *result, png8_image *output_image) { const liq_palette *palette = liq_get_palette(result); output_image->num_palette = palette->count; for(unsigned int i=0; i < palette->count; i++) { const liq_color px = palette->entries[i]; output_image->palette[i] = (rwpng_rgba){.r=px.r, .g=px.g, .b=px.b, .a=px.a}; } } static bool file_exists(const char *outname) { FILE *outfile = fopen(outname, "rb"); if (outfile != NULL) { fclose(outfile); return true; } return false; } /* build the output filename from the input name by inserting "-fs8" or * "-or8" before the ".png" extension (or by appending that plus ".png" if * there isn't any extension), then make sure it doesn't exist already */ static char *add_filename_extension(const char *filename, const char *newext) { size_t x = strlen(filename); char* outname = malloc(x+4+strlen(newext)+1); if (!outname) return NULL; strcpy(outname, filename); if (x > 4 && (strncmp(outname+x-4, ".png", 4) == 0 || strncmp(outname+x-4, ".PNG", 4) == 0)) { strcpy(outname+x-4, newext); } else { strcpy(outname+x, newext); } return outname; } static char *temp_filename(const char *basename) { size_t x = strlen(basename); char *outname = malloc(x+1+4); if (!outname) return NULL; strcpy(outname, basename); strcpy(outname+x, ".tmp"); return outname; } static void set_binary_mode(FILE *fp) { #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) setmode(fp == stdout ?
1 : 0, O_BINARY); #endif } static const char *filename_part(const char *path) { const char *outfilename = strrchr(path, '/'); if (outfilename) { return outfilename+1; } else { return path; } } static bool replace_file(const char *from, const char *to, const bool force) { #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) if (force) { // On Windows rename doesn't replace unlink(to); } #endif return (0 == rename(from, to)); } static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options, liq_attr *liq) { FILE *outfile; char *tempname = NULL; if (options->using_stdout) { set_binary_mode(stdout); outfile = stdout; if (output_image) { verbose_printf(liq, options, " writing %d-color image to stdout", output_image->num_palette); } else { verbose_printf(liq, options, " writing truecolor image to stdout"); } } else { tempname = temp_filename(outname); if (!tempname) return OUT_OF_MEMORY_ERROR; if ((outfile = fopen(tempname, "wb")) == NULL) { fprintf(stderr, " error: cannot open '%s' for writing\n", tempname); free(tempname); return CANT_WRITE_ERROR; } if (output_image) { verbose_printf(liq, options, " writing %d-color image as %s", output_image->num_palette, filename_part(outname)); } else { verbose_printf(liq, options, " writing truecolor image as %s", filename_part(outname)); } } pngquant_error retval; #pragma omp critical (libpng) { if (output_image) { retval = rwpng_write_image8(outfile, output_image); } else { retval = rwpng_write_image24(outfile, output_image24); } } if (!options->using_stdout) { fclose(outfile); if (SUCCESS == retval) { // Image has been written to a temporary file and then moved over destination. // This makes replacement atomic and avoids damaging destination file on write error. if (!replace_file(tempname, outname, options->force)) { retval = CANT_WRITE_ERROR; } } if (retval) { unlink(tempname); } } free(tempname); if (retval && retval != TOO_LARGE_FILE) { fprintf(stderr, " error: failed writing image to %s (%d)\n", options->using_stdout ? "stdout" : outname, retval); } return retval; } static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose) { FILE *infile; if (using_stdin) { set_binary_mode(stdin); infile = stdin; } else if ((infile = fopen(filename, "rb")) == NULL) { fprintf(stderr, " error: cannot open %s for reading\n", filename); return READ_ERROR; } pngquant_error retval; #pragma omp critical (libpng) { retval = rwpng_read_image24(infile, input_image_p, strip, verbose); } if (!using_stdin) { fclose(infile); } if (retval) { fprintf(stderr, " error: cannot decode image %s\n", using_stdin ? 
"from stdin" : filename_part(filename)); return retval; } *liq_image_p = liq_image_create_rgba_rows(options, (void**)input_image_p->row_pointers, input_image_p->width, input_image_p->height, input_image_p->gamma); if (!*liq_image_p) { return OUT_OF_MEMORY_ERROR; } if (!keep_input_pixels) { if (LIQ_OK != liq_image_set_memory_ownership(*liq_image_p, LIQ_OWN_ROWS | LIQ_OWN_PIXELS)) { return OUT_OF_MEMORY_ERROR; } input_image_p->row_pointers = NULL; input_image_p->rgba_data = NULL; } return SUCCESS; } static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform output_color, png8_image *output_image) { output_image->width = liq_image_get_width(input_image); output_image->height = liq_image_get_height(input_image); output_image->gamma = liq_get_output_gamma(result); output_image->output_color = output_color; /* ** Step 3.7 [GRR]: allocate memory for the entire indexed image */ output_image->indexed_data = malloc((size_t)output_image->height * (size_t)output_image->width); output_image->row_pointers = malloc((size_t)output_image->height * sizeof(output_image->row_pointers[0])); if (!output_image->indexed_data || !output_image->row_pointers) { return OUT_OF_MEMORY_ERROR; } for(size_t row = 0; row < output_image->height; row++) { output_image->row_pointers[row] = output_image->indexed_data + row * output_image->width; } const liq_palette *palette = liq_get_palette(result); // tRNS, etc. output_image->num_palette = palette->count; return SUCCESS; }
convolution_1x1_pack8_fp16.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_transform_kernel_fp16_pack8_avx(const Mat& kernel, Mat& weight_data_pack8, int num_input, int num_output) { // src = kw-kh-inch-outch // dst = 8b-8a-kw-kh-inch/8a-outch/8b Mat weight_data_r2 = kernel.reshape(1, num_input, num_output); weight_data_pack8.create(1, num_input / 8, num_output / 8, (size_t)2 * 64, 64); for (int q = 0; q + 7 < num_output; q += 8) { const Mat k0 = weight_data_r2.channel(q); const Mat k1 = weight_data_r2.channel(q + 1); const Mat k2 = weight_data_r2.channel(q + 2); const Mat k3 = weight_data_r2.channel(q + 3); const Mat k4 = weight_data_r2.channel(q + 4); const Mat k5 = weight_data_r2.channel(q + 5); const Mat k6 = weight_data_r2.channel(q + 6); const Mat k7 = weight_data_r2.channel(q + 7); Mat g0 = weight_data_pack8.channel(q / 8); for (int p = 0; p + 7 < num_input; p += 8) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k04 = k0.row(p + 4); const float* k05 = k0.row(p + 5); const float* k06 = k0.row(p + 6); const float* k07 = k0.row(p + 7); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k14 = k1.row(p + 4); const float* k15 = k1.row(p + 5); const float* k16 = k1.row(p + 6); const float* k17 = k1.row(p + 7); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k24 = k2.row(p + 4); const float* k25 = k2.row(p + 5); const float* k26 = k2.row(p + 6); const float* k27 = k2.row(p + 7); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); const float* k34 = k3.row(p + 4); const float* k35 = k3.row(p + 5); const float* k36 = k3.row(p + 6); const float* k37 = k3.row(p + 7); const float* k40 = k4.row(p); const float* k41 = k4.row(p + 1); const float* k42 = k4.row(p + 2); const float* k43 = k4.row(p + 3); const float* k44 = k4.row(p + 4); const float* k45 = k4.row(p + 5); const float* k46 = k4.row(p + 6); const float* k47 = k4.row(p + 7); const float* k50 = k5.row(p); const float* k51 = k5.row(p + 1); const float* k52 = k5.row(p + 2); const float* k53 = k5.row(p + 3); const float* k54 = k5.row(p + 4); const float* k55 = k5.row(p + 5); const float* k56 = k5.row(p + 6); const float* k57 = k5.row(p + 7); const float* k60 = k6.row(p); const float* k61 = k6.row(p + 1); const float* k62 = k6.row(p + 2); const float* k63 = k6.row(p + 3); const float* k64 = k6.row(p + 4); const float* k65 = k6.row(p + 5); const float* k66 = k6.row(p + 6); const float* k67 = k6.row(p + 7); const float* k70 = k7.row(p); const float* k71 = k7.row(p + 1); const float* k72 = k7.row(p + 2); const float* k73 = k7.row(p + 
3); const float* k74 = k7.row(p + 4); const float* k75 = k7.row(p + 5); const float* k76 = k7.row(p + 6); const float* k77 = k7.row(p + 7); unsigned short* g00 = (unsigned short*)g0.row(p / 8); g00[0] = float32_to_float16(k00[0]); g00[1] = float32_to_float16(k10[0]); g00[2] = float32_to_float16(k20[0]); g00[3] = float32_to_float16(k30[0]); g00[4] = float32_to_float16(k40[0]); g00[5] = float32_to_float16(k50[0]); g00[6] = float32_to_float16(k60[0]); g00[7] = float32_to_float16(k70[0]); g00 += 8; g00[0] = float32_to_float16(k01[0]); g00[1] = float32_to_float16(k11[0]); g00[2] = float32_to_float16(k21[0]); g00[3] = float32_to_float16(k31[0]); g00[4] = float32_to_float16(k41[0]); g00[5] = float32_to_float16(k51[0]); g00[6] = float32_to_float16(k61[0]); g00[7] = float32_to_float16(k71[0]); g00 += 8; g00[0] = float32_to_float16(k02[0]); g00[1] = float32_to_float16(k12[0]); g00[2] = float32_to_float16(k22[0]); g00[3] = float32_to_float16(k32[0]); g00[4] = float32_to_float16(k42[0]); g00[5] = float32_to_float16(k52[0]); g00[6] = float32_to_float16(k62[0]); g00[7] = float32_to_float16(k72[0]); g00 += 8; g00[0] = float32_to_float16(k03[0]); g00[1] = float32_to_float16(k13[0]); g00[2] = float32_to_float16(k23[0]); g00[3] = float32_to_float16(k33[0]); g00[4] = float32_to_float16(k43[0]); g00[5] = float32_to_float16(k53[0]); g00[6] = float32_to_float16(k63[0]); g00[7] = float32_to_float16(k73[0]); g00 += 8; g00[0] = float32_to_float16(k04[0]); g00[1] = float32_to_float16(k14[0]); g00[2] = float32_to_float16(k24[0]); g00[3] = float32_to_float16(k34[0]); g00[4] = float32_to_float16(k44[0]); g00[5] = float32_to_float16(k54[0]); g00[6] = float32_to_float16(k64[0]); g00[7] = float32_to_float16(k74[0]); g00 += 8; g00[0] = float32_to_float16(k05[0]); g00[1] = float32_to_float16(k15[0]); g00[2] = float32_to_float16(k25[0]); g00[3] = float32_to_float16(k35[0]); g00[4] = float32_to_float16(k45[0]); g00[5] = float32_to_float16(k55[0]); g00[6] = float32_to_float16(k65[0]); g00[7] = float32_to_float16(k75[0]); g00 += 8; g00[0] = float32_to_float16(k06[0]); g00[1] = float32_to_float16(k16[0]); g00[2] = float32_to_float16(k26[0]); g00[3] = float32_to_float16(k36[0]); g00[4] = float32_to_float16(k46[0]); g00[5] = float32_to_float16(k56[0]); g00[6] = float32_to_float16(k66[0]); g00[7] = float32_to_float16(k76[0]); g00 += 8; g00[0] = float32_to_float16(k07[0]); g00[1] = float32_to_float16(k17[0]); g00[2] = float32_to_float16(k27[0]); g00[3] = float32_to_float16(k37[0]); g00[4] = float32_to_float16(k47[0]); g00[5] = float32_to_float16(k57[0]); g00[6] = float32_to_float16(k67[0]); g00[7] = float32_to_float16(k77[0]); g00 += 8; } } } static void conv1x1s1_sgemm_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; int outw = top_blob.w; int outh = top_blob.h; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; const int size = w * h; const float* bias = _bias; // interleave Mat tmp(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator); { int nn_size = size / 12; int remain_size_start = nn_size * 12; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 12; const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12); for (int q = 0; q < inch; q++) { __m256 _r0 = 
_mm256_loadu_ps(img0); __m256 _r1 = _mm256_loadu_ps(img0 + 8); __m256 _r2 = _mm256_loadu_ps(img0 + 16); __m256 _r3 = _mm256_loadu_ps(img0 + 24); __m256 _r4 = _mm256_loadu_ps(img0 + 32); __m256 _r5 = _mm256_loadu_ps(img0 + 40); __m256 _r6 = _mm256_loadu_ps(img0 + 48); __m256 _r7 = _mm256_loadu_ps(img0 + 56); __m256 _r8 = _mm256_loadu_ps(img0 + 64); __m256 _r9 = _mm256_loadu_ps(img0 + 72); __m256 _r10 = _mm256_loadu_ps(img0 + 80); __m256 _r11 = _mm256_loadu_ps(img0 + 88); _mm256_storeu_ps(tmpptr, _r0); _mm256_storeu_ps(tmpptr + 8, _r1); _mm256_storeu_ps(tmpptr + 16, _r2); _mm256_storeu_ps(tmpptr + 24, _r3); _mm256_storeu_ps(tmpptr + 32, _r4); _mm256_storeu_ps(tmpptr + 40, _r5); _mm256_storeu_ps(tmpptr + 48, _r6); _mm256_storeu_ps(tmpptr + 56, _r7); _mm256_storeu_ps(tmpptr + 64, _r8); _mm256_storeu_ps(tmpptr + 72, _r9); _mm256_storeu_ps(tmpptr + 80, _r10); _mm256_storeu_ps(tmpptr + 88, _r11); tmpptr += 96; img0 += bottom_blob.cstep * 8; } } nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(img0); __m256 _r1 = _mm256_loadu_ps(img0 + 8); __m256 _r2 = _mm256_loadu_ps(img0 + 16); __m256 _r3 = _mm256_loadu_ps(img0 + 24); __m256 _r4 = _mm256_loadu_ps(img0 + 32); __m256 _r5 = _mm256_loadu_ps(img0 + 40); __m256 _r6 = _mm256_loadu_ps(img0 + 48); __m256 _r7 = _mm256_loadu_ps(img0 + 56); _mm256_storeu_ps(tmpptr, _r0); _mm256_storeu_ps(tmpptr + 8, _r1); _mm256_storeu_ps(tmpptr + 16, _r2); _mm256_storeu_ps(tmpptr + 24, _r3); _mm256_storeu_ps(tmpptr + 32, _r4); _mm256_storeu_ps(tmpptr + 40, _r5); _mm256_storeu_ps(tmpptr + 48, _r6); _mm256_storeu_ps(tmpptr + 56, _r7); tmpptr += 64; img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(img0); __m256 _r1 = _mm256_loadu_ps(img0 + 8); __m256 _r2 = _mm256_loadu_ps(img0 + 16); __m256 _r3 = _mm256_loadu_ps(img0 + 24); _mm256_storeu_ps(tmpptr, _r0); _mm256_storeu_ps(tmpptr + 8, _r1); _mm256_storeu_ps(tmpptr + 16, _r2); _mm256_storeu_ps(tmpptr + 24, _r3); tmpptr += 32; img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(img0); __m256 _r1 = _mm256_loadu_ps(img0 + 8); _mm256_storeu_ps(tmpptr, _r0); _mm256_storeu_ps(tmpptr + 8, _r1); tmpptr += 16; img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); for (int q = 0; q < inch; q++) { 
__m256 _r0 = _mm256_loadu_ps(img0); _mm256_storeu_ps(tmpptr, _r0); tmpptr += 8; img0 += bottom_blob.cstep * 8; } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); float* outptr = out; int i = 0; for (; i + 11 < size; i += 12) { const float* tmpptr = tmp.channel(i / 12); __m256 _sum0 = _bias0; __m256 _sum1 = _bias0; __m256 _sum2 = _bias0; __m256 _sum3 = _bias0; __m256 _sum4 = _bias0; __m256 _sum5 = _bias0; __m256 _sum6 = _bias0; __m256 _sum7 = _bias0; __m256 _sum8 = _bias0; __m256 _sum9 = _bias0; __m256 _sum10 = _bias0; __m256 _sum11 = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); __m256 _val00 = _mm256_broadcast_ss(tmpptr); __m256 _val01 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val02 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val03 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val04 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val05 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val06 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val07 = _mm256_broadcast_ss(tmpptr + 7); __m256 _val10 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val11 = _mm256_broadcast_ss(tmpptr + 9); __m256 _val12 = _mm256_broadcast_ss(tmpptr + 10); __m256 _val13 = _mm256_broadcast_ss(tmpptr + 11); __m256 _val14 = _mm256_broadcast_ss(tmpptr + 12); __m256 _val15 = _mm256_broadcast_ss(tmpptr + 13); __m256 _val16 = _mm256_broadcast_ss(tmpptr + 14); __m256 _val17 = _mm256_broadcast_ss(tmpptr + 15); _sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0); _sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0); _sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0); _sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0); _sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0); _sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0); _sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0); _sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0); _sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1); _sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1); _sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1); _sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1); _sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1); _sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1); _sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1); _sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1); __m256 _val20 = _mm256_broadcast_ss(tmpptr + 16); __m256 _val21 = _mm256_broadcast_ss(tmpptr + 17); __m256 _val22 = _mm256_broadcast_ss(tmpptr + 18); __m256 _val23 = _mm256_broadcast_ss(tmpptr + 19); __m256 _val24 = _mm256_broadcast_ss(tmpptr + 20); __m256 _val25 = _mm256_broadcast_ss(tmpptr + 21); __m256 _val26 = _mm256_broadcast_ss(tmpptr + 22); __m256 _val27 = _mm256_broadcast_ss(tmpptr + 23); __m256 _val30 = _mm256_broadcast_ss(tmpptr + 24); __m256 _val31 = _mm256_broadcast_ss(tmpptr + 25); __m256 _val32 = _mm256_broadcast_ss(tmpptr + 26); __m256 _val33 = _mm256_broadcast_ss(tmpptr + 27); __m256 _val34 = _mm256_broadcast_ss(tmpptr + 28); __m256 _val35 = _mm256_broadcast_ss(tmpptr + 29); __m256 _val36 = _mm256_broadcast_ss(tmpptr + 30); __m256 _val37 = _mm256_broadcast_ss(tmpptr + 31); _sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2); _sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2); _sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2); _sum2 = 
_mm256_fmadd_ps(_w3, _val23, _sum2); _sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2); _sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2); _sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2); _sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2); _sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3); _sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3); _sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3); _sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3); _sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3); _sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3); _sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3); _sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3); __m256 _val40 = _mm256_broadcast_ss(tmpptr + 32); __m256 _val41 = _mm256_broadcast_ss(tmpptr + 33); __m256 _val42 = _mm256_broadcast_ss(tmpptr + 34); __m256 _val43 = _mm256_broadcast_ss(tmpptr + 35); __m256 _val44 = _mm256_broadcast_ss(tmpptr + 36); __m256 _val45 = _mm256_broadcast_ss(tmpptr + 37); __m256 _val46 = _mm256_broadcast_ss(tmpptr + 38); __m256 _val47 = _mm256_broadcast_ss(tmpptr + 39); __m256 _val50 = _mm256_broadcast_ss(tmpptr + 40); __m256 _val51 = _mm256_broadcast_ss(tmpptr + 41); __m256 _val52 = _mm256_broadcast_ss(tmpptr + 42); __m256 _val53 = _mm256_broadcast_ss(tmpptr + 43); __m256 _val54 = _mm256_broadcast_ss(tmpptr + 44); __m256 _val55 = _mm256_broadcast_ss(tmpptr + 45); __m256 _val56 = _mm256_broadcast_ss(tmpptr + 46); __m256 _val57 = _mm256_broadcast_ss(tmpptr + 47); _sum4 = _mm256_fmadd_ps(_w0, _val40, _sum4); _sum4 = _mm256_fmadd_ps(_w1, _val41, _sum4); _sum4 = _mm256_fmadd_ps(_w2, _val42, _sum4); _sum4 = _mm256_fmadd_ps(_w3, _val43, _sum4); _sum4 = _mm256_fmadd_ps(_w4, _val44, _sum4); _sum4 = _mm256_fmadd_ps(_w5, _val45, _sum4); _sum4 = _mm256_fmadd_ps(_w6, _val46, _sum4); _sum4 = _mm256_fmadd_ps(_w7, _val47, _sum4); _sum5 = _mm256_fmadd_ps(_w0, _val50, _sum5); _sum5 = _mm256_fmadd_ps(_w1, _val51, _sum5); _sum5 = _mm256_fmadd_ps(_w2, _val52, _sum5); _sum5 = _mm256_fmadd_ps(_w3, _val53, _sum5); _sum5 = _mm256_fmadd_ps(_w4, _val54, _sum5); _sum5 = _mm256_fmadd_ps(_w5, _val55, _sum5); _sum5 = _mm256_fmadd_ps(_w6, _val56, _sum5); _sum5 = _mm256_fmadd_ps(_w7, _val57, _sum5); __m256 _val60 = _mm256_broadcast_ss(tmpptr + 48); __m256 _val61 = _mm256_broadcast_ss(tmpptr + 49); __m256 _val62 = _mm256_broadcast_ss(tmpptr + 50); __m256 _val63 = _mm256_broadcast_ss(tmpptr + 51); __m256 _val64 = _mm256_broadcast_ss(tmpptr + 52); __m256 _val65 = _mm256_broadcast_ss(tmpptr + 53); __m256 _val66 = _mm256_broadcast_ss(tmpptr + 54); __m256 _val67 = _mm256_broadcast_ss(tmpptr + 55); __m256 _val70 = _mm256_broadcast_ss(tmpptr + 56); __m256 _val71 = _mm256_broadcast_ss(tmpptr + 57); __m256 _val72 = _mm256_broadcast_ss(tmpptr + 58); __m256 _val73 = _mm256_broadcast_ss(tmpptr + 59); __m256 _val74 = _mm256_broadcast_ss(tmpptr + 60); __m256 _val75 = _mm256_broadcast_ss(tmpptr + 61); __m256 _val76 = _mm256_broadcast_ss(tmpptr + 62); __m256 _val77 = _mm256_broadcast_ss(tmpptr + 63); _sum6 = _mm256_fmadd_ps(_w0, _val60, _sum6); _sum6 = _mm256_fmadd_ps(_w1, _val61, _sum6); _sum6 = _mm256_fmadd_ps(_w2, _val62, _sum6); _sum6 = _mm256_fmadd_ps(_w3, _val63, _sum6); _sum6 = _mm256_fmadd_ps(_w4, _val64, _sum6); _sum6 = _mm256_fmadd_ps(_w5, _val65, _sum6); _sum6 = _mm256_fmadd_ps(_w6, _val66, _sum6); _sum6 = _mm256_fmadd_ps(_w7, _val67, _sum6); _sum7 = _mm256_fmadd_ps(_w0, _val70, _sum7); _sum7 = _mm256_fmadd_ps(_w1, _val71, _sum7); _sum7 = _mm256_fmadd_ps(_w2, _val72, _sum7); _sum7 = _mm256_fmadd_ps(_w3, _val73, _sum7); _sum7 = _mm256_fmadd_ps(_w4, _val74, _sum7); _sum7 = _mm256_fmadd_ps(_w5, _val75, _sum7); _sum7 = 
_mm256_fmadd_ps(_w6, _val76, _sum7); _sum7 = _mm256_fmadd_ps(_w7, _val77, _sum7); __m256 _val80 = _mm256_broadcast_ss(tmpptr + 64); __m256 _val81 = _mm256_broadcast_ss(tmpptr + 65); __m256 _val82 = _mm256_broadcast_ss(tmpptr + 66); __m256 _val83 = _mm256_broadcast_ss(tmpptr + 67); __m256 _val84 = _mm256_broadcast_ss(tmpptr + 68); __m256 _val85 = _mm256_broadcast_ss(tmpptr + 69); __m256 _val86 = _mm256_broadcast_ss(tmpptr + 70); __m256 _val87 = _mm256_broadcast_ss(tmpptr + 71); __m256 _val90 = _mm256_broadcast_ss(tmpptr + 72); __m256 _val91 = _mm256_broadcast_ss(tmpptr + 73); __m256 _val92 = _mm256_broadcast_ss(tmpptr + 74); __m256 _val93 = _mm256_broadcast_ss(tmpptr + 75); __m256 _val94 = _mm256_broadcast_ss(tmpptr + 76); __m256 _val95 = _mm256_broadcast_ss(tmpptr + 77); __m256 _val96 = _mm256_broadcast_ss(tmpptr + 78); __m256 _val97 = _mm256_broadcast_ss(tmpptr + 79); _sum8 = _mm256_fmadd_ps(_w0, _val80, _sum8); _sum8 = _mm256_fmadd_ps(_w1, _val81, _sum8); _sum8 = _mm256_fmadd_ps(_w2, _val82, _sum8); _sum8 = _mm256_fmadd_ps(_w3, _val83, _sum8); _sum8 = _mm256_fmadd_ps(_w4, _val84, _sum8); _sum8 = _mm256_fmadd_ps(_w5, _val85, _sum8); _sum8 = _mm256_fmadd_ps(_w6, _val86, _sum8); _sum8 = _mm256_fmadd_ps(_w7, _val87, _sum8); _sum9 = _mm256_fmadd_ps(_w0, _val90, _sum9); _sum9 = _mm256_fmadd_ps(_w1, _val91, _sum9); _sum9 = _mm256_fmadd_ps(_w2, _val92, _sum9); _sum9 = _mm256_fmadd_ps(_w3, _val93, _sum9); _sum9 = _mm256_fmadd_ps(_w4, _val94, _sum9); _sum9 = _mm256_fmadd_ps(_w5, _val95, _sum9); _sum9 = _mm256_fmadd_ps(_w6, _val96, _sum9); _sum9 = _mm256_fmadd_ps(_w7, _val97, _sum9); __m256 _val100 = _mm256_broadcast_ss(tmpptr + 80); __m256 _val101 = _mm256_broadcast_ss(tmpptr + 81); __m256 _val102 = _mm256_broadcast_ss(tmpptr + 82); __m256 _val103 = _mm256_broadcast_ss(tmpptr + 83); __m256 _val104 = _mm256_broadcast_ss(tmpptr + 84); __m256 _val105 = _mm256_broadcast_ss(tmpptr + 85); __m256 _val106 = _mm256_broadcast_ss(tmpptr + 86); __m256 _val107 = _mm256_broadcast_ss(tmpptr + 87); __m256 _val110 = _mm256_broadcast_ss(tmpptr + 88); __m256 _val111 = _mm256_broadcast_ss(tmpptr + 89); __m256 _val112 = _mm256_broadcast_ss(tmpptr + 90); __m256 _val113 = _mm256_broadcast_ss(tmpptr + 91); __m256 _val114 = _mm256_broadcast_ss(tmpptr + 92); __m256 _val115 = _mm256_broadcast_ss(tmpptr + 93); __m256 _val116 = _mm256_broadcast_ss(tmpptr + 94); __m256 _val117 = _mm256_broadcast_ss(tmpptr + 95); _sum10 = _mm256_fmadd_ps(_w0, _val100, _sum10); _sum10 = _mm256_fmadd_ps(_w1, _val101, _sum10); _sum10 = _mm256_fmadd_ps(_w2, _val102, _sum10); _sum10 = _mm256_fmadd_ps(_w3, _val103, _sum10); _sum10 = _mm256_fmadd_ps(_w4, _val104, _sum10); _sum10 = _mm256_fmadd_ps(_w5, _val105, _sum10); _sum10 = _mm256_fmadd_ps(_w6, _val106, _sum10); _sum10 = _mm256_fmadd_ps(_w7, _val107, _sum10); _sum11 = _mm256_fmadd_ps(_w0, _val110, _sum11); _sum11 = _mm256_fmadd_ps(_w1, _val111, _sum11); _sum11 = _mm256_fmadd_ps(_w2, _val112, _sum11); _sum11 = _mm256_fmadd_ps(_w3, _val113, _sum11); _sum11 = _mm256_fmadd_ps(_w4, _val114, _sum11); _sum11 = _mm256_fmadd_ps(_w5, _val115, _sum11); _sum11 = _mm256_fmadd_ps(_w6, _val116, _sum11); _sum11 = _mm256_fmadd_ps(_w7, _val117, _sum11); tmpptr += 96; kptr += 64; } _mm256_storeu_ps(outptr, _sum0); _mm256_storeu_ps(outptr + 8, _sum1); _mm256_storeu_ps(outptr + 16, _sum2); _mm256_storeu_ps(outptr + 24, _sum3); _mm256_storeu_ps(outptr + 32, _sum4); _mm256_storeu_ps(outptr + 40, _sum5); _mm256_storeu_ps(outptr + 48, _sum6); _mm256_storeu_ps(outptr + 56, _sum7); _mm256_storeu_ps(outptr + 64, _sum8); 
_mm256_storeu_ps(outptr + 72, _sum9); _mm256_storeu_ps(outptr + 80, _sum10); _mm256_storeu_ps(outptr + 88, _sum11); outptr += 96; } for (; i + 7 < size; i += 8) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); __m256 _sum0 = _bias0; __m256 _sum1 = _bias0; __m256 _sum2 = _bias0; __m256 _sum3 = _bias0; __m256 _sum4 = _bias0; __m256 _sum5 = _bias0; __m256 _sum6 = _bias0; __m256 _sum7 = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); __m256 _val00 = _mm256_broadcast_ss(tmpptr); __m256 _val01 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val02 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val03 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val04 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val05 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val06 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val07 = _mm256_broadcast_ss(tmpptr + 7); __m256 _val10 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val11 = _mm256_broadcast_ss(tmpptr + 9); __m256 _val12 = _mm256_broadcast_ss(tmpptr + 10); __m256 _val13 = _mm256_broadcast_ss(tmpptr + 11); __m256 _val14 = _mm256_broadcast_ss(tmpptr + 12); __m256 _val15 = _mm256_broadcast_ss(tmpptr + 13); __m256 _val16 = _mm256_broadcast_ss(tmpptr + 14); __m256 _val17 = _mm256_broadcast_ss(tmpptr + 15); _sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0); _sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0); _sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0); _sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0); _sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0); _sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0); _sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0); _sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0); _sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1); _sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1); _sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1); _sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1); _sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1); _sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1); _sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1); _sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1); __m256 _val20 = _mm256_broadcast_ss(tmpptr + 16); __m256 _val21 = _mm256_broadcast_ss(tmpptr + 17); __m256 _val22 = _mm256_broadcast_ss(tmpptr + 18); __m256 _val23 = _mm256_broadcast_ss(tmpptr + 19); __m256 _val24 = _mm256_broadcast_ss(tmpptr + 20); __m256 _val25 = _mm256_broadcast_ss(tmpptr + 21); __m256 _val26 = _mm256_broadcast_ss(tmpptr + 22); __m256 _val27 = _mm256_broadcast_ss(tmpptr + 23); __m256 _val30 = _mm256_broadcast_ss(tmpptr + 24); __m256 _val31 = _mm256_broadcast_ss(tmpptr + 25); __m256 _val32 = _mm256_broadcast_ss(tmpptr + 26); __m256 _val33 = _mm256_broadcast_ss(tmpptr + 27); __m256 _val34 = _mm256_broadcast_ss(tmpptr + 28); __m256 _val35 = _mm256_broadcast_ss(tmpptr + 29); __m256 _val36 = _mm256_broadcast_ss(tmpptr + 30); __m256 _val37 = _mm256_broadcast_ss(tmpptr + 31); _sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2); _sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2); _sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2); _sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2); _sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2); _sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2); _sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2); _sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2); _sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3); _sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3); _sum3 = 
_mm256_fmadd_ps(_w2, _val32, _sum3); _sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3); _sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3); _sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3); _sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3); _sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3); __m256 _val40 = _mm256_broadcast_ss(tmpptr + 32); __m256 _val41 = _mm256_broadcast_ss(tmpptr + 33); __m256 _val42 = _mm256_broadcast_ss(tmpptr + 34); __m256 _val43 = _mm256_broadcast_ss(tmpptr + 35); __m256 _val44 = _mm256_broadcast_ss(tmpptr + 36); __m256 _val45 = _mm256_broadcast_ss(tmpptr + 37); __m256 _val46 = _mm256_broadcast_ss(tmpptr + 38); __m256 _val47 = _mm256_broadcast_ss(tmpptr + 39); __m256 _val50 = _mm256_broadcast_ss(tmpptr + 40); __m256 _val51 = _mm256_broadcast_ss(tmpptr + 41); __m256 _val52 = _mm256_broadcast_ss(tmpptr + 42); __m256 _val53 = _mm256_broadcast_ss(tmpptr + 43); __m256 _val54 = _mm256_broadcast_ss(tmpptr + 44); __m256 _val55 = _mm256_broadcast_ss(tmpptr + 45); __m256 _val56 = _mm256_broadcast_ss(tmpptr + 46); __m256 _val57 = _mm256_broadcast_ss(tmpptr + 47); _sum4 = _mm256_fmadd_ps(_w0, _val40, _sum4); _sum4 = _mm256_fmadd_ps(_w1, _val41, _sum4); _sum4 = _mm256_fmadd_ps(_w2, _val42, _sum4); _sum4 = _mm256_fmadd_ps(_w3, _val43, _sum4); _sum4 = _mm256_fmadd_ps(_w4, _val44, _sum4); _sum4 = _mm256_fmadd_ps(_w5, _val45, _sum4); _sum4 = _mm256_fmadd_ps(_w6, _val46, _sum4); _sum4 = _mm256_fmadd_ps(_w7, _val47, _sum4); _sum5 = _mm256_fmadd_ps(_w0, _val50, _sum5); _sum5 = _mm256_fmadd_ps(_w1, _val51, _sum5); _sum5 = _mm256_fmadd_ps(_w2, _val52, _sum5); _sum5 = _mm256_fmadd_ps(_w3, _val53, _sum5); _sum5 = _mm256_fmadd_ps(_w4, _val54, _sum5); _sum5 = _mm256_fmadd_ps(_w5, _val55, _sum5); _sum5 = _mm256_fmadd_ps(_w6, _val56, _sum5); _sum5 = _mm256_fmadd_ps(_w7, _val57, _sum5); __m256 _val60 = _mm256_broadcast_ss(tmpptr + 48); __m256 _val61 = _mm256_broadcast_ss(tmpptr + 49); __m256 _val62 = _mm256_broadcast_ss(tmpptr + 50); __m256 _val63 = _mm256_broadcast_ss(tmpptr + 51); __m256 _val64 = _mm256_broadcast_ss(tmpptr + 52); __m256 _val65 = _mm256_broadcast_ss(tmpptr + 53); __m256 _val66 = _mm256_broadcast_ss(tmpptr + 54); __m256 _val67 = _mm256_broadcast_ss(tmpptr + 55); __m256 _val70 = _mm256_broadcast_ss(tmpptr + 56); __m256 _val71 = _mm256_broadcast_ss(tmpptr + 57); __m256 _val72 = _mm256_broadcast_ss(tmpptr + 58); __m256 _val73 = _mm256_broadcast_ss(tmpptr + 59); __m256 _val74 = _mm256_broadcast_ss(tmpptr + 60); __m256 _val75 = _mm256_broadcast_ss(tmpptr + 61); __m256 _val76 = _mm256_broadcast_ss(tmpptr + 62); __m256 _val77 = _mm256_broadcast_ss(tmpptr + 63); _sum6 = _mm256_fmadd_ps(_w0, _val60, _sum6); _sum6 = _mm256_fmadd_ps(_w1, _val61, _sum6); _sum6 = _mm256_fmadd_ps(_w2, _val62, _sum6); _sum6 = _mm256_fmadd_ps(_w3, _val63, _sum6); _sum6 = _mm256_fmadd_ps(_w4, _val64, _sum6); _sum6 = _mm256_fmadd_ps(_w5, _val65, _sum6); _sum6 = _mm256_fmadd_ps(_w6, _val66, _sum6); _sum6 = _mm256_fmadd_ps(_w7, _val67, _sum6); _sum7 = _mm256_fmadd_ps(_w0, _val70, _sum7); _sum7 = _mm256_fmadd_ps(_w1, _val71, _sum7); _sum7 = _mm256_fmadd_ps(_w2, _val72, _sum7); _sum7 = _mm256_fmadd_ps(_w3, _val73, _sum7); _sum7 = _mm256_fmadd_ps(_w4, _val74, _sum7); _sum7 = _mm256_fmadd_ps(_w5, _val75, _sum7); _sum7 = _mm256_fmadd_ps(_w6, _val76, _sum7); _sum7 = _mm256_fmadd_ps(_w7, _val77, _sum7); tmpptr += 64; kptr += 64; } _mm256_storeu_ps(outptr, _sum0); _mm256_storeu_ps(outptr + 8, _sum1); _mm256_storeu_ps(outptr + 16, _sum2); _mm256_storeu_ps(outptr + 24, _sum3); _mm256_storeu_ps(outptr + 32, _sum4); _mm256_storeu_ps(outptr + 40, _sum5); 
_mm256_storeu_ps(outptr + 48, _sum6); _mm256_storeu_ps(outptr + 56, _sum7); outptr += 64; } for (; i + 3 < size; i += 4) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); __m256 _sum0 = _bias0; __m256 _sum1 = _bias0; __m256 _sum2 = _bias0; __m256 _sum3 = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); __m256 _val00 = _mm256_broadcast_ss(tmpptr); __m256 _val01 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val02 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val03 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val04 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val05 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val06 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val07 = _mm256_broadcast_ss(tmpptr + 7); __m256 _val10 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val11 = _mm256_broadcast_ss(tmpptr + 9); __m256 _val12 = _mm256_broadcast_ss(tmpptr + 10); __m256 _val13 = _mm256_broadcast_ss(tmpptr + 11); __m256 _val14 = _mm256_broadcast_ss(tmpptr + 12); __m256 _val15 = _mm256_broadcast_ss(tmpptr + 13); __m256 _val16 = _mm256_broadcast_ss(tmpptr + 14); __m256 _val17 = _mm256_broadcast_ss(tmpptr + 15); _sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0); _sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0); _sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0); _sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0); _sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0); _sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0); _sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0); _sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0); _sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1); _sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1); _sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1); _sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1); _sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1); _sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1); _sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1); _sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1); __m256 _val20 = _mm256_broadcast_ss(tmpptr + 16); __m256 _val21 = _mm256_broadcast_ss(tmpptr + 17); __m256 _val22 = _mm256_broadcast_ss(tmpptr + 18); __m256 _val23 = _mm256_broadcast_ss(tmpptr + 19); __m256 _val24 = _mm256_broadcast_ss(tmpptr + 20); __m256 _val25 = _mm256_broadcast_ss(tmpptr + 21); __m256 _val26 = _mm256_broadcast_ss(tmpptr + 22); __m256 _val27 = _mm256_broadcast_ss(tmpptr + 23); __m256 _val30 = _mm256_broadcast_ss(tmpptr + 24); __m256 _val31 = _mm256_broadcast_ss(tmpptr + 25); __m256 _val32 = _mm256_broadcast_ss(tmpptr + 26); __m256 _val33 = _mm256_broadcast_ss(tmpptr + 27); __m256 _val34 = _mm256_broadcast_ss(tmpptr + 28); __m256 _val35 = _mm256_broadcast_ss(tmpptr + 29); __m256 _val36 = _mm256_broadcast_ss(tmpptr + 30); __m256 _val37 = _mm256_broadcast_ss(tmpptr + 31); _sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2); _sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2); _sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2); _sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2); _sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2); _sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2); _sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2); _sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2); _sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3); _sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3); _sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3); _sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3); _sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3); 
_sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3); _sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3); _sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3); tmpptr += 32; kptr += 64; } _mm256_storeu_ps(outptr, _sum0); _mm256_storeu_ps(outptr + 8, _sum1); _mm256_storeu_ps(outptr + 16, _sum2); _mm256_storeu_ps(outptr + 24, _sum3); outptr += 32; } for (; i + 1 < size; i += 2) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); __m256 _sum0 = _bias0; __m256 _sum1 = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _val00 = _mm256_broadcast_ss(tmpptr); __m256 _val01 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val02 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val03 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val04 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val05 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val06 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val07 = _mm256_broadcast_ss(tmpptr + 7); __m256 _val10 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val11 = _mm256_broadcast_ss(tmpptr + 9); __m256 _val12 = _mm256_broadcast_ss(tmpptr + 10); __m256 _val13 = _mm256_broadcast_ss(tmpptr + 11); __m256 _val14 = _mm256_broadcast_ss(tmpptr + 12); __m256 _val15 = _mm256_broadcast_ss(tmpptr + 13); __m256 _val16 = _mm256_broadcast_ss(tmpptr + 14); __m256 _val17 = _mm256_broadcast_ss(tmpptr + 15); __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); _sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0); _sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0); _sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0); _sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0); _sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0); _sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0); _sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0); _sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0); _sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1); _sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1); _sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1); _sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1); _sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1); _sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1); _sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1); _sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1); tmpptr += 16; kptr += 64; } _mm256_storeu_ps(outptr, _sum0); _mm256_storeu_ps(outptr + 8, _sum1); outptr += 16; } for (; i < size; i++) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); __m256 _sum = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _val0 = _mm256_broadcast_ss(tmpptr); __m256 _val1 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val2 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val3 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val4 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val5 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val6 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val7 = _mm256_broadcast_ss(tmpptr + 7); __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); _sum = _mm256_fmadd_ps(_w0, _val0, _sum); _sum = _mm256_fmadd_ps(_w1, _val1, _sum); _sum = _mm256_fmadd_ps(_w2, _val2, _sum); _sum = _mm256_fmadd_ps(_w3, _val3, 
_sum); _sum = _mm256_fmadd_ps(_w4, _val4, _sum); _sum = _mm256_fmadd_ps(_w5, _val5, _sum); _sum = _mm256_fmadd_ps(_w6, _val6, _sum); _sum = _mm256_fmadd_ps(_w7, _val7, _sum); tmpptr += 8; kptr += 64; } _mm256_storeu_ps(outptr, _sum); outptr += 8; } } } static void conv1x1s2_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = (w - 2 * outw + w) * 8; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < channels; p++) { const float* r0 = bottom_blob.channel(p); float* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { __m256 _v = _mm256_loadu_ps(r0); _mm256_storeu_ps(outptr, _v); r0 += 16; outptr += 8; } r0 += tailstep; } } conv1x1s1_sgemm_fp16_pack8_avx(bottom_blob_shrinked, top_blob, kernel, _bias, opt); }
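/*
 * Standalone sketch, not part of ncnn: index math for the fp16 pack8 weight
 * layout produced by conv1x1s1_sgemm_transform_kernel_fp16_pack8_avx() above
 * ("8b-8a-kw-kh-inch/8a-outch/8b" for a 1x1 kernel). Weights are grouped into
 * 8x8 (output, input) blocks of 64 half floats each; within a block they are
 * stored input-channel-major, 8 output channels at a time, so one loadfp16()
 * fetches the weights of 8 outputs for a single input channel. The helper
 * name is hypothetical.
 */
#include <stddef.h>

static size_t packed_weight_offset(int oc, int ic, int num_input)
{
    /* which 8x8 block, in outch-block-major order over inch blocks */
    const size_t block = (size_t)(oc / 8) * (num_input / 8) + (ic / 8);
    /* within a block: input offset selects the group of 8, output offset the lane */
    return block * 64 + (ic % 8) * 8 + (oc % 8); /* offset in fp16 elements */
}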
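/*
 * Standalone sketch, not part of ncnn: the tile-index expression used by
 * conv1x1s1_sgemm_fp16_pack8_avx() above when addressing the interleaved
 * "tmp" Mat. Pixels are packed into tiles of 12, and the remainder into
 * tiles of 8, 4, 2 and 1; evaluated at a tile's starting pixel i, the
 * expression counts how many tiles precede it, i.e. which tmp channel the
 * tile occupies.
 */
#include <stdio.h>

static int tile_channel(int i)
{
    return i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2;
}

int main(void)
{
    const int size = 30; /* e.g. a 6x5 feature map */
    int i = 0;
    /* mirrors the loop structure of the sgemm kernel above */
    for (; i + 11 < size; i += 12) printf("12-wide tile at pixel %2d -> channel %d\n", i, tile_channel(i));
    for (; i + 7 < size; i += 8)   printf(" 8-wide tile at pixel %2d -> channel %d\n", i, tile_channel(i));
    for (; i + 3 < size; i += 4)   printf(" 4-wide tile at pixel %2d -> channel %d\n", i, tile_channel(i));
    for (; i + 1 < size; i += 2)   printf(" 2-wide tile at pixel %2d -> channel %d\n", i, tile_channel(i));
    for (; i < size; i++)          printf(" 1-wide tile at pixel %2d -> channel %d\n", i, tile_channel(i));
    return 0; /* for size 30: channels 0 and 1 (12-wide), 2 (4-wide), 3 (2-wide) */
}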
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_transform_kernel_fp16_pack8_avx(const Mat& kernel, Mat& weight_data_pack8, int num_input, int num_output) { // src = kw-kh-inch-outch // dst = 8b-8a-kw-kh-inch/8a-outch/8b Mat weight_data_r2 = kernel.reshape(1, num_input, num_output); weight_data_pack8.create(1, num_input / 8, num_output / 8, (size_t)2 * 64, 64); for (int q = 0; q + 7 < num_output; q += 8) { const Mat k0 = weight_data_r2.channel(q); const Mat k1 = weight_data_r2.channel(q + 1); const Mat k2 = weight_data_r2.channel(q + 2); const Mat k3 = weight_data_r2.channel(q + 3); const Mat k4 = weight_data_r2.channel(q + 4); const Mat k5 = weight_data_r2.channel(q + 5); const Mat k6 = weight_data_r2.channel(q + 6); const Mat k7 = weight_data_r2.channel(q + 7); Mat g0 = weight_data_pack8.channel(q / 8); for (int p = 0; p + 7 < num_input; p += 8) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k04 = k0.row(p + 4); const float* k05 = k0.row(p + 5); const float* k06 = k0.row(p + 6); const float* k07 = k0.row(p + 7); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k14 = k1.row(p + 4); const float* k15 = k1.row(p + 5); const float* k16 = k1.row(p + 6); const float* k17 = k1.row(p + 7); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k24 = k2.row(p + 4); const float* k25 = k2.row(p + 5); const float* k26 = k2.row(p + 6); const float* k27 = k2.row(p + 7); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); const float* k34 = k3.row(p + 4); const float* k35 = k3.row(p + 5); const float* k36 = k3.row(p + 6); const float* k37 = k3.row(p + 7); const float* k40 = k4.row(p); const float* k41 = k4.row(p + 1); const float* k42 = k4.row(p + 2); const float* k43 = k4.row(p + 3); const float* k44 = k4.row(p + 4); const float* k45 = k4.row(p + 5); const float* k46 = k4.row(p + 6); const float* k47 = k4.row(p + 7); const float* k50 = k5.row(p); const float* k51 = k5.row(p + 1); const float* k52 = k5.row(p + 2); const float* k53 = k5.row(p + 3); const float* k54 = k5.row(p + 4); const float* k55 = k5.row(p + 5); const float* k56 = k5.row(p + 6); const float* k57 = k5.row(p + 7); const float* k60 = k6.row(p); const float* k61 = k6.row(p + 1); const float* k62 = k6.row(p + 2); const float* k63 = k6.row(p + 3); const float* k64 = k6.row(p + 4); const float* k65 = k6.row(p + 5); const float* k66 = k6.row(p + 6); const float* k67 = k6.row(p + 7); const float* k70 = k7.row(p); const float* k71 = k7.row(p + 1); const float* k72 = k7.row(p + 2); const float* k73 = k7.row(p + 
3); const float* k74 = k7.row(p + 4); const float* k75 = k7.row(p + 5); const float* k76 = k7.row(p + 6); const float* k77 = k7.row(p + 7); unsigned short* g00 = (unsigned short*)g0.row(p / 8); g00[0] = float32_to_float16(k00[0]); g00[1] = float32_to_float16(k10[0]); g00[2] = float32_to_float16(k20[0]); g00[3] = float32_to_float16(k30[0]); g00[4] = float32_to_float16(k40[0]); g00[5] = float32_to_float16(k50[0]); g00[6] = float32_to_float16(k60[0]); g00[7] = float32_to_float16(k70[0]); g00 += 8; g00[0] = float32_to_float16(k01[0]); g00[1] = float32_to_float16(k11[0]); g00[2] = float32_to_float16(k21[0]); g00[3] = float32_to_float16(k31[0]); g00[4] = float32_to_float16(k41[0]); g00[5] = float32_to_float16(k51[0]); g00[6] = float32_to_float16(k61[0]); g00[7] = float32_to_float16(k71[0]); g00 += 8; g00[0] = float32_to_float16(k02[0]); g00[1] = float32_to_float16(k12[0]); g00[2] = float32_to_float16(k22[0]); g00[3] = float32_to_float16(k32[0]); g00[4] = float32_to_float16(k42[0]); g00[5] = float32_to_float16(k52[0]); g00[6] = float32_to_float16(k62[0]); g00[7] = float32_to_float16(k72[0]); g00 += 8; g00[0] = float32_to_float16(k03[0]); g00[1] = float32_to_float16(k13[0]); g00[2] = float32_to_float16(k23[0]); g00[3] = float32_to_float16(k33[0]); g00[4] = float32_to_float16(k43[0]); g00[5] = float32_to_float16(k53[0]); g00[6] = float32_to_float16(k63[0]); g00[7] = float32_to_float16(k73[0]); g00 += 8; g00[0] = float32_to_float16(k04[0]); g00[1] = float32_to_float16(k14[0]); g00[2] = float32_to_float16(k24[0]); g00[3] = float32_to_float16(k34[0]); g00[4] = float32_to_float16(k44[0]); g00[5] = float32_to_float16(k54[0]); g00[6] = float32_to_float16(k64[0]); g00[7] = float32_to_float16(k74[0]); g00 += 8; g00[0] = float32_to_float16(k05[0]); g00[1] = float32_to_float16(k15[0]); g00[2] = float32_to_float16(k25[0]); g00[3] = float32_to_float16(k35[0]); g00[4] = float32_to_float16(k45[0]); g00[5] = float32_to_float16(k55[0]); g00[6] = float32_to_float16(k65[0]); g00[7] = float32_to_float16(k75[0]); g00 += 8; g00[0] = float32_to_float16(k06[0]); g00[1] = float32_to_float16(k16[0]); g00[2] = float32_to_float16(k26[0]); g00[3] = float32_to_float16(k36[0]); g00[4] = float32_to_float16(k46[0]); g00[5] = float32_to_float16(k56[0]); g00[6] = float32_to_float16(k66[0]); g00[7] = float32_to_float16(k76[0]); g00 += 8; g00[0] = float32_to_float16(k07[0]); g00[1] = float32_to_float16(k17[0]); g00[2] = float32_to_float16(k27[0]); g00[3] = float32_to_float16(k37[0]); g00[4] = float32_to_float16(k47[0]); g00[5] = float32_to_float16(k57[0]); g00[6] = float32_to_float16(k67[0]); g00[7] = float32_to_float16(k77[0]); g00 += 8; } } } static void conv1x1s1_sgemm_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; int outw = top_blob.w; int outh = top_blob.h; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; const int size = w * h; const float* bias = _bias; // interleave Mat tmp(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator); { int nn_size = size / 12; int remain_size_start = nn_size * 12; for (int ii = 0; ii < nn_size; ii++) { int i = ii * 12; const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12); for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(img0); __m256 _r1 = _mm256_loadu_ps(img0 + 8); 
__m256 _r2 = _mm256_loadu_ps(img0 + 16); __m256 _r3 = _mm256_loadu_ps(img0 + 24); __m256 _r4 = _mm256_loadu_ps(img0 + 32); __m256 _r5 = _mm256_loadu_ps(img0 + 40); __m256 _r6 = _mm256_loadu_ps(img0 + 48); __m256 _r7 = _mm256_loadu_ps(img0 + 56); __m256 _r8 = _mm256_loadu_ps(img0 + 64); __m256 _r9 = _mm256_loadu_ps(img0 + 72); __m256 _r10 = _mm256_loadu_ps(img0 + 80); __m256 _r11 = _mm256_loadu_ps(img0 + 88); _mm256_storeu_ps(tmpptr, _r0); _mm256_storeu_ps(tmpptr + 8, _r1); _mm256_storeu_ps(tmpptr + 16, _r2); _mm256_storeu_ps(tmpptr + 24, _r3); _mm256_storeu_ps(tmpptr + 32, _r4); _mm256_storeu_ps(tmpptr + 40, _r5); _mm256_storeu_ps(tmpptr + 48, _r6); _mm256_storeu_ps(tmpptr + 56, _r7); _mm256_storeu_ps(tmpptr + 64, _r8); _mm256_storeu_ps(tmpptr + 72, _r9); _mm256_storeu_ps(tmpptr + 80, _r10); _mm256_storeu_ps(tmpptr + 88, _r11); tmpptr += 96; img0 += bottom_blob.cstep * 8; } } nn_size = (size - remain_size_start) >> 3; for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(img0); __m256 _r1 = _mm256_loadu_ps(img0 + 8); __m256 _r2 = _mm256_loadu_ps(img0 + 16); __m256 _r3 = _mm256_loadu_ps(img0 + 24); __m256 _r4 = _mm256_loadu_ps(img0 + 32); __m256 _r5 = _mm256_loadu_ps(img0 + 40); __m256 _r6 = _mm256_loadu_ps(img0 + 48); __m256 _r7 = _mm256_loadu_ps(img0 + 56); _mm256_storeu_ps(tmpptr, _r0); _mm256_storeu_ps(tmpptr + 8, _r1); _mm256_storeu_ps(tmpptr + 16, _r2); _mm256_storeu_ps(tmpptr + 24, _r3); _mm256_storeu_ps(tmpptr + 32, _r4); _mm256_storeu_ps(tmpptr + 40, _r5); _mm256_storeu_ps(tmpptr + 48, _r6); _mm256_storeu_ps(tmpptr + 56, _r7); tmpptr += 64; img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(img0); __m256 _r1 = _mm256_loadu_ps(img0 + 8); __m256 _r2 = _mm256_loadu_ps(img0 + 16); __m256 _r3 = _mm256_loadu_ps(img0 + 24); _mm256_storeu_ps(tmpptr, _r0); _mm256_storeu_ps(tmpptr + 8, _r1); _mm256_storeu_ps(tmpptr + 16, _r2); _mm256_storeu_ps(tmpptr + 24, _r3); tmpptr += 32; img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(img0); __m256 _r1 = _mm256_loadu_ps(img0 + 8); _mm256_storeu_ps(tmpptr, _r0); _mm256_storeu_ps(tmpptr + 8, _r1); tmpptr += 16; img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 1; for (int i = remain_size_start; i < size; i++) { const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(img0); _mm256_storeu_ps(tmpptr, _r0); tmpptr += 8; img0 += bottom_blob.cstep * 8; } } } for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); __m256 _bias0 = bias ? 
_mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); float* outptr = out; int i = 0; for (; i + 11 < size; i += 12) { const float* tmpptr = tmp.channel(i / 12); __m256 _sum0 = _bias0; __m256 _sum1 = _bias0; __m256 _sum2 = _bias0; __m256 _sum3 = _bias0; __m256 _sum4 = _bias0; __m256 _sum5 = _bias0; __m256 _sum6 = _bias0; __m256 _sum7 = _bias0; __m256 _sum8 = _bias0; __m256 _sum9 = _bias0; __m256 _sum10 = _bias0; __m256 _sum11 = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); __m256 _val00 = _mm256_broadcast_ss(tmpptr); __m256 _val01 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val02 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val03 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val04 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val05 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val06 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val07 = _mm256_broadcast_ss(tmpptr + 7); __m256 _val10 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val11 = _mm256_broadcast_ss(tmpptr + 9); __m256 _val12 = _mm256_broadcast_ss(tmpptr + 10); __m256 _val13 = _mm256_broadcast_ss(tmpptr + 11); __m256 _val14 = _mm256_broadcast_ss(tmpptr + 12); __m256 _val15 = _mm256_broadcast_ss(tmpptr + 13); __m256 _val16 = _mm256_broadcast_ss(tmpptr + 14); __m256 _val17 = _mm256_broadcast_ss(tmpptr + 15); _sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0); _sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0); _sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0); _sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0); _sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0); _sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0); _sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0); _sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0); _sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1); _sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1); _sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1); _sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1); _sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1); _sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1); _sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1); _sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1); __m256 _val20 = _mm256_broadcast_ss(tmpptr + 16); __m256 _val21 = _mm256_broadcast_ss(tmpptr + 17); __m256 _val22 = _mm256_broadcast_ss(tmpptr + 18); __m256 _val23 = _mm256_broadcast_ss(tmpptr + 19); __m256 _val24 = _mm256_broadcast_ss(tmpptr + 20); __m256 _val25 = _mm256_broadcast_ss(tmpptr + 21); __m256 _val26 = _mm256_broadcast_ss(tmpptr + 22); __m256 _val27 = _mm256_broadcast_ss(tmpptr + 23); __m256 _val30 = _mm256_broadcast_ss(tmpptr + 24); __m256 _val31 = _mm256_broadcast_ss(tmpptr + 25); __m256 _val32 = _mm256_broadcast_ss(tmpptr + 26); __m256 _val33 = _mm256_broadcast_ss(tmpptr + 27); __m256 _val34 = _mm256_broadcast_ss(tmpptr + 28); __m256 _val35 = _mm256_broadcast_ss(tmpptr + 29); __m256 _val36 = _mm256_broadcast_ss(tmpptr + 30); __m256 _val37 = _mm256_broadcast_ss(tmpptr + 31); _sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2); _sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2); _sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2); _sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2); _sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2); _sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2); _sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2); _sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2); _sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3); _sum3 = 
_mm256_fmadd_ps(_w1, _val31, _sum3); _sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3); _sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3); _sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3); _sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3); _sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3); _sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3); __m256 _val40 = _mm256_broadcast_ss(tmpptr + 32); __m256 _val41 = _mm256_broadcast_ss(tmpptr + 33); __m256 _val42 = _mm256_broadcast_ss(tmpptr + 34); __m256 _val43 = _mm256_broadcast_ss(tmpptr + 35); __m256 _val44 = _mm256_broadcast_ss(tmpptr + 36); __m256 _val45 = _mm256_broadcast_ss(tmpptr + 37); __m256 _val46 = _mm256_broadcast_ss(tmpptr + 38); __m256 _val47 = _mm256_broadcast_ss(tmpptr + 39); __m256 _val50 = _mm256_broadcast_ss(tmpptr + 40); __m256 _val51 = _mm256_broadcast_ss(tmpptr + 41); __m256 _val52 = _mm256_broadcast_ss(tmpptr + 42); __m256 _val53 = _mm256_broadcast_ss(tmpptr + 43); __m256 _val54 = _mm256_broadcast_ss(tmpptr + 44); __m256 _val55 = _mm256_broadcast_ss(tmpptr + 45); __m256 _val56 = _mm256_broadcast_ss(tmpptr + 46); __m256 _val57 = _mm256_broadcast_ss(tmpptr + 47); _sum4 = _mm256_fmadd_ps(_w0, _val40, _sum4); _sum4 = _mm256_fmadd_ps(_w1, _val41, _sum4); _sum4 = _mm256_fmadd_ps(_w2, _val42, _sum4); _sum4 = _mm256_fmadd_ps(_w3, _val43, _sum4); _sum4 = _mm256_fmadd_ps(_w4, _val44, _sum4); _sum4 = _mm256_fmadd_ps(_w5, _val45, _sum4); _sum4 = _mm256_fmadd_ps(_w6, _val46, _sum4); _sum4 = _mm256_fmadd_ps(_w7, _val47, _sum4); _sum5 = _mm256_fmadd_ps(_w0, _val50, _sum5); _sum5 = _mm256_fmadd_ps(_w1, _val51, _sum5); _sum5 = _mm256_fmadd_ps(_w2, _val52, _sum5); _sum5 = _mm256_fmadd_ps(_w3, _val53, _sum5); _sum5 = _mm256_fmadd_ps(_w4, _val54, _sum5); _sum5 = _mm256_fmadd_ps(_w5, _val55, _sum5); _sum5 = _mm256_fmadd_ps(_w6, _val56, _sum5); _sum5 = _mm256_fmadd_ps(_w7, _val57, _sum5); __m256 _val60 = _mm256_broadcast_ss(tmpptr + 48); __m256 _val61 = _mm256_broadcast_ss(tmpptr + 49); __m256 _val62 = _mm256_broadcast_ss(tmpptr + 50); __m256 _val63 = _mm256_broadcast_ss(tmpptr + 51); __m256 _val64 = _mm256_broadcast_ss(tmpptr + 52); __m256 _val65 = _mm256_broadcast_ss(tmpptr + 53); __m256 _val66 = _mm256_broadcast_ss(tmpptr + 54); __m256 _val67 = _mm256_broadcast_ss(tmpptr + 55); __m256 _val70 = _mm256_broadcast_ss(tmpptr + 56); __m256 _val71 = _mm256_broadcast_ss(tmpptr + 57); __m256 _val72 = _mm256_broadcast_ss(tmpptr + 58); __m256 _val73 = _mm256_broadcast_ss(tmpptr + 59); __m256 _val74 = _mm256_broadcast_ss(tmpptr + 60); __m256 _val75 = _mm256_broadcast_ss(tmpptr + 61); __m256 _val76 = _mm256_broadcast_ss(tmpptr + 62); __m256 _val77 = _mm256_broadcast_ss(tmpptr + 63); _sum6 = _mm256_fmadd_ps(_w0, _val60, _sum6); _sum6 = _mm256_fmadd_ps(_w1, _val61, _sum6); _sum6 = _mm256_fmadd_ps(_w2, _val62, _sum6); _sum6 = _mm256_fmadd_ps(_w3, _val63, _sum6); _sum6 = _mm256_fmadd_ps(_w4, _val64, _sum6); _sum6 = _mm256_fmadd_ps(_w5, _val65, _sum6); _sum6 = _mm256_fmadd_ps(_w6, _val66, _sum6); _sum6 = _mm256_fmadd_ps(_w7, _val67, _sum6); _sum7 = _mm256_fmadd_ps(_w0, _val70, _sum7); _sum7 = _mm256_fmadd_ps(_w1, _val71, _sum7); _sum7 = _mm256_fmadd_ps(_w2, _val72, _sum7); _sum7 = _mm256_fmadd_ps(_w3, _val73, _sum7); _sum7 = _mm256_fmadd_ps(_w4, _val74, _sum7); _sum7 = _mm256_fmadd_ps(_w5, _val75, _sum7); _sum7 = _mm256_fmadd_ps(_w6, _val76, _sum7); _sum7 = _mm256_fmadd_ps(_w7, _val77, _sum7); __m256 _val80 = _mm256_broadcast_ss(tmpptr + 64); __m256 _val81 = _mm256_broadcast_ss(tmpptr + 65); __m256 _val82 = _mm256_broadcast_ss(tmpptr + 66); __m256 _val83 = _mm256_broadcast_ss(tmpptr + 67); __m256 
_val84 = _mm256_broadcast_ss(tmpptr + 68); __m256 _val85 = _mm256_broadcast_ss(tmpptr + 69); __m256 _val86 = _mm256_broadcast_ss(tmpptr + 70); __m256 _val87 = _mm256_broadcast_ss(tmpptr + 71); __m256 _val90 = _mm256_broadcast_ss(tmpptr + 72); __m256 _val91 = _mm256_broadcast_ss(tmpptr + 73); __m256 _val92 = _mm256_broadcast_ss(tmpptr + 74); __m256 _val93 = _mm256_broadcast_ss(tmpptr + 75); __m256 _val94 = _mm256_broadcast_ss(tmpptr + 76); __m256 _val95 = _mm256_broadcast_ss(tmpptr + 77); __m256 _val96 = _mm256_broadcast_ss(tmpptr + 78); __m256 _val97 = _mm256_broadcast_ss(tmpptr + 79); _sum8 = _mm256_fmadd_ps(_w0, _val80, _sum8); _sum8 = _mm256_fmadd_ps(_w1, _val81, _sum8); _sum8 = _mm256_fmadd_ps(_w2, _val82, _sum8); _sum8 = _mm256_fmadd_ps(_w3, _val83, _sum8); _sum8 = _mm256_fmadd_ps(_w4, _val84, _sum8); _sum8 = _mm256_fmadd_ps(_w5, _val85, _sum8); _sum8 = _mm256_fmadd_ps(_w6, _val86, _sum8); _sum8 = _mm256_fmadd_ps(_w7, _val87, _sum8); _sum9 = _mm256_fmadd_ps(_w0, _val90, _sum9); _sum9 = _mm256_fmadd_ps(_w1, _val91, _sum9); _sum9 = _mm256_fmadd_ps(_w2, _val92, _sum9); _sum9 = _mm256_fmadd_ps(_w3, _val93, _sum9); _sum9 = _mm256_fmadd_ps(_w4, _val94, _sum9); _sum9 = _mm256_fmadd_ps(_w5, _val95, _sum9); _sum9 = _mm256_fmadd_ps(_w6, _val96, _sum9); _sum9 = _mm256_fmadd_ps(_w7, _val97, _sum9); __m256 _val100 = _mm256_broadcast_ss(tmpptr + 80); __m256 _val101 = _mm256_broadcast_ss(tmpptr + 81); __m256 _val102 = _mm256_broadcast_ss(tmpptr + 82); __m256 _val103 = _mm256_broadcast_ss(tmpptr + 83); __m256 _val104 = _mm256_broadcast_ss(tmpptr + 84); __m256 _val105 = _mm256_broadcast_ss(tmpptr + 85); __m256 _val106 = _mm256_broadcast_ss(tmpptr + 86); __m256 _val107 = _mm256_broadcast_ss(tmpptr + 87); __m256 _val110 = _mm256_broadcast_ss(tmpptr + 88); __m256 _val111 = _mm256_broadcast_ss(tmpptr + 89); __m256 _val112 = _mm256_broadcast_ss(tmpptr + 90); __m256 _val113 = _mm256_broadcast_ss(tmpptr + 91); __m256 _val114 = _mm256_broadcast_ss(tmpptr + 92); __m256 _val115 = _mm256_broadcast_ss(tmpptr + 93); __m256 _val116 = _mm256_broadcast_ss(tmpptr + 94); __m256 _val117 = _mm256_broadcast_ss(tmpptr + 95); _sum10 = _mm256_fmadd_ps(_w0, _val100, _sum10); _sum10 = _mm256_fmadd_ps(_w1, _val101, _sum10); _sum10 = _mm256_fmadd_ps(_w2, _val102, _sum10); _sum10 = _mm256_fmadd_ps(_w3, _val103, _sum10); _sum10 = _mm256_fmadd_ps(_w4, _val104, _sum10); _sum10 = _mm256_fmadd_ps(_w5, _val105, _sum10); _sum10 = _mm256_fmadd_ps(_w6, _val106, _sum10); _sum10 = _mm256_fmadd_ps(_w7, _val107, _sum10); _sum11 = _mm256_fmadd_ps(_w0, _val110, _sum11); _sum11 = _mm256_fmadd_ps(_w1, _val111, _sum11); _sum11 = _mm256_fmadd_ps(_w2, _val112, _sum11); _sum11 = _mm256_fmadd_ps(_w3, _val113, _sum11); _sum11 = _mm256_fmadd_ps(_w4, _val114, _sum11); _sum11 = _mm256_fmadd_ps(_w5, _val115, _sum11); _sum11 = _mm256_fmadd_ps(_w6, _val116, _sum11); _sum11 = _mm256_fmadd_ps(_w7, _val117, _sum11); tmpptr += 96; kptr += 64; } _mm256_storeu_ps(outptr, _sum0); _mm256_storeu_ps(outptr + 8, _sum1); _mm256_storeu_ps(outptr + 16, _sum2); _mm256_storeu_ps(outptr + 24, _sum3); _mm256_storeu_ps(outptr + 32, _sum4); _mm256_storeu_ps(outptr + 40, _sum5); _mm256_storeu_ps(outptr + 48, _sum6); _mm256_storeu_ps(outptr + 56, _sum7); _mm256_storeu_ps(outptr + 64, _sum8); _mm256_storeu_ps(outptr + 72, _sum9); _mm256_storeu_ps(outptr + 80, _sum10); _mm256_storeu_ps(outptr + 88, _sum11); outptr += 96; } for (; i + 7 < size; i += 8) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); __m256 _sum0 = _bias0; __m256 _sum1 = _bias0; __m256 _sum2 = _bias0; __m256 
_sum3 = _bias0; __m256 _sum4 = _bias0; __m256 _sum5 = _bias0; __m256 _sum6 = _bias0; __m256 _sum7 = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); __m256 _val00 = _mm256_broadcast_ss(tmpptr); __m256 _val01 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val02 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val03 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val04 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val05 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val06 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val07 = _mm256_broadcast_ss(tmpptr + 7); __m256 _val10 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val11 = _mm256_broadcast_ss(tmpptr + 9); __m256 _val12 = _mm256_broadcast_ss(tmpptr + 10); __m256 _val13 = _mm256_broadcast_ss(tmpptr + 11); __m256 _val14 = _mm256_broadcast_ss(tmpptr + 12); __m256 _val15 = _mm256_broadcast_ss(tmpptr + 13); __m256 _val16 = _mm256_broadcast_ss(tmpptr + 14); __m256 _val17 = _mm256_broadcast_ss(tmpptr + 15); _sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0); _sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0); _sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0); _sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0); _sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0); _sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0); _sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0); _sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0); _sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1); _sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1); _sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1); _sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1); _sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1); _sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1); _sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1); _sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1); __m256 _val20 = _mm256_broadcast_ss(tmpptr + 16); __m256 _val21 = _mm256_broadcast_ss(tmpptr + 17); __m256 _val22 = _mm256_broadcast_ss(tmpptr + 18); __m256 _val23 = _mm256_broadcast_ss(tmpptr + 19); __m256 _val24 = _mm256_broadcast_ss(tmpptr + 20); __m256 _val25 = _mm256_broadcast_ss(tmpptr + 21); __m256 _val26 = _mm256_broadcast_ss(tmpptr + 22); __m256 _val27 = _mm256_broadcast_ss(tmpptr + 23); __m256 _val30 = _mm256_broadcast_ss(tmpptr + 24); __m256 _val31 = _mm256_broadcast_ss(tmpptr + 25); __m256 _val32 = _mm256_broadcast_ss(tmpptr + 26); __m256 _val33 = _mm256_broadcast_ss(tmpptr + 27); __m256 _val34 = _mm256_broadcast_ss(tmpptr + 28); __m256 _val35 = _mm256_broadcast_ss(tmpptr + 29); __m256 _val36 = _mm256_broadcast_ss(tmpptr + 30); __m256 _val37 = _mm256_broadcast_ss(tmpptr + 31); _sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2); _sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2); _sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2); _sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2); _sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2); _sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2); _sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2); _sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2); _sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3); _sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3); _sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3); _sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3); _sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3); _sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3); _sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3); _sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3); __m256 _val40 = _mm256_broadcast_ss(tmpptr + 
32); __m256 _val41 = _mm256_broadcast_ss(tmpptr + 33); __m256 _val42 = _mm256_broadcast_ss(tmpptr + 34); __m256 _val43 = _mm256_broadcast_ss(tmpptr + 35); __m256 _val44 = _mm256_broadcast_ss(tmpptr + 36); __m256 _val45 = _mm256_broadcast_ss(tmpptr + 37); __m256 _val46 = _mm256_broadcast_ss(tmpptr + 38); __m256 _val47 = _mm256_broadcast_ss(tmpptr + 39); __m256 _val50 = _mm256_broadcast_ss(tmpptr + 40); __m256 _val51 = _mm256_broadcast_ss(tmpptr + 41); __m256 _val52 = _mm256_broadcast_ss(tmpptr + 42); __m256 _val53 = _mm256_broadcast_ss(tmpptr + 43); __m256 _val54 = _mm256_broadcast_ss(tmpptr + 44); __m256 _val55 = _mm256_broadcast_ss(tmpptr + 45); __m256 _val56 = _mm256_broadcast_ss(tmpptr + 46); __m256 _val57 = _mm256_broadcast_ss(tmpptr + 47); _sum4 = _mm256_fmadd_ps(_w0, _val40, _sum4); _sum4 = _mm256_fmadd_ps(_w1, _val41, _sum4); _sum4 = _mm256_fmadd_ps(_w2, _val42, _sum4); _sum4 = _mm256_fmadd_ps(_w3, _val43, _sum4); _sum4 = _mm256_fmadd_ps(_w4, _val44, _sum4); _sum4 = _mm256_fmadd_ps(_w5, _val45, _sum4); _sum4 = _mm256_fmadd_ps(_w6, _val46, _sum4); _sum4 = _mm256_fmadd_ps(_w7, _val47, _sum4); _sum5 = _mm256_fmadd_ps(_w0, _val50, _sum5); _sum5 = _mm256_fmadd_ps(_w1, _val51, _sum5); _sum5 = _mm256_fmadd_ps(_w2, _val52, _sum5); _sum5 = _mm256_fmadd_ps(_w3, _val53, _sum5); _sum5 = _mm256_fmadd_ps(_w4, _val54, _sum5); _sum5 = _mm256_fmadd_ps(_w5, _val55, _sum5); _sum5 = _mm256_fmadd_ps(_w6, _val56, _sum5); _sum5 = _mm256_fmadd_ps(_w7, _val57, _sum5); __m256 _val60 = _mm256_broadcast_ss(tmpptr + 48); __m256 _val61 = _mm256_broadcast_ss(tmpptr + 49); __m256 _val62 = _mm256_broadcast_ss(tmpptr + 50); __m256 _val63 = _mm256_broadcast_ss(tmpptr + 51); __m256 _val64 = _mm256_broadcast_ss(tmpptr + 52); __m256 _val65 = _mm256_broadcast_ss(tmpptr + 53); __m256 _val66 = _mm256_broadcast_ss(tmpptr + 54); __m256 _val67 = _mm256_broadcast_ss(tmpptr + 55); __m256 _val70 = _mm256_broadcast_ss(tmpptr + 56); __m256 _val71 = _mm256_broadcast_ss(tmpptr + 57); __m256 _val72 = _mm256_broadcast_ss(tmpptr + 58); __m256 _val73 = _mm256_broadcast_ss(tmpptr + 59); __m256 _val74 = _mm256_broadcast_ss(tmpptr + 60); __m256 _val75 = _mm256_broadcast_ss(tmpptr + 61); __m256 _val76 = _mm256_broadcast_ss(tmpptr + 62); __m256 _val77 = _mm256_broadcast_ss(tmpptr + 63); _sum6 = _mm256_fmadd_ps(_w0, _val60, _sum6); _sum6 = _mm256_fmadd_ps(_w1, _val61, _sum6); _sum6 = _mm256_fmadd_ps(_w2, _val62, _sum6); _sum6 = _mm256_fmadd_ps(_w3, _val63, _sum6); _sum6 = _mm256_fmadd_ps(_w4, _val64, _sum6); _sum6 = _mm256_fmadd_ps(_w5, _val65, _sum6); _sum6 = _mm256_fmadd_ps(_w6, _val66, _sum6); _sum6 = _mm256_fmadd_ps(_w7, _val67, _sum6); _sum7 = _mm256_fmadd_ps(_w0, _val70, _sum7); _sum7 = _mm256_fmadd_ps(_w1, _val71, _sum7); _sum7 = _mm256_fmadd_ps(_w2, _val72, _sum7); _sum7 = _mm256_fmadd_ps(_w3, _val73, _sum7); _sum7 = _mm256_fmadd_ps(_w4, _val74, _sum7); _sum7 = _mm256_fmadd_ps(_w5, _val75, _sum7); _sum7 = _mm256_fmadd_ps(_w6, _val76, _sum7); _sum7 = _mm256_fmadd_ps(_w7, _val77, _sum7); tmpptr += 64; kptr += 64; } _mm256_storeu_ps(outptr, _sum0); _mm256_storeu_ps(outptr + 8, _sum1); _mm256_storeu_ps(outptr + 16, _sum2); _mm256_storeu_ps(outptr + 24, _sum3); _mm256_storeu_ps(outptr + 32, _sum4); _mm256_storeu_ps(outptr + 40, _sum5); _mm256_storeu_ps(outptr + 48, _sum6); _mm256_storeu_ps(outptr + 56, _sum7); outptr += 64; } for (; i + 3 < size; i += 4) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); __m256 _sum0 = _bias0; __m256 _sum1 = _bias0; __m256 _sum2 = _bias0; __m256 _sum3 = _bias0; const unsigned short* 
kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); __m256 _val00 = _mm256_broadcast_ss(tmpptr); __m256 _val01 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val02 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val03 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val04 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val05 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val06 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val07 = _mm256_broadcast_ss(tmpptr + 7); __m256 _val10 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val11 = _mm256_broadcast_ss(tmpptr + 9); __m256 _val12 = _mm256_broadcast_ss(tmpptr + 10); __m256 _val13 = _mm256_broadcast_ss(tmpptr + 11); __m256 _val14 = _mm256_broadcast_ss(tmpptr + 12); __m256 _val15 = _mm256_broadcast_ss(tmpptr + 13); __m256 _val16 = _mm256_broadcast_ss(tmpptr + 14); __m256 _val17 = _mm256_broadcast_ss(tmpptr + 15); _sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0); _sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0); _sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0); _sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0); _sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0); _sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0); _sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0); _sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0); _sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1); _sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1); _sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1); _sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1); _sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1); _sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1); _sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1); _sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1); __m256 _val20 = _mm256_broadcast_ss(tmpptr + 16); __m256 _val21 = _mm256_broadcast_ss(tmpptr + 17); __m256 _val22 = _mm256_broadcast_ss(tmpptr + 18); __m256 _val23 = _mm256_broadcast_ss(tmpptr + 19); __m256 _val24 = _mm256_broadcast_ss(tmpptr + 20); __m256 _val25 = _mm256_broadcast_ss(tmpptr + 21); __m256 _val26 = _mm256_broadcast_ss(tmpptr + 22); __m256 _val27 = _mm256_broadcast_ss(tmpptr + 23); __m256 _val30 = _mm256_broadcast_ss(tmpptr + 24); __m256 _val31 = _mm256_broadcast_ss(tmpptr + 25); __m256 _val32 = _mm256_broadcast_ss(tmpptr + 26); __m256 _val33 = _mm256_broadcast_ss(tmpptr + 27); __m256 _val34 = _mm256_broadcast_ss(tmpptr + 28); __m256 _val35 = _mm256_broadcast_ss(tmpptr + 29); __m256 _val36 = _mm256_broadcast_ss(tmpptr + 30); __m256 _val37 = _mm256_broadcast_ss(tmpptr + 31); _sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2); _sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2); _sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2); _sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2); _sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2); _sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2); _sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2); _sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2); _sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3); _sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3); _sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3); _sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3); _sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3); _sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3); _sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3); _sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3); tmpptr += 32; kptr += 64; } _mm256_storeu_ps(outptr, _sum0); _mm256_storeu_ps(outptr + 8, _sum1); _mm256_storeu_ps(outptr + 16, _sum2); _mm256_storeu_ps(outptr + 24, _sum3); 
outptr += 32; } for (; i + 1 < size; i += 2) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); __m256 _sum0 = _bias0; __m256 _sum1 = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _val00 = _mm256_broadcast_ss(tmpptr); __m256 _val01 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val02 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val03 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val04 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val05 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val06 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val07 = _mm256_broadcast_ss(tmpptr + 7); __m256 _val10 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val11 = _mm256_broadcast_ss(tmpptr + 9); __m256 _val12 = _mm256_broadcast_ss(tmpptr + 10); __m256 _val13 = _mm256_broadcast_ss(tmpptr + 11); __m256 _val14 = _mm256_broadcast_ss(tmpptr + 12); __m256 _val15 = _mm256_broadcast_ss(tmpptr + 13); __m256 _val16 = _mm256_broadcast_ss(tmpptr + 14); __m256 _val17 = _mm256_broadcast_ss(tmpptr + 15); __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); _sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0); _sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0); _sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0); _sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0); _sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0); _sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0); _sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0); _sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0); _sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1); _sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1); _sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1); _sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1); _sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1); _sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1); _sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1); _sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1); tmpptr += 16; kptr += 64; } _mm256_storeu_ps(outptr, _sum0); _mm256_storeu_ps(outptr + 8, _sum1); outptr += 16; } for (; i < size; i++) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); __m256 _sum = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _val0 = _mm256_broadcast_ss(tmpptr); __m256 _val1 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val2 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val3 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val4 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val5 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val6 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val7 = _mm256_broadcast_ss(tmpptr + 7); __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); _sum = _mm256_fmadd_ps(_w0, _val0, _sum); _sum = _mm256_fmadd_ps(_w1, _val1, _sum); _sum = _mm256_fmadd_ps(_w2, _val2, _sum); _sum = _mm256_fmadd_ps(_w3, _val3, _sum); _sum = _mm256_fmadd_ps(_w4, _val4, _sum); _sum = _mm256_fmadd_ps(_w5, _val5, _sum); _sum = _mm256_fmadd_ps(_w6, _val6, _sum); _sum = _mm256_fmadd_ps(_w7, _val7, _sum); tmpptr += 8; kptr += 64; } _mm256_storeu_ps(outptr, _sum); outptr += 8; } } } static void conv1x1s2_fp16_pack8_avx(const Mat& 
bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = (w - 2 * outw + w) * 8; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); for (int p = 0; p < channels; p++) { const float* r0 = bottom_blob.channel(p); float* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { __m256 _v = _mm256_loadu_ps(r0); _mm256_storeu_ps(outptr, _v); r0 += 16; outptr += 8; } r0 += tailstep; } } conv1x1s1_sgemm_fp16_pack8_avx(bottom_blob_shrinked, top_blob, kernel, _bias, opt); }
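// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the upstream file): the
// compute loops above read half-precision weights through loadfp16(), which
// is assumed to widen eight IEEE-754 fp16 values into a __m256 of floats
// (ncnn provides such a helper elsewhere, e.g. via _mm256_cvtph_ps where
// F16C is available). For reference, a minimal scalar sketch of the same
// half->float widening, along the lines of ncnn's float16_to_float32():
static inline float float16_to_float32_ref(unsigned short value)
{
    // fp16 is 1 : 5 : 10 (sign : exponent : significand); fp32 is 1 : 8 : 23
    unsigned short sign = (value & 0x8000) >> 15;
    unsigned short exponent = (value & 0x7c00) >> 10;
    unsigned short significand = value & 0x03ff;
    union { unsigned int u; float f; } tmp;
    if (exponent == 0)
    {
        if (significand == 0)
        {
            tmp.u = (unsigned int)sign << 31; // signed zero
        }
        else
        {
            // fp16 subnormal: renormalize the significand into a normal fp32
            exponent = 0;
            while ((significand & 0x200) == 0)
            {
                significand <<= 1;
                exponent++;
            }
            significand <<= 1;
            significand &= 0x3ff;
            tmp.u = ((unsigned int)sign << 31) | ((unsigned int)(-exponent + (-15 + 127)) << 23) | ((unsigned int)significand << 13);
        }
    }
    else if (exponent == 0x1f)
    {
        // infinity or NaN: keep the payload, force the fp32 exponent to all-ones
        tmp.u = ((unsigned int)sign << 31) | (0xffu << 23) | ((unsigned int)significand << 13);
    }
    else
    {
        // normal number: rebias the exponent from 15 (fp16) to 127 (fp32)
        tmp.u = ((unsigned int)sign << 31) | ((unsigned int)(exponent + (-15 + 127)) << 23) | ((unsigned int)significand << 13);
    }
    return tmp.f;
}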
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_transform_kernel_fp16_pack8_avx(const Mat& kernel, Mat& weight_data_pack8, int num_input, int num_output) { // src = kw-kh-inch-outch // dst = 8b-8a-kw-kh-inch/8a-outch/8b Mat weight_data_r2 = kernel.reshape(1, num_input, num_output); weight_data_pack8.create(1, num_input / 8, num_output / 8, (size_t)2 * 64, 64); for (int q = 0; q + 7 < num_output; q += 8) { const Mat k0 = weight_data_r2.channel(q); const Mat k1 = weight_data_r2.channel(q + 1); const Mat k2 = weight_data_r2.channel(q + 2); const Mat k3 = weight_data_r2.channel(q + 3); const Mat k4 = weight_data_r2.channel(q + 4); const Mat k5 = weight_data_r2.channel(q + 5); const Mat k6 = weight_data_r2.channel(q + 6); const Mat k7 = weight_data_r2.channel(q + 7); Mat g0 = weight_data_pack8.channel(q / 8); for (int p = 0; p + 7 < num_input; p += 8) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k04 = k0.row(p + 4); const float* k05 = k0.row(p + 5); const float* k06 = k0.row(p + 6); const float* k07 = k0.row(p + 7); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k14 = k1.row(p + 4); const float* k15 = k1.row(p + 5); const float* k16 = k1.row(p + 6); const float* k17 = k1.row(p + 7); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k24 = k2.row(p + 4); const float* k25 = k2.row(p + 5); const float* k26 = k2.row(p + 6); const float* k27 = k2.row(p + 7); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); const float* k34 = k3.row(p + 4); const float* k35 = k3.row(p + 5); const float* k36 = k3.row(p + 6); const float* k37 = k3.row(p + 7); const float* k40 = k4.row(p); const float* k41 = k4.row(p + 1); const float* k42 = k4.row(p + 2); const float* k43 = k4.row(p + 3); const float* k44 = k4.row(p + 4); const float* k45 = k4.row(p + 5); const float* k46 = k4.row(p + 6); const float* k47 = k4.row(p + 7); const float* k50 = k5.row(p); const float* k51 = k5.row(p + 1); const float* k52 = k5.row(p + 2); const float* k53 = k5.row(p + 3); const float* k54 = k5.row(p + 4); const float* k55 = k5.row(p + 5); const float* k56 = k5.row(p + 6); const float* k57 = k5.row(p + 7); const float* k60 = k6.row(p); const float* k61 = k6.row(p + 1); const float* k62 = k6.row(p + 2); const float* k63 = k6.row(p + 3); const float* k64 = k6.row(p + 4); const float* k65 = k6.row(p + 5); const float* k66 = k6.row(p + 6); const float* k67 = k6.row(p + 7); const float* k70 = k7.row(p); const float* k71 = k7.row(p + 1); const float* k72 = k7.row(p + 2); const float* k73 = k7.row(p + 
3); const float* k74 = k7.row(p + 4); const float* k75 = k7.row(p + 5); const float* k76 = k7.row(p + 6); const float* k77 = k7.row(p + 7); unsigned short* g00 = (unsigned short*)g0.row(p / 8); g00[0] = float32_to_float16(k00[0]); g00[1] = float32_to_float16(k10[0]); g00[2] = float32_to_float16(k20[0]); g00[3] = float32_to_float16(k30[0]); g00[4] = float32_to_float16(k40[0]); g00[5] = float32_to_float16(k50[0]); g00[6] = float32_to_float16(k60[0]); g00[7] = float32_to_float16(k70[0]); g00 += 8; g00[0] = float32_to_float16(k01[0]); g00[1] = float32_to_float16(k11[0]); g00[2] = float32_to_float16(k21[0]); g00[3] = float32_to_float16(k31[0]); g00[4] = float32_to_float16(k41[0]); g00[5] = float32_to_float16(k51[0]); g00[6] = float32_to_float16(k61[0]); g00[7] = float32_to_float16(k71[0]); g00 += 8; g00[0] = float32_to_float16(k02[0]); g00[1] = float32_to_float16(k12[0]); g00[2] = float32_to_float16(k22[0]); g00[3] = float32_to_float16(k32[0]); g00[4] = float32_to_float16(k42[0]); g00[5] = float32_to_float16(k52[0]); g00[6] = float32_to_float16(k62[0]); g00[7] = float32_to_float16(k72[0]); g00 += 8; g00[0] = float32_to_float16(k03[0]); g00[1] = float32_to_float16(k13[0]); g00[2] = float32_to_float16(k23[0]); g00[3] = float32_to_float16(k33[0]); g00[4] = float32_to_float16(k43[0]); g00[5] = float32_to_float16(k53[0]); g00[6] = float32_to_float16(k63[0]); g00[7] = float32_to_float16(k73[0]); g00 += 8; g00[0] = float32_to_float16(k04[0]); g00[1] = float32_to_float16(k14[0]); g00[2] = float32_to_float16(k24[0]); g00[3] = float32_to_float16(k34[0]); g00[4] = float32_to_float16(k44[0]); g00[5] = float32_to_float16(k54[0]); g00[6] = float32_to_float16(k64[0]); g00[7] = float32_to_float16(k74[0]); g00 += 8; g00[0] = float32_to_float16(k05[0]); g00[1] = float32_to_float16(k15[0]); g00[2] = float32_to_float16(k25[0]); g00[3] = float32_to_float16(k35[0]); g00[4] = float32_to_float16(k45[0]); g00[5] = float32_to_float16(k55[0]); g00[6] = float32_to_float16(k65[0]); g00[7] = float32_to_float16(k75[0]); g00 += 8; g00[0] = float32_to_float16(k06[0]); g00[1] = float32_to_float16(k16[0]); g00[2] = float32_to_float16(k26[0]); g00[3] = float32_to_float16(k36[0]); g00[4] = float32_to_float16(k46[0]); g00[5] = float32_to_float16(k56[0]); g00[6] = float32_to_float16(k66[0]); g00[7] = float32_to_float16(k76[0]); g00 += 8; g00[0] = float32_to_float16(k07[0]); g00[1] = float32_to_float16(k17[0]); g00[2] = float32_to_float16(k27[0]); g00[3] = float32_to_float16(k37[0]); g00[4] = float32_to_float16(k47[0]); g00[5] = float32_to_float16(k57[0]); g00[6] = float32_to_float16(k67[0]); g00[7] = float32_to_float16(k77[0]); g00 += 8; } } } static void conv1x1s1_sgemm_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; int outw = top_blob.w; int outh = top_blob.h; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; const int size = w * h; const float* bias = _bias; // interleave Mat tmp(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator); { int nn_size = size / 12; int remain_size_start = nn_size * 12; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 12; const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12); for (int q = 0; q < inch; q++) { __m256 _r0 = 
_mm256_loadu_ps(img0); __m256 _r1 = _mm256_loadu_ps(img0 + 8); __m256 _r2 = _mm256_loadu_ps(img0 + 16); __m256 _r3 = _mm256_loadu_ps(img0 + 24); __m256 _r4 = _mm256_loadu_ps(img0 + 32); __m256 _r5 = _mm256_loadu_ps(img0 + 40); __m256 _r6 = _mm256_loadu_ps(img0 + 48); __m256 _r7 = _mm256_loadu_ps(img0 + 56); __m256 _r8 = _mm256_loadu_ps(img0 + 64); __m256 _r9 = _mm256_loadu_ps(img0 + 72); __m256 _r10 = _mm256_loadu_ps(img0 + 80); __m256 _r11 = _mm256_loadu_ps(img0 + 88); _mm256_storeu_ps(tmpptr, _r0); _mm256_storeu_ps(tmpptr + 8, _r1); _mm256_storeu_ps(tmpptr + 16, _r2); _mm256_storeu_ps(tmpptr + 24, _r3); _mm256_storeu_ps(tmpptr + 32, _r4); _mm256_storeu_ps(tmpptr + 40, _r5); _mm256_storeu_ps(tmpptr + 48, _r6); _mm256_storeu_ps(tmpptr + 56, _r7); _mm256_storeu_ps(tmpptr + 64, _r8); _mm256_storeu_ps(tmpptr + 72, _r9); _mm256_storeu_ps(tmpptr + 80, _r10); _mm256_storeu_ps(tmpptr + 88, _r11); tmpptr += 96; img0 += bottom_blob.cstep * 8; } } nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(img0); __m256 _r1 = _mm256_loadu_ps(img0 + 8); __m256 _r2 = _mm256_loadu_ps(img0 + 16); __m256 _r3 = _mm256_loadu_ps(img0 + 24); __m256 _r4 = _mm256_loadu_ps(img0 + 32); __m256 _r5 = _mm256_loadu_ps(img0 + 40); __m256 _r6 = _mm256_loadu_ps(img0 + 48); __m256 _r7 = _mm256_loadu_ps(img0 + 56); _mm256_storeu_ps(tmpptr, _r0); _mm256_storeu_ps(tmpptr + 8, _r1); _mm256_storeu_ps(tmpptr + 16, _r2); _mm256_storeu_ps(tmpptr + 24, _r3); _mm256_storeu_ps(tmpptr + 32, _r4); _mm256_storeu_ps(tmpptr + 40, _r5); _mm256_storeu_ps(tmpptr + 48, _r6); _mm256_storeu_ps(tmpptr + 56, _r7); tmpptr += 64; img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(img0); __m256 _r1 = _mm256_loadu_ps(img0 + 8); __m256 _r2 = _mm256_loadu_ps(img0 + 16); __m256 _r3 = _mm256_loadu_ps(img0 + 24); _mm256_storeu_ps(tmpptr, _r0); _mm256_storeu_ps(tmpptr + 8, _r1); _mm256_storeu_ps(tmpptr + 16, _r2); _mm256_storeu_ps(tmpptr + 24, _r3); tmpptr += 32; img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); for (int q = 0; q < inch; q++) { __m256 _r0 = _mm256_loadu_ps(img0); __m256 _r1 = _mm256_loadu_ps(img0 + 8); _mm256_storeu_ps(tmpptr, _r0); _mm256_storeu_ps(tmpptr + 8, _r1); tmpptr += 16; img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const float* img0 = bottom_blob.channel(0); img0 += i * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); for (int q = 0; q < inch; q++) { 
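// remainder pixels of the interleave: copy one pack8 element (8 floats) per input channel into the tile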
__m256 _r0 = _mm256_loadu_ps(img0); _mm256_storeu_ps(tmpptr, _r0); tmpptr += 8; img0 += bottom_blob.cstep * 8; } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); float* outptr = out; int i = 0; for (; i + 11 < size; i += 12) { const float* tmpptr = tmp.channel(i / 12); __m256 _sum0 = _bias0; __m256 _sum1 = _bias0; __m256 _sum2 = _bias0; __m256 _sum3 = _bias0; __m256 _sum4 = _bias0; __m256 _sum5 = _bias0; __m256 _sum6 = _bias0; __m256 _sum7 = _bias0; __m256 _sum8 = _bias0; __m256 _sum9 = _bias0; __m256 _sum10 = _bias0; __m256 _sum11 = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); __m256 _val00 = _mm256_broadcast_ss(tmpptr); __m256 _val01 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val02 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val03 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val04 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val05 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val06 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val07 = _mm256_broadcast_ss(tmpptr + 7); __m256 _val10 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val11 = _mm256_broadcast_ss(tmpptr + 9); __m256 _val12 = _mm256_broadcast_ss(tmpptr + 10); __m256 _val13 = _mm256_broadcast_ss(tmpptr + 11); __m256 _val14 = _mm256_broadcast_ss(tmpptr + 12); __m256 _val15 = _mm256_broadcast_ss(tmpptr + 13); __m256 _val16 = _mm256_broadcast_ss(tmpptr + 14); __m256 _val17 = _mm256_broadcast_ss(tmpptr + 15); _sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0); _sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0); _sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0); _sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0); _sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0); _sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0); _sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0); _sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0); _sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1); _sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1); _sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1); _sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1); _sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1); _sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1); _sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1); _sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1); __m256 _val20 = _mm256_broadcast_ss(tmpptr + 16); __m256 _val21 = _mm256_broadcast_ss(tmpptr + 17); __m256 _val22 = _mm256_broadcast_ss(tmpptr + 18); __m256 _val23 = _mm256_broadcast_ss(tmpptr + 19); __m256 _val24 = _mm256_broadcast_ss(tmpptr + 20); __m256 _val25 = _mm256_broadcast_ss(tmpptr + 21); __m256 _val26 = _mm256_broadcast_ss(tmpptr + 22); __m256 _val27 = _mm256_broadcast_ss(tmpptr + 23); __m256 _val30 = _mm256_broadcast_ss(tmpptr + 24); __m256 _val31 = _mm256_broadcast_ss(tmpptr + 25); __m256 _val32 = _mm256_broadcast_ss(tmpptr + 26); __m256 _val33 = _mm256_broadcast_ss(tmpptr + 27); __m256 _val34 = _mm256_broadcast_ss(tmpptr + 28); __m256 _val35 = _mm256_broadcast_ss(tmpptr + 29); __m256 _val36 = _mm256_broadcast_ss(tmpptr + 30); __m256 _val37 = _mm256_broadcast_ss(tmpptr + 31); _sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2); _sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2); _sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2); _sum2 = 
_mm256_fmadd_ps(_w3, _val23, _sum2); _sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2); _sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2); _sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2); _sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2); _sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3); _sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3); _sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3); _sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3); _sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3); _sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3); _sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3); _sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3); __m256 _val40 = _mm256_broadcast_ss(tmpptr + 32); __m256 _val41 = _mm256_broadcast_ss(tmpptr + 33); __m256 _val42 = _mm256_broadcast_ss(tmpptr + 34); __m256 _val43 = _mm256_broadcast_ss(tmpptr + 35); __m256 _val44 = _mm256_broadcast_ss(tmpptr + 36); __m256 _val45 = _mm256_broadcast_ss(tmpptr + 37); __m256 _val46 = _mm256_broadcast_ss(tmpptr + 38); __m256 _val47 = _mm256_broadcast_ss(tmpptr + 39); __m256 _val50 = _mm256_broadcast_ss(tmpptr + 40); __m256 _val51 = _mm256_broadcast_ss(tmpptr + 41); __m256 _val52 = _mm256_broadcast_ss(tmpptr + 42); __m256 _val53 = _mm256_broadcast_ss(tmpptr + 43); __m256 _val54 = _mm256_broadcast_ss(tmpptr + 44); __m256 _val55 = _mm256_broadcast_ss(tmpptr + 45); __m256 _val56 = _mm256_broadcast_ss(tmpptr + 46); __m256 _val57 = _mm256_broadcast_ss(tmpptr + 47); _sum4 = _mm256_fmadd_ps(_w0, _val40, _sum4); _sum4 = _mm256_fmadd_ps(_w1, _val41, _sum4); _sum4 = _mm256_fmadd_ps(_w2, _val42, _sum4); _sum4 = _mm256_fmadd_ps(_w3, _val43, _sum4); _sum4 = _mm256_fmadd_ps(_w4, _val44, _sum4); _sum4 = _mm256_fmadd_ps(_w5, _val45, _sum4); _sum4 = _mm256_fmadd_ps(_w6, _val46, _sum4); _sum4 = _mm256_fmadd_ps(_w7, _val47, _sum4); _sum5 = _mm256_fmadd_ps(_w0, _val50, _sum5); _sum5 = _mm256_fmadd_ps(_w1, _val51, _sum5); _sum5 = _mm256_fmadd_ps(_w2, _val52, _sum5); _sum5 = _mm256_fmadd_ps(_w3, _val53, _sum5); _sum5 = _mm256_fmadd_ps(_w4, _val54, _sum5); _sum5 = _mm256_fmadd_ps(_w5, _val55, _sum5); _sum5 = _mm256_fmadd_ps(_w6, _val56, _sum5); _sum5 = _mm256_fmadd_ps(_w7, _val57, _sum5); __m256 _val60 = _mm256_broadcast_ss(tmpptr + 48); __m256 _val61 = _mm256_broadcast_ss(tmpptr + 49); __m256 _val62 = _mm256_broadcast_ss(tmpptr + 50); __m256 _val63 = _mm256_broadcast_ss(tmpptr + 51); __m256 _val64 = _mm256_broadcast_ss(tmpptr + 52); __m256 _val65 = _mm256_broadcast_ss(tmpptr + 53); __m256 _val66 = _mm256_broadcast_ss(tmpptr + 54); __m256 _val67 = _mm256_broadcast_ss(tmpptr + 55); __m256 _val70 = _mm256_broadcast_ss(tmpptr + 56); __m256 _val71 = _mm256_broadcast_ss(tmpptr + 57); __m256 _val72 = _mm256_broadcast_ss(tmpptr + 58); __m256 _val73 = _mm256_broadcast_ss(tmpptr + 59); __m256 _val74 = _mm256_broadcast_ss(tmpptr + 60); __m256 _val75 = _mm256_broadcast_ss(tmpptr + 61); __m256 _val76 = _mm256_broadcast_ss(tmpptr + 62); __m256 _val77 = _mm256_broadcast_ss(tmpptr + 63); _sum6 = _mm256_fmadd_ps(_w0, _val60, _sum6); _sum6 = _mm256_fmadd_ps(_w1, _val61, _sum6); _sum6 = _mm256_fmadd_ps(_w2, _val62, _sum6); _sum6 = _mm256_fmadd_ps(_w3, _val63, _sum6); _sum6 = _mm256_fmadd_ps(_w4, _val64, _sum6); _sum6 = _mm256_fmadd_ps(_w5, _val65, _sum6); _sum6 = _mm256_fmadd_ps(_w6, _val66, _sum6); _sum6 = _mm256_fmadd_ps(_w7, _val67, _sum6); _sum7 = _mm256_fmadd_ps(_w0, _val70, _sum7); _sum7 = _mm256_fmadd_ps(_w1, _val71, _sum7); _sum7 = _mm256_fmadd_ps(_w2, _val72, _sum7); _sum7 = _mm256_fmadd_ps(_w3, _val73, _sum7); _sum7 = _mm256_fmadd_ps(_w4, _val74, _sum7); _sum7 = _mm256_fmadd_ps(_w5, _val75, _sum7); _sum7 = 
_mm256_fmadd_ps(_w6, _val76, _sum7); _sum7 = _mm256_fmadd_ps(_w7, _val77, _sum7); __m256 _val80 = _mm256_broadcast_ss(tmpptr + 64); __m256 _val81 = _mm256_broadcast_ss(tmpptr + 65); __m256 _val82 = _mm256_broadcast_ss(tmpptr + 66); __m256 _val83 = _mm256_broadcast_ss(tmpptr + 67); __m256 _val84 = _mm256_broadcast_ss(tmpptr + 68); __m256 _val85 = _mm256_broadcast_ss(tmpptr + 69); __m256 _val86 = _mm256_broadcast_ss(tmpptr + 70); __m256 _val87 = _mm256_broadcast_ss(tmpptr + 71); __m256 _val90 = _mm256_broadcast_ss(tmpptr + 72); __m256 _val91 = _mm256_broadcast_ss(tmpptr + 73); __m256 _val92 = _mm256_broadcast_ss(tmpptr + 74); __m256 _val93 = _mm256_broadcast_ss(tmpptr + 75); __m256 _val94 = _mm256_broadcast_ss(tmpptr + 76); __m256 _val95 = _mm256_broadcast_ss(tmpptr + 77); __m256 _val96 = _mm256_broadcast_ss(tmpptr + 78); __m256 _val97 = _mm256_broadcast_ss(tmpptr + 79); _sum8 = _mm256_fmadd_ps(_w0, _val80, _sum8); _sum8 = _mm256_fmadd_ps(_w1, _val81, _sum8); _sum8 = _mm256_fmadd_ps(_w2, _val82, _sum8); _sum8 = _mm256_fmadd_ps(_w3, _val83, _sum8); _sum8 = _mm256_fmadd_ps(_w4, _val84, _sum8); _sum8 = _mm256_fmadd_ps(_w5, _val85, _sum8); _sum8 = _mm256_fmadd_ps(_w6, _val86, _sum8); _sum8 = _mm256_fmadd_ps(_w7, _val87, _sum8); _sum9 = _mm256_fmadd_ps(_w0, _val90, _sum9); _sum9 = _mm256_fmadd_ps(_w1, _val91, _sum9); _sum9 = _mm256_fmadd_ps(_w2, _val92, _sum9); _sum9 = _mm256_fmadd_ps(_w3, _val93, _sum9); _sum9 = _mm256_fmadd_ps(_w4, _val94, _sum9); _sum9 = _mm256_fmadd_ps(_w5, _val95, _sum9); _sum9 = _mm256_fmadd_ps(_w6, _val96, _sum9); _sum9 = _mm256_fmadd_ps(_w7, _val97, _sum9); __m256 _val100 = _mm256_broadcast_ss(tmpptr + 80); __m256 _val101 = _mm256_broadcast_ss(tmpptr + 81); __m256 _val102 = _mm256_broadcast_ss(tmpptr + 82); __m256 _val103 = _mm256_broadcast_ss(tmpptr + 83); __m256 _val104 = _mm256_broadcast_ss(tmpptr + 84); __m256 _val105 = _mm256_broadcast_ss(tmpptr + 85); __m256 _val106 = _mm256_broadcast_ss(tmpptr + 86); __m256 _val107 = _mm256_broadcast_ss(tmpptr + 87); __m256 _val110 = _mm256_broadcast_ss(tmpptr + 88); __m256 _val111 = _mm256_broadcast_ss(tmpptr + 89); __m256 _val112 = _mm256_broadcast_ss(tmpptr + 90); __m256 _val113 = _mm256_broadcast_ss(tmpptr + 91); __m256 _val114 = _mm256_broadcast_ss(tmpptr + 92); __m256 _val115 = _mm256_broadcast_ss(tmpptr + 93); __m256 _val116 = _mm256_broadcast_ss(tmpptr + 94); __m256 _val117 = _mm256_broadcast_ss(tmpptr + 95); _sum10 = _mm256_fmadd_ps(_w0, _val100, _sum10); _sum10 = _mm256_fmadd_ps(_w1, _val101, _sum10); _sum10 = _mm256_fmadd_ps(_w2, _val102, _sum10); _sum10 = _mm256_fmadd_ps(_w3, _val103, _sum10); _sum10 = _mm256_fmadd_ps(_w4, _val104, _sum10); _sum10 = _mm256_fmadd_ps(_w5, _val105, _sum10); _sum10 = _mm256_fmadd_ps(_w6, _val106, _sum10); _sum10 = _mm256_fmadd_ps(_w7, _val107, _sum10); _sum11 = _mm256_fmadd_ps(_w0, _val110, _sum11); _sum11 = _mm256_fmadd_ps(_w1, _val111, _sum11); _sum11 = _mm256_fmadd_ps(_w2, _val112, _sum11); _sum11 = _mm256_fmadd_ps(_w3, _val113, _sum11); _sum11 = _mm256_fmadd_ps(_w4, _val114, _sum11); _sum11 = _mm256_fmadd_ps(_w5, _val115, _sum11); _sum11 = _mm256_fmadd_ps(_w6, _val116, _sum11); _sum11 = _mm256_fmadd_ps(_w7, _val117, _sum11); tmpptr += 96; kptr += 64; } _mm256_storeu_ps(outptr, _sum0); _mm256_storeu_ps(outptr + 8, _sum1); _mm256_storeu_ps(outptr + 16, _sum2); _mm256_storeu_ps(outptr + 24, _sum3); _mm256_storeu_ps(outptr + 32, _sum4); _mm256_storeu_ps(outptr + 40, _sum5); _mm256_storeu_ps(outptr + 48, _sum6); _mm256_storeu_ps(outptr + 56, _sum7); _mm256_storeu_ps(outptr + 64, _sum8); 
_mm256_storeu_ps(outptr + 72, _sum9); _mm256_storeu_ps(outptr + 80, _sum10); _mm256_storeu_ps(outptr + 88, _sum11); outptr += 96; } for (; i + 7 < size; i += 8) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); __m256 _sum0 = _bias0; __m256 _sum1 = _bias0; __m256 _sum2 = _bias0; __m256 _sum3 = _bias0; __m256 _sum4 = _bias0; __m256 _sum5 = _bias0; __m256 _sum6 = _bias0; __m256 _sum7 = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); __m256 _val00 = _mm256_broadcast_ss(tmpptr); __m256 _val01 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val02 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val03 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val04 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val05 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val06 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val07 = _mm256_broadcast_ss(tmpptr + 7); __m256 _val10 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val11 = _mm256_broadcast_ss(tmpptr + 9); __m256 _val12 = _mm256_broadcast_ss(tmpptr + 10); __m256 _val13 = _mm256_broadcast_ss(tmpptr + 11); __m256 _val14 = _mm256_broadcast_ss(tmpptr + 12); __m256 _val15 = _mm256_broadcast_ss(tmpptr + 13); __m256 _val16 = _mm256_broadcast_ss(tmpptr + 14); __m256 _val17 = _mm256_broadcast_ss(tmpptr + 15); _sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0); _sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0); _sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0); _sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0); _sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0); _sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0); _sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0); _sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0); _sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1); _sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1); _sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1); _sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1); _sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1); _sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1); _sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1); _sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1); __m256 _val20 = _mm256_broadcast_ss(tmpptr + 16); __m256 _val21 = _mm256_broadcast_ss(tmpptr + 17); __m256 _val22 = _mm256_broadcast_ss(tmpptr + 18); __m256 _val23 = _mm256_broadcast_ss(tmpptr + 19); __m256 _val24 = _mm256_broadcast_ss(tmpptr + 20); __m256 _val25 = _mm256_broadcast_ss(tmpptr + 21); __m256 _val26 = _mm256_broadcast_ss(tmpptr + 22); __m256 _val27 = _mm256_broadcast_ss(tmpptr + 23); __m256 _val30 = _mm256_broadcast_ss(tmpptr + 24); __m256 _val31 = _mm256_broadcast_ss(tmpptr + 25); __m256 _val32 = _mm256_broadcast_ss(tmpptr + 26); __m256 _val33 = _mm256_broadcast_ss(tmpptr + 27); __m256 _val34 = _mm256_broadcast_ss(tmpptr + 28); __m256 _val35 = _mm256_broadcast_ss(tmpptr + 29); __m256 _val36 = _mm256_broadcast_ss(tmpptr + 30); __m256 _val37 = _mm256_broadcast_ss(tmpptr + 31); _sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2); _sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2); _sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2); _sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2); _sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2); _sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2); _sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2); _sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2); _sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3); _sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3); _sum3 = 
_mm256_fmadd_ps(_w2, _val32, _sum3); _sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3); _sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3); _sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3); _sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3); _sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3); __m256 _val40 = _mm256_broadcast_ss(tmpptr + 32); __m256 _val41 = _mm256_broadcast_ss(tmpptr + 33); __m256 _val42 = _mm256_broadcast_ss(tmpptr + 34); __m256 _val43 = _mm256_broadcast_ss(tmpptr + 35); __m256 _val44 = _mm256_broadcast_ss(tmpptr + 36); __m256 _val45 = _mm256_broadcast_ss(tmpptr + 37); __m256 _val46 = _mm256_broadcast_ss(tmpptr + 38); __m256 _val47 = _mm256_broadcast_ss(tmpptr + 39); __m256 _val50 = _mm256_broadcast_ss(tmpptr + 40); __m256 _val51 = _mm256_broadcast_ss(tmpptr + 41); __m256 _val52 = _mm256_broadcast_ss(tmpptr + 42); __m256 _val53 = _mm256_broadcast_ss(tmpptr + 43); __m256 _val54 = _mm256_broadcast_ss(tmpptr + 44); __m256 _val55 = _mm256_broadcast_ss(tmpptr + 45); __m256 _val56 = _mm256_broadcast_ss(tmpptr + 46); __m256 _val57 = _mm256_broadcast_ss(tmpptr + 47); _sum4 = _mm256_fmadd_ps(_w0, _val40, _sum4); _sum4 = _mm256_fmadd_ps(_w1, _val41, _sum4); _sum4 = _mm256_fmadd_ps(_w2, _val42, _sum4); _sum4 = _mm256_fmadd_ps(_w3, _val43, _sum4); _sum4 = _mm256_fmadd_ps(_w4, _val44, _sum4); _sum4 = _mm256_fmadd_ps(_w5, _val45, _sum4); _sum4 = _mm256_fmadd_ps(_w6, _val46, _sum4); _sum4 = _mm256_fmadd_ps(_w7, _val47, _sum4); _sum5 = _mm256_fmadd_ps(_w0, _val50, _sum5); _sum5 = _mm256_fmadd_ps(_w1, _val51, _sum5); _sum5 = _mm256_fmadd_ps(_w2, _val52, _sum5); _sum5 = _mm256_fmadd_ps(_w3, _val53, _sum5); _sum5 = _mm256_fmadd_ps(_w4, _val54, _sum5); _sum5 = _mm256_fmadd_ps(_w5, _val55, _sum5); _sum5 = _mm256_fmadd_ps(_w6, _val56, _sum5); _sum5 = _mm256_fmadd_ps(_w7, _val57, _sum5); __m256 _val60 = _mm256_broadcast_ss(tmpptr + 48); __m256 _val61 = _mm256_broadcast_ss(tmpptr + 49); __m256 _val62 = _mm256_broadcast_ss(tmpptr + 50); __m256 _val63 = _mm256_broadcast_ss(tmpptr + 51); __m256 _val64 = _mm256_broadcast_ss(tmpptr + 52); __m256 _val65 = _mm256_broadcast_ss(tmpptr + 53); __m256 _val66 = _mm256_broadcast_ss(tmpptr + 54); __m256 _val67 = _mm256_broadcast_ss(tmpptr + 55); __m256 _val70 = _mm256_broadcast_ss(tmpptr + 56); __m256 _val71 = _mm256_broadcast_ss(tmpptr + 57); __m256 _val72 = _mm256_broadcast_ss(tmpptr + 58); __m256 _val73 = _mm256_broadcast_ss(tmpptr + 59); __m256 _val74 = _mm256_broadcast_ss(tmpptr + 60); __m256 _val75 = _mm256_broadcast_ss(tmpptr + 61); __m256 _val76 = _mm256_broadcast_ss(tmpptr + 62); __m256 _val77 = _mm256_broadcast_ss(tmpptr + 63); _sum6 = _mm256_fmadd_ps(_w0, _val60, _sum6); _sum6 = _mm256_fmadd_ps(_w1, _val61, _sum6); _sum6 = _mm256_fmadd_ps(_w2, _val62, _sum6); _sum6 = _mm256_fmadd_ps(_w3, _val63, _sum6); _sum6 = _mm256_fmadd_ps(_w4, _val64, _sum6); _sum6 = _mm256_fmadd_ps(_w5, _val65, _sum6); _sum6 = _mm256_fmadd_ps(_w6, _val66, _sum6); _sum6 = _mm256_fmadd_ps(_w7, _val67, _sum6); _sum7 = _mm256_fmadd_ps(_w0, _val70, _sum7); _sum7 = _mm256_fmadd_ps(_w1, _val71, _sum7); _sum7 = _mm256_fmadd_ps(_w2, _val72, _sum7); _sum7 = _mm256_fmadd_ps(_w3, _val73, _sum7); _sum7 = _mm256_fmadd_ps(_w4, _val74, _sum7); _sum7 = _mm256_fmadd_ps(_w5, _val75, _sum7); _sum7 = _mm256_fmadd_ps(_w6, _val76, _sum7); _sum7 = _mm256_fmadd_ps(_w7, _val77, _sum7); tmpptr += 64; kptr += 64; } _mm256_storeu_ps(outptr, _sum0); _mm256_storeu_ps(outptr + 8, _sum1); _mm256_storeu_ps(outptr + 16, _sum2); _mm256_storeu_ps(outptr + 24, _sum3); _mm256_storeu_ps(outptr + 32, _sum4); _mm256_storeu_ps(outptr + 40, _sum5); 
_mm256_storeu_ps(outptr + 48, _sum6); _mm256_storeu_ps(outptr + 56, _sum7); outptr += 64; } for (; i + 3 < size; i += 4) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); __m256 _sum0 = _bias0; __m256 _sum1 = _bias0; __m256 _sum2 = _bias0; __m256 _sum3 = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); __m256 _val00 = _mm256_broadcast_ss(tmpptr); __m256 _val01 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val02 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val03 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val04 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val05 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val06 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val07 = _mm256_broadcast_ss(tmpptr + 7); __m256 _val10 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val11 = _mm256_broadcast_ss(tmpptr + 9); __m256 _val12 = _mm256_broadcast_ss(tmpptr + 10); __m256 _val13 = _mm256_broadcast_ss(tmpptr + 11); __m256 _val14 = _mm256_broadcast_ss(tmpptr + 12); __m256 _val15 = _mm256_broadcast_ss(tmpptr + 13); __m256 _val16 = _mm256_broadcast_ss(tmpptr + 14); __m256 _val17 = _mm256_broadcast_ss(tmpptr + 15); _sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0); _sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0); _sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0); _sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0); _sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0); _sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0); _sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0); _sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0); _sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1); _sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1); _sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1); _sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1); _sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1); _sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1); _sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1); _sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1); __m256 _val20 = _mm256_broadcast_ss(tmpptr + 16); __m256 _val21 = _mm256_broadcast_ss(tmpptr + 17); __m256 _val22 = _mm256_broadcast_ss(tmpptr + 18); __m256 _val23 = _mm256_broadcast_ss(tmpptr + 19); __m256 _val24 = _mm256_broadcast_ss(tmpptr + 20); __m256 _val25 = _mm256_broadcast_ss(tmpptr + 21); __m256 _val26 = _mm256_broadcast_ss(tmpptr + 22); __m256 _val27 = _mm256_broadcast_ss(tmpptr + 23); __m256 _val30 = _mm256_broadcast_ss(tmpptr + 24); __m256 _val31 = _mm256_broadcast_ss(tmpptr + 25); __m256 _val32 = _mm256_broadcast_ss(tmpptr + 26); __m256 _val33 = _mm256_broadcast_ss(tmpptr + 27); __m256 _val34 = _mm256_broadcast_ss(tmpptr + 28); __m256 _val35 = _mm256_broadcast_ss(tmpptr + 29); __m256 _val36 = _mm256_broadcast_ss(tmpptr + 30); __m256 _val37 = _mm256_broadcast_ss(tmpptr + 31); _sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2); _sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2); _sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2); _sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2); _sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2); _sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2); _sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2); _sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2); _sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3); _sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3); _sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3); _sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3); _sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3); 
_sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3); _sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3); _sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3); tmpptr += 32; kptr += 64; } _mm256_storeu_ps(outptr, _sum0); _mm256_storeu_ps(outptr + 8, _sum1); _mm256_storeu_ps(outptr + 16, _sum2); _mm256_storeu_ps(outptr + 24, _sum3); outptr += 32; } for (; i + 1 < size; i += 2) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); __m256 _sum0 = _bias0; __m256 _sum1 = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _val00 = _mm256_broadcast_ss(tmpptr); __m256 _val01 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val02 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val03 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val04 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val05 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val06 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val07 = _mm256_broadcast_ss(tmpptr + 7); __m256 _val10 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val11 = _mm256_broadcast_ss(tmpptr + 9); __m256 _val12 = _mm256_broadcast_ss(tmpptr + 10); __m256 _val13 = _mm256_broadcast_ss(tmpptr + 11); __m256 _val14 = _mm256_broadcast_ss(tmpptr + 12); __m256 _val15 = _mm256_broadcast_ss(tmpptr + 13); __m256 _val16 = _mm256_broadcast_ss(tmpptr + 14); __m256 _val17 = _mm256_broadcast_ss(tmpptr + 15); __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); _sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0); _sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0); _sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0); _sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0); _sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0); _sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0); _sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0); _sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0); _sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1); _sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1); _sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1); _sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1); _sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1); _sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1); _sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1); _sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1); tmpptr += 16; kptr += 64; } _mm256_storeu_ps(outptr, _sum0); _mm256_storeu_ps(outptr + 8, _sum1); outptr += 16; } for (; i < size; i++) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); __m256 _sum = _bias0; const unsigned short* kptr = (const unsigned short*)kernel + p * inch * 64; for (int q = 0; q < inch; q++) { __m256 _val0 = _mm256_broadcast_ss(tmpptr); __m256 _val1 = _mm256_broadcast_ss(tmpptr + 1); __m256 _val2 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val3 = _mm256_broadcast_ss(tmpptr + 3); __m256 _val4 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val5 = _mm256_broadcast_ss(tmpptr + 5); __m256 _val6 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val7 = _mm256_broadcast_ss(tmpptr + 7); __m256 _w0 = loadfp16(kptr); __m256 _w1 = loadfp16(kptr + 8); __m256 _w2 = loadfp16(kptr + 16); __m256 _w3 = loadfp16(kptr + 24); __m256 _w4 = loadfp16(kptr + 32); __m256 _w5 = loadfp16(kptr + 40); __m256 _w6 = loadfp16(kptr + 48); __m256 _w7 = loadfp16(kptr + 56); _sum = _mm256_fmadd_ps(_w0, _val0, _sum); _sum = _mm256_fmadd_ps(_w1, _val1, _sum); _sum = _mm256_fmadd_ps(_w2, _val2, _sum); _sum = _mm256_fmadd_ps(_w3, _val3, 
_sum); _sum = _mm256_fmadd_ps(_w4, _val4, _sum); _sum = _mm256_fmadd_ps(_w5, _val5, _sum); _sum = _mm256_fmadd_ps(_w6, _val6, _sum); _sum = _mm256_fmadd_ps(_w7, _val7, _sum); tmpptr += 8; kptr += 64; } _mm256_storeu_ps(outptr, _sum); outptr += 8; } } } static void conv1x1s2_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = (w - 2 * outw + w) * 8; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < channels; p++) { const float* r0 = bottom_blob.channel(p); float* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { __m256 _v = _mm256_loadu_ps(r0); _mm256_storeu_ps(outptr, _v); r0 += 16; outptr += 8; } r0 += tailstep; } } conv1x1s1_sgemm_fp16_pack8_avx(bottom_blob_shrinked, top_blob, kernel, _bias, opt); }
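The kernels above widen half-precision weights on the fly through a loadfp16 helper that is not shown in this excerpt. A minimal sketch of such a helper, assuming the weights are IEEE-754 binary16 and the target supports the F16C extension (the library's actual implementation may differ, e.g. with a scalar fallback when F16C is unavailable):

#include <immintrin.h>

/* Hypothetical stand-in for the loadfp16 helper used above: widen 8
 * half-precision weights into the 8 single-precision lanes of one __m256.
 * Requires F16C (compile with -mf16c or -mavx2). */
static inline __m256 loadfp16(const unsigned short* ptr)
{
    __m128i raw = _mm_loadu_si128((const __m128i*)ptr); /* 8 x fp16, 16 bytes */
    return _mm256_cvtph_ps(raw);                        /* 8 x fp32 */
}

Note also the design of conv1x1s2_fp16_pack8_avx: rather than writing a dedicated stride-2 kernel, it gathers every second pixel of each row into a contiguous shrunken buffer (tailstep = (w - 2 * outw + w) * 8 floats skips the tail of the current row plus the whole next row) and then reuses the stride-1 sgemm kernel unchanged.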
DRB099-targetparallelfor2-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> /* use of omp target + map + array sections derived from pointers */ void foo (double* a, double* b, int N) { int i; #pragma omp parallel for for (i=0;i< N ;i++) b[i]=a[i]*(double)i; } int main(int argc, char* argv[]) { int i; int len = 1000; double a[len], b[len]; #pragma omp parallel for for (i=0; i<len; i++) { a[i]= ((double)i)/2.0; b[i]=0.0; } foo(a, b, len); printf("b[50]=%f\n",b[50]); return 0; }
#include <stdio.h> /* * use of omp target + map + array sections derived from pointers */ void foo(double *a, double *b, int N) { int i; for (i = 0; i < N; i++) b[i] = a[i] * (double)i; } int main(int argc, char *argv[]) { int i; int len = 1000; double a[len], b[len]; for (i = 0; i < len; i++) { a[i] = ((double)i) / 2.0; b[i] = 0.0; } foo(a, b, len); printf("b[50]=%f\n", b[50]); return 0; }
#include <stdio.h> /* * use of omp target + map + array sections derived from pointers */ void foo(double *a, double *b, int N) { int i; #pragma omp parallel for for (i = 0; i < N; i++) b[i] = a[i] * (double)i; } int main(int argc, char *argv[]) { int i; int len = 1000; double a[len], b[len]; #pragma omp parallel for for (i = 0; i < len; i++) { a[i] = ((double)i) / 2.0; b[i] = 0.0; } foo(a, b, len); printf("b[50]=%f\n", b[50]); return 0; }
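Both OpenMP variants of this benchmark are race-free: iteration i writes only b[i], so no two threads ever store to the same element (the file's comment mentions omp target + map, but the directives shown in these variants are plain parallel for). A sketch restating the kernel with the implicit data-sharing spelled out, illustrative only and not part of the benchmark:

/* a and b are shared arrays, N is shared, and the loop index is private
 * per thread; writes land in disjoint elements of b, so the loop carries
 * no data race. */
void foo_explicit(double *a, double *b, int N)
{
    int i;
#pragma omp parallel for default(none) shared(a, b, N) private(i)
    for (i = 0; i < N; i++)
        b[i] = a[i] * (double)i;
}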
DenseLayer.c
/* * DenseLayer.c * Francesco Conti <f.conti@unibo.it> * * Copyright (C) 2015 ETH Zurich, University of Bologna * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD license. See the LICENSE file for details. */ #include "linalg.h" #include "tiling.h" #include "DenseLayer.h" #ifdef CCN_TILING_LESSTIME #define _dense_tiling_init(); \ unsigned char (*tile_grid_non)[layer->ntile_nin] = (unsigned char (*)[layer->ntile_nin]) layer->tile_grid_non; \ unsigned char (*tile_grid_nin)[layer->ntile_nin] = (unsigned char (*)[layer->ntile_nin]) layer->tile_grid_nin; \ int _non = tile_grid_non[aa][bb]; \ int _nin = tile_grid_nin[aa][bb]; #else /* ~CCN_TILING_LESSTIME */ #define _dense_tiling_init(); \ int _non = (aa < layer->ntile_full_non) ? layer->tiling_max_non : layer->tlast_non; \ int _nin = (bb < layer->ntile_full_nin) ? layer->tiling_max_nin : layer->tlast_nin; #endif /* ~CCN_TILING_LESSTIME */ #define _dense_notiling_init(); \ int _non = layer->n_out_neurons; \ int _nin = layer->n_in_neurons; /** * Allocates a new DenseLayer data structure and its fields (weight, bias, * output feature maps). * * @return a pointer to the new DenseLayer data structure. * * @param n_in_neurons * the number of input feature maps. * @param n_out_neurons * the number of output feature maps. * @param input_height * the height of the input feature maps. * @param input_width * the width of the input feature maps. * @param output_height * the height of the output feature maps. * @param output_width * the width of the output feature maps. * @param activation * 1 if activation is tanh, 0 if no activation. * @param *x * a *mandatory* pointer to the input feature maps. * @param *y * an *optional* pointer to the already-allocated output feature maps. If * NULL, DenseLayer_new() will allocate y automatically. */ DenseLayer *DenseLayer_new( #ifdef CCN_NOALLOC DenseLayer *layer, #endif /* CCN_NOALLOC */ const char *name, data_t *w, data_t *b, data_t *x, data_t *y, data_t *loc_x0, data_t *loc_x1, data_t *loc_y0, data_t *loc_y1, data_t *loc_y2, data_t *loc_w0, data_t *loc_w1, data_t *loc_b, int n_out_neurons, int n_in_neurons, int activation, int tiling_max_non, int tiling_max_nin, unsigned qf ) { #ifndef CCN_NOALLOC // build DenseLayer DenseLayer *layer; layer = ccn_malloc(sizeof(DenseLayer)); #endif /* ifndef CCN_NOALLOC */ layer->name = name; layer->n_in_neurons = n_in_neurons; layer->n_out_neurons = n_out_neurons; layer->activation = activation; layer->w = w; layer->b = b; layer->x = x; layer->y = y; layer->qf = qf; #ifndef CCN_CACHE layer->loc_x0 = loc_x0; layer->loc_y0 = loc_y0; layer->loc_x1 = loc_x1; layer->loc_y1 = loc_y1; layer->loc_y2 = loc_y2; layer->loc_w0 = loc_w0; layer->loc_w1 = loc_w1; layer->loc_b = loc_b; #endif /* ifndef CCN_CACHE */ layer->tiling_max_non = tiling_max_non; layer->tiling_max_nin = tiling_max_nin; #ifdef CCN_TILING // define and record the number of tiles int ntile_non = (n_out_neurons % tiling_max_non ) ? n_out_neurons / tiling_max_non + 1 : n_out_neurons / tiling_max_non; int ntile_nin = (n_in_neurons % tiling_max_nin ) ? 
n_in_neurons / tiling_max_nin + 1 : n_in_neurons / tiling_max_nin; layer->ntile_non = ntile_non; layer->ntile_nin = ntile_nin; #ifdef CCN_TILING_LESSMEM layer->tlast_non = n_out_neurons % tiling_max_non; layer->tlast_nin = n_in_neurons % tiling_max_nin; layer->ntile_full_non = ntile_non; layer->ntile_full_nin = ntile_nin; #else /* ~CCN_TILING_LESSMEM */ // allocate the tile grid in a flat fashion layer->tile_grid_non = ccn_malloc(sizeof(unsigned char)*(ntile_non+NB_PIPE_STAGE-1)*ntile_nin); layer->tile_grid_nin = ccn_malloc(sizeof(unsigned char)*(ntile_non+NB_PIPE_STAGE-1)*ntile_nin); // cast the tile grid to a 4-dimensional array unsigned char (*tile_grid_non)[ntile_nin] = layer->tile_grid_non; unsigned char (*tile_grid_nin)[ntile_nin] = layer->tile_grid_nin; #endif /* ~CCN_TILING_LESSMEM */ // fill in the tile grid int aa, bb; for(aa=0; aa<layer->ntile_non; aa++) { for(bb=0; bb<layer->ntile_nin; bb++) { #ifdef CCN_TILING_LESSTIME if(bb*tiling_max_nin > n_in_neurons-tiling_max_nin) { tile_grid_nin[aa][bb] = (unsigned char) n_in_neurons % tiling_max_nin; } else { tile_grid_nin[aa][bb] = (unsigned char) tiling_max_nin; } if(aa*tiling_max_non > n_out_neurons-tiling_max_non) { tile_grid_non[aa][bb] = (unsigned char) n_out_neurons % tiling_max_non; } else { tile_grid_non[aa][bb] = (unsigned char) tiling_max_non; } #else /* ~CCN_TILING_LESSTIME */ if(bb*tiling_max_nin > n_in_neurons-tiling_max_nin) { layer->ntile_full_nin = bb; } if(aa*tiling_max_non > n_out_neurons-tiling_max_non) { layer->ntile_full_non = aa; } #endif /* ~CCN_TILING_LESSTIME */ } } #ifdef CCN_TILING_LESSTIME for(aa=layer->ntile_non; aa<layer->ntile_non+NB_PIPE_STAGE-1; aa++) { for(bb=0; bb<layer->ntile_nin; bb++) { tile_grid_nin[aa][bb] = tiling_max_nin; tile_grid_non[aa][bb] = tiling_max_non; } } #endif /* CCN_TILING_LESSTIME */ #else /* ~CCN_TILING */ // no tile grid int ntile_non = n_out_neurons; int ntile_nin = n_in_neurons; layer->ntile_non = ntile_non; layer->ntile_nin = ntile_nin; #endif /* ~CCN_TILING */ #ifdef TILING_DEBUG printf("[DenseLayer %s] NOn grid:\n", layer->name); for(aa=0; aa<layer->ntile_non; aa++) { printf(" "); for(bb=0; bb<layer->ntile_nin; bb++) { printf("%d ", tile_grid_non[aa][bb]); } printf("\n"); } printf("[DenseLayer %s] NIn grid:\n", layer->name); for(aa=0; aa<layer->ntile_non; aa++) { printf(" "); for(bb=0; bb<layer->ntile_nin; bb++) { printf("%d ", tile_grid_nin[aa][bb]); } printf("\n"); } #endif /* TILING_DEBUG */ return layer; } void DenseLayer_delete( DenseLayer *layer ) { #ifndef CCN_CACHE ccn_free(layer->loc_w0); ccn_free(layer->loc_w1); ccn_free(layer->loc_b); #endif /* ~CCN_CACHE */ #ifdef CCN_TILING ccn_free(layer->tile_grid_non); ccn_free(layer->tile_grid_nin); #endif /* ~CCN_TILING */ ccn_free(layer); } static void DenseLayer_pipe_fe( DenseLayer *layer, int aa, int bb ) { #ifdef CCN_CACHE return; #endif // if aa is -1, it means that this is the last tile (and bb, ii, jj also = -1) if(aa==-1) return; #ifdef FETCH_PROFILE perf_enable_all(); perf_reset(); perf_start(); #endif /* FETCH_PROFILE */ { _dense_tiling_init() data_t *l2_x = ccn_get_tile_1d( layer->x, bb, layer->tiling_max_nin ); data_t *l2_y = ccn_get_tile_1d( layer->y, aa, layer->tiling_max_non ); data_t *l2_W = ccn_get_tile_2d( layer->w, bb, aa, layer->tiling_max_nin, layer->tiling_max_non, layer->n_out_neurons ); // X tile copy-in ccn_memcpy_async( layer->loc_x_fe, // pointers l2_x, _nin*sizeof(data_t) ); // W copy-in (check misalignment) ccn_memcpy_async_2d( layer->loc_w_fe, // pointers l2_W, _nin, // sizes 
_non*sizeof(data_t), _non*sizeof(data_t), // local strides layer->n_out_neurons*sizeof(data_t) // remote strides ); // b copy-in if(bb==0) { ccn_memcpy_async( layer->loc_b, &layer->b[aa*layer->tiling_max_non], _non*sizeof(data_t) ); } #ifdef FETCH_CHECKSUM int32_t sum_x = 0; int32_t sum_W = 0; int32_t sum_y = 0; for(int i=0; i<_nin; i++) { sum_x += layer->loc_x_fe[i]; } for(int i=0; i<_non*_nin; i++) { sum_W += layer->loc_w_fe[i]; } for(int i=0; i<_non; i++) { sum_y += layer->loc_y_fe[i]; } printf("[DenseLayer %s] Fetch checksum %d,%d: x=%d W=%d y=%d\n", layer->name, aa, bb, sum_x, sum_W, sum_y); #endif /* FETCH_CHECKSUM */ } #ifdef FETCH_PROFILE perf_stop(); int t0 = perf_get_cycles(); printf("[DenseLayer %s] Fetch profiling: %d\n", layer->name, t0); #endif /* FETCH_PROFILE */ } static void DenseLayer_pipe_ex( DenseLayer *layer, int aa, int bb ) { // if aa is -1, it means that this is the first tile (and bb, ii, jj also = -1) if(aa==-1) return; #ifdef EXECUTE_PROFILE perf_enable_all(); perf_reset(); perf_start(); #endif /* EXECUTE_PROFILE */ // #pragma omp single nowait { #ifdef INTERM_CHECKSUM int print_flag = 0; #endif #ifdef CCN_TILING _dense_tiling_init() #else /* ~CCN_TILING */ _dense_notiling_init() #endif /* ~CCN_TILING */ #ifndef CCN_CACHE data_t *_x = layer->loc_x_ex; data_t *_y = layer->loc_y_ex; data_t *_W = layer->loc_w_ex; data_t *_b = layer->loc_b; #ifndef CCN_DOUBLEBUF // wait for the end of the fetch stage if not doing double buffering // ccn_memcpy_wait(); // #pragma omp barrier #endif /* ~CCN_DOUBLEBUF */ #else /* CCN_CACHE */ data_t *_x = ccn_get_tile_1d( layer->x, bb, layer->tiling_max_nin ); data_t *_y = ccn_get_tile_1d( layer->y, aa, layer->tiling_max_non ); data_t *_W = ccn_get_tile_2d( layer->w, aa, bb, layer->tiling_max_non, layer->tiling_max_nin, layer->n_in_neurons*layer->n_out_neurons ); #endif /* CCN_CACHE */ // biasing y if(bb==0) { for(int a=0; a<_non; a++) { _y[a] = _b[a]; } } // matrix x vector product linalg_mvprod(_W, 0, _x, _y, _nin, _non, layer->qf); // plp_matmul_i16(_W, _x, _y, _nin, _non, 1); // if(bb == layer->ntile_nin-1) { // printf("EX DEB %d %d\n", aa, bb); // for(int a=0; a<_non; a++) { // char *s = fixed2string(_y[a], 13, 5); // printf(" %d: %04x %s\n", a, _y[a], s); // free(s); // } // } // activation if(layer->activation == ACTIVATION_TANH) { for(int a=0; a<_non; a++) { _y[a] = ccn_tanh(_y[a]); } } else if(layer->activation == ACTIVATION_RELU) { for(int a=0; a<_non; a++) { _y[a] = (_y[a] < 0) ? 
0 : _y[a]; } } #ifdef TILE_CHECKSUM { int i, sum=0; printf("[DenseLayer %s] Tile checksum %d,%d: ", layer->name, aa,bb); sum=0; for(i=0; i<_nin; i++){ sum+=_x[i]; } printf("xsum=%d, ", sum); sum=0; for(i=0; i<_non*_nin; i++) { sum+=_W[i]; } printf("wsum=%d, ", sum); sum=0; for(i=0; i<_non; i++) { sum+=_y[i]; } printf("ysum=%d\n", sum); printf(" xptr=%08x, wptr=%08x, yptr=%08x\n", _x, _W, _y); } #endif } #ifdef EXECUTE_PROFILE perf_stop(); int t0 = perf_get_cycles(); printf("[DenseLayer %s] Execute profiling: %d\n", layer->name, t0); #endif /* EXECUTE_PROFILE */ } static void DenseLayer_pipe_wb( DenseLayer *layer, int aa, int bb ) { #ifdef CCN_CACHE return; #endif // if aa is -1, it means that this is the first tile (and bb, ii, jj also = -1) if(aa==-1) return; #ifdef WRITEBACK_PROFILE perf_enable_all(); perf_reset(); perf_start(); #endif /* WRITEBACK_PROFILE */ // #pragma omp single { _dense_tiling_init(); data_t *l2_y = ccn_get_tile_1d( layer->y, aa, layer->tiling_max_non ); #ifdef WRITEBACK_CHECKSUM int32_t sum = 0; for(int i=0; i<_non; i++) { sum += layer->loc_y_wb[i]; } printf("[DenseLayer %s] Writeback checksum %d,%d: %d\n", layer->name, aa, bb, sum); #endif /* WRITEBACK_CHECKSUM */ #ifdef WRITEBACK_DEBUG printf("[DenseLayer %s] Writeback debug %d,%d:\n", layer->name, aa, bb); for(int i=0; i<_non; i++) { printf(" (%d): %04x\n", i, layer->loc_y_wb[i] & 0xffff); } #endif /* WRITEBACK_DEBUG */ // Y tile copy-out if(bb == layer->ntile_nin-1) { ccn_memcpy_async(// l2_y, // pointers layer->loc_y_wb, _non*sizeof(data_t) ); } } #ifdef WRITEBACK_DEBUG #pragma omp barrier #endif #ifdef WRITEBACK_PROFILE perf_stop(); int t0 = perf_get_cycles(); printf("[DenseLayer %s] Writeback profiling: %d\n", layer->name, t0); #endif /* WRITEBACK_PROFILE */ } /** * Executes the given DenseLayer, i.e. computes its outputs given the inputs * defined in the data structure. * The DenseLayer computes the output of a densely connected neural network * layer with 3d inputs and outputs (an array of 2d feature maps). * * @param *layer * a pointer to the DenseLayer data structure to execute. */ void DenseLayer_exec(DenseLayer *layer) { // DenseLayer_exec is now organized as a pipeline with the following stages // fetch (fe) : DMA in of a tile // execute (ex) : execution of layer // write-back (wb) : DMA out of a tile // all indeces have a fetch, execute and write-back version int aa_pipe,bb_pipe; int aa_fe = -1, bb_fe = -1; int aa_ex = -1, bb_ex = -1; int aa_wb = -1, bb_wb = -1; #ifdef CCN_DOUBLEBUF // initialize double buffering in a known state int doublebuf_state_x_fe = 0; int doublebuf_state_y_fe = 0; int doublebuf_state_y_wb = 0; #endif /* CCN_DOUBLEBUF */ #ifndef CCN_CACHE // initialize state of fe local buffer pointers layer->loc_x_fe = layer->loc_x0; layer->loc_w_fe = layer->loc_w0; layer->loc_y_fe = layer->loc_y0; #endif /* ~CCN_CACHE */ // reset the weights! 
memset(layer->loc_w0, 0, sizeof(data_t)*layer->tiling_max_non*layer->tiling_max_nin); memset(layer->loc_w1, 0, sizeof(data_t)*layer->tiling_max_non*layer->tiling_max_nin); #ifdef CCN_TILING for(aa_pipe=0; aa_pipe<layer->ntile_non+NB_PIPE_STAGE-1; aa_pipe++) { for(bb_pipe=0; bb_pipe<layer->ntile_nin; bb_pipe++) { // update state of fe indeces if(bb_pipe<layer->ntile_nin) { bb_fe = bb_pipe; aa_fe = aa_pipe; } else { bb_fe = -1; aa_fe = -1; } #ifndef CCN_CACHE #ifdef CCN_DOUBLEBUF // update state of fe local buffer pointers if (doublebuf_state_x_fe == 0) { layer->loc_x_fe = layer->loc_x0; } else { layer->loc_x_fe = layer->loc_x1; } if (doublebuf_state_x_fe == 0) { layer->loc_w_fe = layer->loc_w0; } else { layer->loc_w_fe = layer->loc_w1; } if (doublebuf_state_y_fe == 0) { layer->loc_y_fe = layer->loc_y0; } else if (doublebuf_state_y_fe == 1) { layer->loc_y_fe = layer->loc_y1; } else { layer->loc_y_fe = layer->loc_y2; } #endif /* CCN_DOUBLEBUF */ #endif /* ~CCN_CACHE */ #ifdef PIPE_DEBUG printf("[DenseLayer %s pipe] aa=%d bb=%d\n", layer->name, aa_pipe, bb_pipe); printf(" fe: aa=%d bb=%d\n", aa_fe, bb_fe); printf(" ex: aa=%d bb=%d\n", aa_ex, bb_ex); printf(" wb: aa=%d bb=%d\n", aa_wb, bb_wb); printf(" doublebuf states: %d %d %d\n", doublebuf_state_x_fe, doublebuf_state_y_fe, doublebuf_state_y_wb); printf("\n"); #endif PIPE_DEBUG #ifdef PIPE_PROFILE reset_timer(); start_timer(); #endif /* PIPE_PROFILE */ // #ifndef DISABLE_OPENMP // #pragma omp parallel num_threads(3) // #endif { // fetch stage // #ifndef DISABLE_OPENMP // if(omp_get_thread_num() == THREAD_FE) // #endif DenseLayer_pipe_fe(layer, aa_fe, bb_fe); // execute stage // #ifndef DISABLE_OPENMP // if(omp_get_thread_num() == THREAD_EX) // #endif DenseLayer_pipe_ex(layer, aa_ex, bb_ex); // write-back stage // #ifndef DISABLE_OPENMP // if(omp_get_thread_num() == THREAD_WB) // #endif DenseLayer_pipe_wb(layer, aa_wb, bb_wb); } #ifdef PIPE_PROFILE stop_timer(); int t0 = get_time(); reset_timer(); printf("[DenseLayer %s] Pipe profiling: %d\n", layer->name, t0); #endif /* PIPE_PROFILE */ // update state of ex,wb indeces bb_wb = bb_ex; bb_ex = bb_fe; aa_wb = aa_ex; aa_ex = aa_fe; // update state of ex,wb local buffers layer->loc_x_ex = layer->loc_x_fe; layer->loc_w_ex = layer->loc_w_fe; layer->loc_y_wb = layer->loc_y_ex; layer->loc_y_ex = layer->loc_y_fe; #ifndef CCN_CACHE #ifdef CCN_DOUBLEBUF // switch double buffering state if (doublebuf_state_x_fe == 0) { doublebuf_state_x_fe = 1; } else { doublebuf_state_x_fe = 0; } if (bb_pipe==layer->ntile_nin-1 && doublebuf_state_y_fe == 0) { doublebuf_state_y_fe = 1; } else if (bb_pipe==layer->ntile_nin-1 && doublebuf_state_y_fe == 1) { doublebuf_state_y_fe = 0; } if (bb_pipe==layer->ntile_nin-1 && doublebuf_state_y_wb == 0) { doublebuf_state_y_wb = 1; } else if (bb_pipe==layer->ntile_nin-1 && doublebuf_state_y_wb == 1) { doublebuf_state_y_wb = 0; } #endif /* CCN_DOUBLEBUF */ #endif /* ~CCN_CACHE */ } } #else /* ~CCN_TILING */ // fetch stage DenseLayer_pipe_fe(layer, 0, 0); // execute stage DenseLayer_pipe_ex(layer, 0, 0); // write-back stage DenseLayer_pipe_wb(layer, 0, 0); #endif /* CCN_TILING */ }
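The tile counts computed in DenseLayer_new are ceiling divisions written with an explicit modulo test. An equivalent branch-free form, shown only as a sketch valid for positive operands:

/* Equivalent to (n % tile) ? n / tile + 1 : n / tile for n, tile > 0. */
static inline int div_ceil(int n, int tile)
{
    return (n + tile - 1) / tile;
}
/* e.g. with hypothetical values n_out_neurons = 1000 and
 * tiling_max_non = 64: div_ceil(1000, 64) == 16, matching the
 * modulo-test form used in DenseLayer_new. */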
#include "linalg.h" #include "tiling.h" #include "DenseLayer.h" #ifdef CCN_TILING_LESSTIME #define _dense_tiling_init(); \ unsigned char (*tile_grid_non)[layer->ntile_nin] = (unsigned char (*)[layer->ntile_nin]) layer->tile_grid_non; \ unsigned char (*tile_grid_nin)[layer->ntile_nin] = (unsigned char (*)[layer->ntile_nin]) layer->tile_grid_nin; \ int _non = tile_grid_non[aa][bb]; \ int _nin = tile_grid_nin[aa][bb]; #else /* ~CCN_TILING_LESSTIME */ #define _dense_tiling_init(); \ int _non = (aa < layer->ntile_full_non) ? layer->tiling_max_non : layer->tlast_non; \ int _nin = (bb < layer->ntile_full_nin) ? layer->tiling_max_nin : layer->tlast_nin; #endif /* ~CCN_TILING_LESSTIME */ #define _dense_notiling_init(); \ int _non = layer->n_out_neurons; \ int _nin = layer->n_in_neurons; /** * Allocates a new DenseLayer data structure and its fields (weight, bias, * output feature maps). * * @return a pointer to the new DenseLayer data structure. * * @param n_in_neurons * the number of input feature maps. * @param n_out_neurons * the number of output feature maps. * @param input_height * the height of the input feature maps. * @param input_width * the width of the input feature maps. * @param output_height * the height of the output feature maps. * @param output_width * the width of the output feature maps. * @param activation * 1 if activation is tanh, 0 if no activation. * @param *x * a *mandatory* pointer to the input feature maps. * @param *y * an *optional* pointer to the already-allocated output feature maps. If * NULL, DenseLayer_new() will allocate y automatically. */ DenseLayer * DenseLayer_new( #ifdef CCN_NOALLOC DenseLayer * layer, #endif /* CCN_NOALLOC */ const char *name, data_t * w, data_t * b, data_t * x, data_t * y, data_t * loc_x0, data_t * loc_x1, data_t * loc_y0, data_t * loc_y1, data_t * loc_y2, data_t * loc_w0, data_t * loc_w1, data_t * loc_b, int n_out_neurons, int n_in_neurons, int activation, int tiling_max_non, int tiling_max_nin, unsigned qf ) { #ifndef CCN_NOALLOC //build DenseLayer DenseLayer * layer; layer = ccn_malloc(sizeof(DenseLayer)); #endif /* ifndef CCN_NOALLOC */ layer->name = name; layer->n_in_neurons = n_in_neurons; layer->n_out_neurons = n_out_neurons; layer->activation = activation; layer->w = w; layer->b = b; layer->x = x; layer->y = y; layer->qf = qf; #ifndef CCN_CACHE layer->loc_x0 = loc_x0; layer->loc_y0 = loc_y0; layer->loc_x1 = loc_x1; layer->loc_y1 = loc_y1; layer->loc_y2 = loc_y2; layer->loc_w0 = loc_w0; layer->loc_w1 = loc_w1; layer->loc_b = loc_b; #endif /* ifndef CCN_CACHE */ layer->tiling_max_non = tiling_max_non; layer->tiling_max_nin = tiling_max_nin; #ifdef CCN_TILING //define and record the number of tiles int ntile_non = (n_out_neurons % tiling_max_non) ? n_out_neurons / tiling_max_non + 1 : n_out_neurons / tiling_max_non; int ntile_nin = (n_in_neurons % tiling_max_nin) ? 
n_in_neurons / tiling_max_nin + 1 : n_in_neurons / tiling_max_nin; layer->ntile_non = ntile_non; layer->ntile_nin = ntile_nin; #ifdef CCN_TILING_LESSMEM layer->tlast_non = n_out_neurons % tiling_max_non; layer->tlast_nin = n_in_neurons % tiling_max_nin; layer->ntile_full_non = ntile_non; layer->ntile_full_nin = ntile_nin; #else /* ~CCN_TILING_LESSMEM */ //allocate the tile grid in a flat fashion layer->tile_grid_non = ccn_malloc(sizeof(unsigned char) * (ntile_non + NB_PIPE_STAGE - 1) * ntile_nin); layer->tile_grid_nin = ccn_malloc(sizeof(unsigned char) * (ntile_non + NB_PIPE_STAGE - 1) * ntile_nin); //cast the tile grid to a 4 - dimensional array unsigned char (*tile_grid_non)[ntile_nin] = layer->tile_grid_non; unsigned char (*tile_grid_nin)[ntile_nin] = layer->tile_grid_nin; #endif /* ~CCN_TILING_LESSMEM */ //fill in the tile grid int aa, bb; for (aa = 0; aa < layer->ntile_non; aa++) { for (bb = 0; bb < layer->ntile_nin; bb++) { #ifdef CCN_TILING_LESSTIME if (bb * tiling_max_nin > n_in_neurons - tiling_max_nin) { tile_grid_nin[aa][bb] = (unsigned char)n_in_neurons % tiling_max_nin; } else { tile_grid_nin[aa][bb] = (unsigned char)tiling_max_nin; } if (aa * tiling_max_non > n_out_neurons - tiling_max_non) { tile_grid_non[aa][bb] = (unsigned char)n_out_neurons % tiling_max_non; } else { tile_grid_non[aa][bb] = (unsigned char)tiling_max_non; } #else /* ~CCN_TILING_LESSTIME */ if (bb * tiling_max_nin > n_in_neurons - tiling_max_nin) { layer->ntile_full_nin = bb; } if (aa * tiling_max_non > n_out_neurons - tiling_max_non) { layer->ntile_full_non = aa; } #endif /* ~CCN_TILING_LESSTIME */ } } #ifdef CCN_TILING_LESSTIME for (aa = layer->ntile_non; aa < layer->ntile_non + NB_PIPE_STAGE - 1; aa++) { for (bb = 0; bb < layer->ntile_nin; bb++) { tile_grid_nin[aa][bb] = tiling_max_nin; tile_grid_non[aa][bb] = tiling_max_non; } } #endif /* CCN_TILING_LESSTIME */ #else /* ~CCN_TILING */ //no tile grid int ntile_non = n_out_neurons; int ntile_nin = n_in_neurons; layer->ntile_non = ntile_non; layer->ntile_nin = ntile_nin; #endif /* ~CCN_TILING */ #ifdef TILING_DEBUG printf("[DenseLayer %s] NOn grid:\n", layer->name); for (aa = 0; aa < layer->ntile_non; aa++) { printf(" "); for (bb = 0; bb < layer->ntile_nin; bb++) { printf("%d ", tile_grid_non[aa][bb]); } printf("\n"); } printf("[DenseLayer %s] NIn grid:\n", layer->name); for (aa = 0; aa < layer->ntile_non; aa++) { printf(" "); for (bb = 0; bb < layer->ntile_nin; bb++) { printf("%d ", tile_grid_nin[aa][bb]); } printf("\n"); } #endif /* TILING_DEBUG */ return layer; } void DenseLayer_delete( DenseLayer * layer ) { #ifndef CCN_CACHE ccn_free(layer->loc_w0); ccn_free(layer->loc_w1); ccn_free(layer->loc_b); #endif /* ~CCN_CACHE */ #ifdef CCN_TILING ccn_free(layer->tile_grid_non); ccn_free(layer->tile_grid_nin); #endif /* ~CCN_TILING */ ccn_free(layer); } static void DenseLayer_pipe_fe( DenseLayer * layer, int aa, int bb ) { #ifdef CCN_CACHE return; #endif //if aa is - 1, it means that this is the last tile(and bb, ii, jj also = -1) if (aa == -1) return; #ifdef FETCH_PROFILE perf_enable_all(); perf_reset(); perf_start(); #endif /* FETCH_PROFILE */ { _dense_tiling_init() data_t *l2_x = ccn_get_tile_1d( layer->x, bb, layer->tiling_max_nin ); data_t *l2_y = ccn_get_tile_1d( layer->y, aa, layer->tiling_max_non ); data_t *l2_W = ccn_get_tile_2d( layer->w, bb, aa, layer->tiling_max_nin, layer->tiling_max_non, layer->n_out_neurons ); //X tile copy - in ccn_memcpy_async( layer->loc_x_fe, //pointers l2_x, _nin * sizeof(data_t) ); //W copy - in(check misalignment) 
ccn_memcpy_async_2d( layer->loc_w_fe, //pointers l2_W, _nin, //sizes _non * sizeof(data_t), _non * sizeof(data_t), //local strides layer->n_out_neurons * sizeof(data_t) // remote strides ); //b copy - in if (bb == 0) { ccn_memcpy_async( layer->loc_b, &layer->b[aa * layer->tiling_max_non], _non * sizeof(data_t) ); } #ifdef FETCH_CHECKSUM int32_t sum_x = 0; int32_t sum_W = 0; int32_t sum_y = 0; for (int i = 0; i < _nin; i++) { sum_x += layer->loc_x_fe[i]; } for (int i = 0; i < _non * _nin; i++) { sum_W += layer->loc_w_fe[i]; } for (int i = 0; i < _non; i++) { sum_y += layer->loc_y_fe[i]; } printf("[DenseLayer %s] Fetch checksum %d,%d: x=%d W=%d y=%d\n", layer->name, aa, bb, sum_x, sum_W, sum_y); #endif /* FETCH_CHECKSUM */ } #ifdef FETCH_PROFILE perf_stop(); int t0 = perf_get_cycles(); printf("[DenseLayer %s] Fetch profiling: %d\n", layer->name, t0); #endif /* FETCH_PROFILE */ } static void DenseLayer_pipe_ex( DenseLayer * layer, int aa, int bb ) { //if aa is - 1, it means that this is the first tile(and bb, ii, jj also = -1) if (aa == -1) return; #ifdef EXECUTE_PROFILE perf_enable_all(); perf_reset(); perf_start(); #endif /* EXECUTE_PROFILE */ // #ifdef INTERM_CHECKSUM int print_flag = 0; #endif #ifdef CCN_TILING _dense_tiling_init() #else /* ~CCN_TILING */ _dense_notiling_init() #endif /* ~CCN_TILING */ #ifndef CCN_CACHE data_t *_x = layer->loc_x_ex; data_t *_y = layer->loc_y_ex; data_t *_W = layer->loc_w_ex; data_t *_b = layer->loc_b; #ifndef CCN_DOUBLEBUF //wait for the end of the fetch stage if not doing double buffering // ccn_memcpy_wait(); // #endif /* ~CCN_DOUBLEBUF */ #else /* CCN_CACHE */ data_t *_x = ccn_get_tile_1d( layer->x, bb, layer->tiling_max_nin ); data_t *_y = ccn_get_tile_1d( layer->y, aa, layer->tiling_max_non ); data_t *_W = ccn_get_tile_2d( layer->w, aa, bb, layer->tiling_max_non, layer->tiling_max_nin, layer->n_in_neurons * layer->n_out_neurons ); #endif /* CCN_CACHE */ //biasing y if (bb == 0) { for (int a = 0; a < _non; a++) { _y[a] = _b[a]; } } //matrix x vector product linalg_mvprod(_W, 0, _x, _y, _nin, _non, layer->qf); //plp_matmul_i16(_W, _x, _y, _nin, _non, 1); //if (bb == layer->ntile_nin - 1) { //printf("EX DEB %d %d\n", aa, bb); //for (int a = 0; a < _non; a++) { //char *s = fixed2string(_y[a], 13, 5); //printf(" %d: %04x %s\n", a, _y[a], s); //free(s); // } // } //activation if (layer->activation == ACTIVATION_TANH) { for (int a = 0; a < _non; a++) { _y[a] = ccn_tanh(_y[a]); } } else if (layer->activation == ACTIVATION_RELU) { for (int a = 0; a < _non; a++) { _y[a] = (_y[a] < 0) ? 
0 : _y[a]; } } #ifdef TILE_CHECKSUM { int i, sum = 0; printf("[DenseLayer %s] Tile checksum %d,%d: ", layer->name, aa, bb); sum = 0; for (i = 0; i < _nin; i++) { sum += _x[i]; } printf("xsum=%d, ", sum); sum = 0; for (i = 0; i < _non * _nin; i++) { sum += _W[i]; } printf("wsum=%d, ", sum); sum = 0; for (i = 0; i < _non; i++) { sum += _y[i]; } printf("ysum=%d\n", sum); printf(" xptr=%08x, wptr=%08x, yptr=%08x\n", _x, _W, _y); } #endif #ifdef EXECUTE_PROFILE perf_stop(); int t0 = perf_get_cycles(); printf("[DenseLayer %s] Execute profiling: %d\n", layer->name, t0); #endif /* EXECUTE_PROFILE */ } static void DenseLayer_pipe_wb( DenseLayer * layer, int aa, int bb ) { #ifdef CCN_CACHE return; #endif //if aa is - 1, it means that this is the first tile(and bb, ii, jj also = -1) if (aa == -1) return; #ifdef WRITEBACK_PROFILE perf_enable_all(); perf_reset(); perf_start(); #endif /* WRITEBACK_PROFILE */ // _dense_tiling_init(); data_t *l2_y = ccn_get_tile_1d( layer->y, aa, layer->tiling_max_non ); #ifdef WRITEBACK_CHECKSUM int32_t sum = 0; for (int i = 0; i < _non; i++) { sum += layer->loc_y_wb[i]; } printf("[DenseLayer %s] Writeback checksum %d,%d: %d\n", layer->name, aa, bb, sum); #endif /* WRITEBACK_CHECKSUM */ #ifdef WRITEBACK_DEBUG printf("[DenseLayer %s] Writeback debug %d,%d:\n", layer->name, aa, bb); for (int i = 0; i < _non; i++) { printf(" (%d): %04x\n", i, layer->loc_y_wb[i] & 0xffff); } #endif /* WRITEBACK_DEBUG */ //Y tile copy - out if (bb == layer->ntile_nin - 1) { ccn_memcpy_async(// l2_y, //pointers layer->loc_y_wb, _non * sizeof(data_t) ); } #ifdef WRITEBACK_DEBUG #endif #ifdef WRITEBACK_PROFILE perf_stop(); int t0 = perf_get_cycles(); printf("[DenseLayer %s] Writeback profiling: %d\n", layer->name, t0); #endif /* WRITEBACK_PROFILE */ } /** * Executes the given DenseLayer, i.e. computes its outputs given the inputs * defined in the data structure. * The DenseLayer computes the output of a densely connected neural network * layer with 3d inputs and outputs (an array of 2d feature maps). * * @param *layer * a pointer to the DenseLayer data structure to execute. */ void DenseLayer_exec(DenseLayer * layer) { //DenseLayer_exec is now organized as a pipeline with the following stages // fetch(fe):DMA in of a tile // execute(ex):execution of layer // write - back(wb):DMA out of a tile // all indeces have a fetch, execute and write - back version int aa_pipe, bb_pipe; int aa_fe = -1, bb_fe = -1; int aa_ex = -1, bb_ex = -1; int aa_wb = -1, bb_wb = -1; #ifdef CCN_DOUBLEBUF //initialize double buffering in a known state int doublebuf_state_x_fe = 0; int doublebuf_state_y_fe = 0; int doublebuf_state_y_wb = 0; #endif /* CCN_DOUBLEBUF */ #ifndef CCN_CACHE //initialize state of fe local buffer pointers layer->loc_x_fe = layer->loc_x0; layer->loc_w_fe = layer->loc_w0; layer->loc_y_fe = layer->loc_y0; #endif /* ~CCN_CACHE */ //reset the weights ! 
memset(layer->loc_w0, 0, sizeof(data_t) * layer->tiling_max_non * layer->tiling_max_nin); memset(layer->loc_w1, 0, sizeof(data_t) * layer->tiling_max_non * layer->tiling_max_nin); #ifdef CCN_TILING for (aa_pipe = 0; aa_pipe < layer->ntile_non + NB_PIPE_STAGE - 1; aa_pipe++) { for (bb_pipe = 0; bb_pipe < layer->ntile_nin; bb_pipe++) { //update state of fe indeces if (bb_pipe < layer->ntile_nin) { bb_fe = bb_pipe; aa_fe = aa_pipe; } else { bb_fe = -1; aa_fe = -1; } #ifndef CCN_CACHE #ifdef CCN_DOUBLEBUF //update state of fe local buffer pointers if (doublebuf_state_x_fe == 0) { layer->loc_x_fe = layer->loc_x0; } else { layer->loc_x_fe = layer->loc_x1; } if (doublebuf_state_x_fe == 0) { layer->loc_w_fe = layer->loc_w0; } else { layer->loc_w_fe = layer->loc_w1; } if (doublebuf_state_y_fe == 0) { layer->loc_y_fe = layer->loc_y0; } else if (doublebuf_state_y_fe == 1) { layer->loc_y_fe = layer->loc_y1; } else { layer->loc_y_fe = layer->loc_y2; } #endif /* CCN_DOUBLEBUF */ #endif /* ~CCN_CACHE */ #ifdef PIPE_DEBUG printf("[DenseLayer %s pipe] aa=%d bb=%d\n", layer->name, aa_pipe, bb_pipe); printf(" fe: aa=%d bb=%d\n", aa_fe, bb_fe); printf(" ex: aa=%d bb=%d\n", aa_ex, bb_ex); printf(" wb: aa=%d bb=%d\n", aa_wb, bb_wb); printf(" doublebuf states: %d %d %d\n", doublebuf_state_x_fe, doublebuf_state_y_fe, doublebuf_state_y_wb); printf("\n"); #endif /* PIPE_DEBUG */ #ifdef PIPE_PROFILE reset_timer(); start_timer(); #endif /* PIPE_PROFILE */ // #ifndef DISABLE_OPENMP // // #endif { //fetch stage // #ifndef DISABLE_OPENMP //if (omp_get_thread_num() == THREAD_FE) // #endif DenseLayer_pipe_fe(layer, aa_fe, bb_fe); //execute stage // #ifndef DISABLE_OPENMP //if (omp_get_thread_num() == THREAD_EX) // #endif DenseLayer_pipe_ex(layer, aa_ex, bb_ex); //write - back stage // #ifndef DISABLE_OPENMP //if (omp_get_thread_num() == THREAD_WB) // #endif DenseLayer_pipe_wb(layer, aa_wb, bb_wb); } #ifdef PIPE_PROFILE stop_timer(); int t0 = get_time(); reset_timer(); printf("[DenseLayer %s] Pipe profiling: %d\n", layer->name, t0); #endif /* PIPE_PROFILE */ //update state of ex, wb indeces bb_wb = bb_ex; bb_ex = bb_fe; aa_wb = aa_ex; aa_ex = aa_fe; //update state of ex, wb local buffers layer->loc_x_ex = layer->loc_x_fe; layer->loc_w_ex = layer->loc_w_fe; layer->loc_y_wb = layer->loc_y_ex; layer->loc_y_ex = layer->loc_y_fe; #ifndef CCN_CACHE #ifdef CCN_DOUBLEBUF //switch double buffering state if (doublebuf_state_x_fe == 0) { doublebuf_state_x_fe = 1; } else { doublebuf_state_x_fe = 0; } if (bb_pipe == layer->ntile_nin - 1 && doublebuf_state_y_fe == 0) { doublebuf_state_y_fe = 1; } else if (bb_pipe == layer->ntile_nin - 1 && doublebuf_state_y_fe == 1) { doublebuf_state_y_fe = 0; } if (bb_pipe == layer->ntile_nin - 1 && doublebuf_state_y_wb == 0) { doublebuf_state_y_wb = 1; } else if (bb_pipe == layer->ntile_nin - 1 && doublebuf_state_y_wb == 1) { doublebuf_state_y_wb = 0; } #endif /* CCN_DOUBLEBUF */ #endif /* ~CCN_CACHE */ } } #else /* ~CCN_TILING */ //fetch stage DenseLayer_pipe_fe(layer, 0, 0); //execute stage DenseLayer_pipe_ex(layer, 0, 0); //write - back stage DenseLayer_pipe_wb(layer, 0, 0); #endif /* CCN_TILING */ }
#include "linalg.h" #include "tiling.h" #include "DenseLayer.h" #ifdef CCN_TILING_LESSTIME #define _dense_tiling_init(); \ unsigned char (*tile_grid_non)[layer->ntile_nin] = (unsigned char (*)[layer->ntile_nin]) layer->tile_grid_non; \ unsigned char (*tile_grid_nin)[layer->ntile_nin] = (unsigned char (*)[layer->ntile_nin]) layer->tile_grid_nin; \ int _non = tile_grid_non[aa][bb]; \ int _nin = tile_grid_nin[aa][bb]; #else /* ~CCN_TILING_LESSTIME */ #define _dense_tiling_init(); \ int _non = (aa < layer->ntile_full_non) ? layer->tiling_max_non : layer->tlast_non; \ int _nin = (bb < layer->ntile_full_nin) ? layer->tiling_max_nin : layer->tlast_nin; #endif /* ~CCN_TILING_LESSTIME */ #define _dense_notiling_init(); \ int _non = layer->n_out_neurons; \ int _nin = layer->n_in_neurons; /** * Allocates a new DenseLayer data structure and its fields (weight, bias, * output feature maps). * * @return a pointer to the new DenseLayer data structure. * * @param n_in_neurons * the number of input feature maps. * @param n_out_neurons * the number of output feature maps. * @param input_height * the height of the input feature maps. * @param input_width * the width of the input feature maps. * @param output_height * the height of the output feature maps. * @param output_width * the width of the output feature maps. * @param activation * 1 if activation is tanh, 0 if no activation. * @param *x * a *mandatory* pointer to the input feature maps. * @param *y * an *optional* pointer to the already-allocated output feature maps. If * NULL, DenseLayer_new() will allocate y automatically. */ DenseLayer * DenseLayer_new( #ifdef CCN_NOALLOC DenseLayer * layer, #endif /* CCN_NOALLOC */ const char *name, data_t * w, data_t * b, data_t * x, data_t * y, data_t * loc_x0, data_t * loc_x1, data_t * loc_y0, data_t * loc_y1, data_t * loc_y2, data_t * loc_w0, data_t * loc_w1, data_t * loc_b, int n_out_neurons, int n_in_neurons, int activation, int tiling_max_non, int tiling_max_nin, unsigned qf ) { #ifndef CCN_NOALLOC //build DenseLayer DenseLayer * layer; layer = ccn_malloc(sizeof(DenseLayer)); #endif /* ifndef CCN_NOALLOC */ layer->name = name; layer->n_in_neurons = n_in_neurons; layer->n_out_neurons = n_out_neurons; layer->activation = activation; layer->w = w; layer->b = b; layer->x = x; layer->y = y; layer->qf = qf; #ifndef CCN_CACHE layer->loc_x0 = loc_x0; layer->loc_y0 = loc_y0; layer->loc_x1 = loc_x1; layer->loc_y1 = loc_y1; layer->loc_y2 = loc_y2; layer->loc_w0 = loc_w0; layer->loc_w1 = loc_w1; layer->loc_b = loc_b; #endif /* ifndef CCN_CACHE */ layer->tiling_max_non = tiling_max_non; layer->tiling_max_nin = tiling_max_nin; #ifdef CCN_TILING //define and record the number of tiles int ntile_non = (n_out_neurons % tiling_max_non) ? n_out_neurons / tiling_max_non + 1 : n_out_neurons / tiling_max_non; int ntile_nin = (n_in_neurons % tiling_max_nin) ? 
n_in_neurons / tiling_max_nin + 1 : n_in_neurons / tiling_max_nin; layer->ntile_non = ntile_non; layer->ntile_nin = ntile_nin; #ifdef CCN_TILING_LESSMEM layer->tlast_non = n_out_neurons % tiling_max_non; layer->tlast_nin = n_in_neurons % tiling_max_nin; layer->ntile_full_non = ntile_non; layer->ntile_full_nin = ntile_nin; #else /* ~CCN_TILING_LESSMEM */ //allocate the tile grid in a flat fashion layer->tile_grid_non = ccn_malloc(sizeof(unsigned char) * (ntile_non + NB_PIPE_STAGE - 1) * ntile_nin); layer->tile_grid_nin = ccn_malloc(sizeof(unsigned char) * (ntile_non + NB_PIPE_STAGE - 1) * ntile_nin); //cast the tile grid to a 4 - dimensional array unsigned char (*tile_grid_non)[ntile_nin] = layer->tile_grid_non; unsigned char (*tile_grid_nin)[ntile_nin] = layer->tile_grid_nin; #endif /* ~CCN_TILING_LESSMEM */ //fill in the tile grid int aa, bb; for (aa = 0; aa < layer->ntile_non; aa++) { for (bb = 0; bb < layer->ntile_nin; bb++) { #ifdef CCN_TILING_LESSTIME if (bb * tiling_max_nin > n_in_neurons - tiling_max_nin) { tile_grid_nin[aa][bb] = (unsigned char)n_in_neurons % tiling_max_nin; } else { tile_grid_nin[aa][bb] = (unsigned char)tiling_max_nin; } if (aa * tiling_max_non > n_out_neurons - tiling_max_non) { tile_grid_non[aa][bb] = (unsigned char)n_out_neurons % tiling_max_non; } else { tile_grid_non[aa][bb] = (unsigned char)tiling_max_non; } #else /* ~CCN_TILING_LESSTIME */ if (bb * tiling_max_nin > n_in_neurons - tiling_max_nin) { layer->ntile_full_nin = bb; } if (aa * tiling_max_non > n_out_neurons - tiling_max_non) { layer->ntile_full_non = aa; } #endif /* ~CCN_TILING_LESSTIME */ } } #ifdef CCN_TILING_LESSTIME for (aa = layer->ntile_non; aa < layer->ntile_non + NB_PIPE_STAGE - 1; aa++) { for (bb = 0; bb < layer->ntile_nin; bb++) { tile_grid_nin[aa][bb] = tiling_max_nin; tile_grid_non[aa][bb] = tiling_max_non; } } #endif /* CCN_TILING_LESSTIME */ #else /* ~CCN_TILING */ //no tile grid int ntile_non = n_out_neurons; int ntile_nin = n_in_neurons; layer->ntile_non = ntile_non; layer->ntile_nin = ntile_nin; #endif /* ~CCN_TILING */ #ifdef TILING_DEBUG printf("[DenseLayer %s] NOn grid:\n", layer->name); for (aa = 0; aa < layer->ntile_non; aa++) { printf(" "); for (bb = 0; bb < layer->ntile_nin; bb++) { printf("%d ", tile_grid_non[aa][bb]); } printf("\n"); } printf("[DenseLayer %s] NIn grid:\n", layer->name); for (aa = 0; aa < layer->ntile_non; aa++) { printf(" "); for (bb = 0; bb < layer->ntile_nin; bb++) { printf("%d ", tile_grid_nin[aa][bb]); } printf("\n"); } #endif /* TILING_DEBUG */ return layer; } void DenseLayer_delete( DenseLayer * layer ) { #ifndef CCN_CACHE ccn_free(layer->loc_w0); ccn_free(layer->loc_w1); ccn_free(layer->loc_b); #endif /* ~CCN_CACHE */ #ifdef CCN_TILING ccn_free(layer->tile_grid_non); ccn_free(layer->tile_grid_nin); #endif /* ~CCN_TILING */ ccn_free(layer); } static void DenseLayer_pipe_fe( DenseLayer * layer, int aa, int bb ) { #ifdef CCN_CACHE return; #endif //if aa is - 1, it means that this is the last tile(and bb, ii, jj also = -1) if (aa == -1) return; #ifdef FETCH_PROFILE perf_enable_all(); perf_reset(); perf_start(); #endif /* FETCH_PROFILE */ { _dense_tiling_init() data_t *l2_x = ccn_get_tile_1d( layer->x, bb, layer->tiling_max_nin ); data_t *l2_y = ccn_get_tile_1d( layer->y, aa, layer->tiling_max_non ); data_t *l2_W = ccn_get_tile_2d( layer->w, bb, aa, layer->tiling_max_nin, layer->tiling_max_non, layer->n_out_neurons ); //X tile copy - in ccn_memcpy_async( layer->loc_x_fe, //pointers l2_x, _nin * sizeof(data_t) ); //W copy - in(check misalignment) 
ccn_memcpy_async_2d( layer->loc_w_fe, //pointers l2_W, _nin, //sizes _non * sizeof(data_t), _non * sizeof(data_t), //local strides layer->n_out_neurons * sizeof(data_t) // remote strides ); //b copy - in if (bb == 0) { ccn_memcpy_async( layer->loc_b, &layer->b[aa * layer->tiling_max_non], _non * sizeof(data_t) ); } #ifdef FETCH_CHECKSUM int32_t sum_x = 0; int32_t sum_W = 0; int32_t sum_y = 0; for (int i = 0; i < _nin; i++) { sum_x += layer->loc_x_fe[i]; } for (int i = 0; i < _non * _nin; i++) { sum_W += layer->loc_w_fe[i]; } for (int i = 0; i < _non; i++) { sum_y += layer->loc_y_fe[i]; } printf("[DenseLayer %s] Fetch checksum %d,%d: x=%d W=%d y=%d\n", layer->name, aa, bb, sum_x, sum_W, sum_y); #endif /* FETCH_CHECKSUM */ } #ifdef FETCH_PROFILE perf_stop(); int t0 = perf_get_cycles(); printf("[DenseLayer %s] Fetch profiling: %d\n", layer->name, t0); #endif /* FETCH_PROFILE */ } static void DenseLayer_pipe_ex( DenseLayer * layer, int aa, int bb ) { //if aa is - 1, it means that this is the first tile(and bb, ii, jj also = -1) if (aa == -1) return; #ifdef EXECUTE_PROFILE perf_enable_all(); perf_reset(); perf_start(); #endif /* EXECUTE_PROFILE */ // #pragma omp single nowait { #ifdef INTERM_CHECKSUM int print_flag = 0; #endif #ifdef CCN_TILING _dense_tiling_init() #else /* ~CCN_TILING */ _dense_notiling_init() #endif /* ~CCN_TILING */ #ifndef CCN_CACHE data_t *_x = layer->loc_x_ex; data_t *_y = layer->loc_y_ex; data_t *_W = layer->loc_w_ex; data_t *_b = layer->loc_b; #ifndef CCN_DOUBLEBUF //wait for the end of the fetch stage if not doing double buffering // ccn_memcpy_wait(); // #pragma omp barrier #endif /* ~CCN_DOUBLEBUF */ #else /* CCN_CACHE */ data_t *_x = ccn_get_tile_1d( layer->x, bb, layer->tiling_max_nin ); data_t *_y = ccn_get_tile_1d( layer->y, aa, layer->tiling_max_non ); data_t *_W = ccn_get_tile_2d( layer->w, aa, bb, layer->tiling_max_non, layer->tiling_max_nin, layer->n_in_neurons * layer->n_out_neurons ); #endif /* CCN_CACHE */ //biasing y if (bb == 0) { for (int a = 0; a < _non; a++) { _y[a] = _b[a]; } } //matrix x vector product linalg_mvprod(_W, 0, _x, _y, _nin, _non, layer->qf); //plp_matmul_i16(_W, _x, _y, _nin, _non, 1); //if (bb == layer->ntile_nin - 1) { //printf("EX DEB %d %d\n", aa, bb); //for (int a = 0; a < _non; a++) { //char *s = fixed2string(_y[a], 13, 5); //printf(" %d: %04x %s\n", a, _y[a], s); //free(s); // } // } //activation if (layer->activation == ACTIVATION_TANH) { for (int a = 0; a < _non; a++) { _y[a] = ccn_tanh(_y[a]); } } else if (layer->activation == ACTIVATION_RELU) { for (int a = 0; a < _non; a++) { _y[a] = (_y[a] < 0) ? 
0 : _y[a]; } } #ifdef TILE_CHECKSUM { int i, sum = 0; printf("[DenseLayer %s] Tile checksum %d,%d: ", layer->name, aa, bb); sum = 0; for (i = 0; i < _nin; i++) { sum += _x[i]; } printf("xsum=%d, ", sum); sum = 0; for (i = 0; i < _non * _nin; i++) { sum += _W[i]; } printf("wsum=%d, ", sum); sum = 0; for (i = 0; i < _non; i++) { sum += _y[i]; } printf("ysum=%d\n", sum); printf(" xptr=%08x, wptr=%08x, yptr=%08x\n", _x, _W, _y); } #endif } #ifdef EXECUTE_PROFILE perf_stop(); int t0 = perf_get_cycles(); printf("[DenseLayer %s] Execute profiling: %d\n", layer->name, t0); #endif /* EXECUTE_PROFILE */ } static void DenseLayer_pipe_wb( DenseLayer * layer, int aa, int bb ) { #ifdef CCN_CACHE return; #endif //if aa is - 1, it means that this is the first tile(and bb, ii, jj also = -1) if (aa == -1) return; #ifdef WRITEBACK_PROFILE perf_enable_all(); perf_reset(); perf_start(); #endif /* WRITEBACK_PROFILE */ // #pragma omp single { _dense_tiling_init(); data_t *l2_y = ccn_get_tile_1d( layer->y, aa, layer->tiling_max_non ); #ifdef WRITEBACK_CHECKSUM int32_t sum = 0; for (int i = 0; i < _non; i++) { sum += layer->loc_y_wb[i]; } printf("[DenseLayer %s] Writeback checksum %d,%d: %d\n", layer->name, aa, bb, sum); #endif /* WRITEBACK_CHECKSUM */ #ifdef WRITEBACK_DEBUG printf("[DenseLayer %s] Writeback debug %d,%d:\n", layer->name, aa, bb); for (int i = 0; i < _non; i++) { printf(" (%d): %04x\n", i, layer->loc_y_wb[i] & 0xffff); } #endif /* WRITEBACK_DEBUG */ //Y tile copy - out if (bb == layer->ntile_nin - 1) { ccn_memcpy_async(// l2_y, //pointers layer->loc_y_wb, _non * sizeof(data_t) ); } } #ifdef WRITEBACK_DEBUG #pragma omp barrier #endif #ifdef WRITEBACK_PROFILE perf_stop(); int t0 = perf_get_cycles(); printf("[DenseLayer %s] Writeback profiling: %d\n", layer->name, t0); #endif /* WRITEBACK_PROFILE */ } /** * Executes the given DenseLayer, i.e. computes its outputs given the inputs * defined in the data structure. * The DenseLayer computes the output of a densely connected neural network * layer with 3d inputs and outputs (an array of 2d feature maps). * * @param *layer * a pointer to the DenseLayer data structure to execute. */ void DenseLayer_exec(DenseLayer * layer) { //DenseLayer_exec is now organized as a pipeline with the following stages // fetch(fe):DMA in of a tile // execute(ex):execution of layer // write - back(wb):DMA out of a tile // all indeces have a fetch, execute and write - back version int aa_pipe, bb_pipe; int aa_fe = -1, bb_fe = -1; int aa_ex = -1, bb_ex = -1; int aa_wb = -1, bb_wb = -1; #ifdef CCN_DOUBLEBUF //initialize double buffering in a known state int doublebuf_state_x_fe = 0; int doublebuf_state_y_fe = 0; int doublebuf_state_y_wb = 0; #endif /* CCN_DOUBLEBUF */ #ifndef CCN_CACHE //initialize state of fe local buffer pointers layer->loc_x_fe = layer->loc_x0; layer->loc_w_fe = layer->loc_w0; layer->loc_y_fe = layer->loc_y0; #endif /* ~CCN_CACHE */ //reset the weights ! 
memset(layer->loc_w0, 0, sizeof(data_t) * layer->tiling_max_non * layer->tiling_max_nin); memset(layer->loc_w1, 0, sizeof(data_t) * layer->tiling_max_non * layer->tiling_max_nin); #ifdef CCN_TILING for (aa_pipe = 0; aa_pipe < layer->ntile_non + NB_PIPE_STAGE - 1; aa_pipe++) { for (bb_pipe = 0; bb_pipe < layer->ntile_nin; bb_pipe++) { //update state of fe indeces if (bb_pipe < layer->ntile_nin) { bb_fe = bb_pipe; aa_fe = aa_pipe; } else { bb_fe = -1; aa_fe = -1; } #ifndef CCN_CACHE #ifdef CCN_DOUBLEBUF //update state of fe local buffer pointers if (doublebuf_state_x_fe == 0) { layer->loc_x_fe = layer->loc_x0; } else { layer->loc_x_fe = layer->loc_x1; } if (doublebuf_state_x_fe == 0) { layer->loc_w_fe = layer->loc_w0; } else { layer->loc_w_fe = layer->loc_w1; } if (doublebuf_state_y_fe == 0) { layer->loc_y_fe = layer->loc_y0; } else if (doublebuf_state_y_fe == 1) { layer->loc_y_fe = layer->loc_y1; } else { layer->loc_y_fe = layer->loc_y2; } #endif /* CCN_DOUBLEBUF */ #endif /* ~CCN_CACHE */ #ifdef PIPE_DEBUG printf("[DenseLayer %s pipe] aa=%d bb=%d\n", layer->name, aa_pipe, bb_pipe); printf(" fe: aa=%d bb=%d\n", aa_fe, bb_fe); printf(" ex: aa=%d bb=%d\n", aa_ex, bb_ex); printf(" wb: aa=%d bb=%d\n", aa_wb, bb_wb); printf(" doublebuf states: %d %d %d\n", doublebuf_state_x_fe, doublebuf_state_y_fe, doublebuf_state_y_wb); printf("\n"); #endif /* PIPE_DEBUG */ #ifdef PIPE_PROFILE reset_timer(); start_timer(); #endif /* PIPE_PROFILE */ // #ifndef DISABLE_OPENMP // #pragma omp parallel num_threads(3) // #endif { //fetch stage // #ifndef DISABLE_OPENMP //if (omp_get_thread_num() == THREAD_FE) // #endif DenseLayer_pipe_fe(layer, aa_fe, bb_fe); //execute stage // #ifndef DISABLE_OPENMP //if (omp_get_thread_num() == THREAD_EX) // #endif DenseLayer_pipe_ex(layer, aa_ex, bb_ex); //write - back stage // #ifndef DISABLE_OPENMP //if (omp_get_thread_num() == THREAD_WB) // #endif DenseLayer_pipe_wb(layer, aa_wb, bb_wb); } #ifdef PIPE_PROFILE stop_timer(); int t0 = get_time(); reset_timer(); printf("[DenseLayer %s] Pipe profiling: %d\n", layer->name, t0); #endif /* PIPE_PROFILE */ //update state of ex, wb indeces bb_wb = bb_ex; bb_ex = bb_fe; aa_wb = aa_ex; aa_ex = aa_fe; //update state of ex, wb local buffers layer->loc_x_ex = layer->loc_x_fe; layer->loc_w_ex = layer->loc_w_fe; layer->loc_y_wb = layer->loc_y_ex; layer->loc_y_ex = layer->loc_y_fe; #ifndef CCN_CACHE #ifdef CCN_DOUBLEBUF //switch double buffering state if (doublebuf_state_x_fe == 0) { doublebuf_state_x_fe = 1; } else { doublebuf_state_x_fe = 0; } if (bb_pipe == layer->ntile_nin - 1 && doublebuf_state_y_fe == 0) { doublebuf_state_y_fe = 1; } else if (bb_pipe == layer->ntile_nin - 1 && doublebuf_state_y_fe == 1) { doublebuf_state_y_fe = 0; } if (bb_pipe == layer->ntile_nin - 1 && doublebuf_state_y_wb == 0) { doublebuf_state_y_wb = 1; } else if (bb_pipe == layer->ntile_nin - 1 && doublebuf_state_y_wb == 1) { doublebuf_state_y_wb = 0; } #endif /* CCN_DOUBLEBUF */ #endif /* ~CCN_CACHE */ } } #else /* ~CCN_TILING */ //fetch stage DenseLayer_pipe_fe(layer, 0, 0); //execute stage DenseLayer_pipe_ex(layer, 0, 0); //write - back stage DenseLayer_pipe_wb(layer, 0, 0); #endif /* CCN_TILING */ }
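DenseLayer_exec staggers its three stages so that, at pipeline step t, fetch works on tile t while execute and write-back work on tiles t-1 and t-2; the outer loop runs NB_PIPE_STAGE-1 extra iterations to drain the pipe. A minimal sketch of that index rotation, with the two tile coordinates collapsed into a single counter (the stage stubs and names are illustrative, not the library's API):

static void fetch(int t)     { if (t < 0) return; /* DMA-in of tile t  */ }
static void execute(int t)   { if (t < 0) return; /* compute on tile t */ }
static void writeback(int t) { if (t < 0) return; /* DMA-out of tile t */ }

/* Three-stage software pipeline over n_tiles tiles; -1 marks an empty
 * stage. Two extra steps drain the execute and write-back stages. */
static void pipeline_sketch(int n_tiles)
{
    int fe, ex = -1, wb = -1;
    for (int t = 0; t < n_tiles + 2; t++) {
        fe = (t < n_tiles) ? t : -1;
        fetch(fe);     /* stage 1 on tile t     */
        execute(ex);   /* stage 2 on tile t - 1 */
        writeback(wb); /* stage 3 on tile t - 2 */
        wb = ex;       /* rotate the stage indices for the next step */
        ex = fe;
    }
}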
DenseMatrix.h
/* * DenseMatrix.h * * Created on: Nov 25, 2015 * Author: Michael Wegner (michael.wegner@student.kit.edu) */ #ifndef NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ #define NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ #include "../Globals.h" #include "AlgebraicGlobals.h" #include "Vector.h" #include <cassert> #include <vector> namespace NetworKit { /** * @ingroup algebraic * Represents a dense matrix. Use this matrix to run LU decompositions and LU solves. * Note that most matrices are rather sparse s.t. CSRMatrix might be a better representation. */ class DenseMatrix { private: count nRows; count nCols; std::vector<double> entries; double zero; public: /** Default constructor */ DenseMatrix(); /** * Constructs the DenseMatrix with size @a dimension x @a dimension. * @param dimension Defines how many rows and columns this matrix has. * @param zero The zero element (default is 0.0). */ DenseMatrix(const count dimension, double zero = 0.0); /** * Constructs the DenseMatrix with size @a nRows x @a nCols. * @param nRows Number of rows. * @param nCols Number of columns. * @param zero The zero element (default is 0.0). */ DenseMatrix(const count nRows, const count nCols, double zero = 0.0); /** * Constructs the @a dimension x @a dimension DenseMatrix from the nonzero elements specified in @a triplets. * @param dimension Defines how many rows and columns this matrix has. * @param triplets The nonzero elements. * @param zero The zero element (default is 0.0). */ DenseMatrix(const count dimension, const std::vector<Triplet>& triplets, double zero = 0.0); /** * Constructs the @a nRows x @a nCols DenseMatrix from the nonzero elements specified in @a triplets. * @param nRows Defines how many rows this matrix has. * @param nCols Defines how many columns this matrix has. * @param triplets The nonzero elements. * @param zero The zero element (default is 0.0). */ DenseMatrix(const count nRows, const count nCols, const std::vector<Triplet>& triplets, double zero = 0.0); /** * Constructs an instance of DenseMatrix given the number of rows (@a nRows) and the number of columns (@a nCols) and its * values (@a entries). * @param nRows Number of rows. * @param nCols Number of columns. * @param entries Entries of the matrix. * @param zero The zero element (default is 0.0). * @note The size of the @a entries vector should be equal to @a nRows * @a nCols. */ DenseMatrix(const count nRows, const count nCols, const std::vector<double>& entries, double zero = 0.0); /** Default destructor */ virtual ~DenseMatrix() = default; /** Default copy constructor */ DenseMatrix(const DenseMatrix &other) = default; /** Default move constructor */ DenseMatrix(DenseMatrix &&other) = default; /** Default move assignment operator */ DenseMatrix& operator=(DenseMatrix &&other) = default; /** Default copy assignment operator */ DenseMatrix& operator=(const DenseMatrix &other) = default; /** * @return Number of rows. */ inline count numberOfRows() const { return nRows; } /** * @return Number of columns. */ inline count numberOfColumns() const { return nCols; } /** * Returns the zero element of the matrix. */ inline double getZero() const { return zero; } /** * @param i The row index. * @return Number of non-zeros in row @a i. * @note This function is linear in the number of columns of the matrix. */ count nnzInRow(const index i) const; /** * @return Number of non-zeros in this matrix. * @note This function takes nRows * nCols operations. */ count nnz() const; /** * @return Value at matrix position (i,j). */ double operator()(const index i, const index j) const; /** * Set the matrix at position (@a i, @a j) to @a value. */ void setValue(const index i, const index j, const double value); /** * @return Row @a i of this matrix as vector. */ Vector row(const index i) const; /** * @return Column @a j of this matrix as vector. */ Vector column(const index j) const; /** * @return The main diagonal of this matrix. */ Vector diagonal() const; /** * Adds this matrix to @a other and returns the result. * @return The sum of this matrix and @a other. */ DenseMatrix operator+(const DenseMatrix &other) const; /** * Adds @a other to this matrix. * @return Reference to this matrix. */ DenseMatrix& operator+=(const DenseMatrix &other); /** * Subtracts @a other from this matrix and returns the result. * @return The difference of this matrix and @a other. */ DenseMatrix operator-(const DenseMatrix &other) const; /** * Subtracts @a other from this matrix. * @return Reference to this matrix. */ DenseMatrix& operator-=(const DenseMatrix &other); /** * Multiplies this matrix with a scalar specified in @a scalar and returns the result. * @return The result of multiplying this matrix with @a scalar. */ DenseMatrix operator*(const double &scalar) const; /** * Multiplies this matrix with a scalar specified in @a scalar. * @return Reference to this matrix. */ DenseMatrix& operator*=(const double &scalar); /** * Multiplies this matrix with @a vector and returns the result. * @return The result of multiplying this matrix with @a vector. */ Vector operator*(const Vector &vector) const; /** * Multiplies this matrix with @a other and returns the result in a new matrix. * @return The result of multiplying this matrix with @a other. */ DenseMatrix operator*(const DenseMatrix &other) const; /** * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix. * @return The result of dividing this matrix by @a divisor. */ DenseMatrix operator/(const double &divisor) const; /** * Divides this matrix by a divisor specified in @a divisor. * @return Reference to this matrix. */ DenseMatrix& operator/=(const double &divisor); /** * Returns the transpose of this matrix. */ DenseMatrix transpose() const; /** * Extracts a matrix with rows and columns specified by @a rowIndices and @a columnIndices from this matrix. * The order of rows and columns is equal to the order in @a rowIndices and @a columnIndices. It is also * possible to specify a row or column more than once to get duplicates. * @param rowIndices * @param columnIndices */ DenseMatrix extract(const std::vector<index>& rowIndices, const std::vector<index>& columnIndices) const; /** * Assign the contents of the matrix @a source to this matrix at rows and columns specified by @a rowIndices and * @a columnIndices. That is, entry (i,j) of @a source is assigned to entry (rowIndices[i], columnIndices[j]) of * this matrix. Note that the dimensions of @a rowIndices and @a columnIndices must coincide with the number of rows * and columns of @a source. * @param rowIndices * @param columnIndices * @param source */ void assign(const std::vector<index>& rowIndices, const std::vector<index>& columnIndices, const DenseMatrix& source); /** * Applies the unary function @a unaryElementFunction to each value in the matrix. Note that it must hold that the * function applied to the zero element of this matrix returns the zero element.
* @param unaryElementFunction */ template<typename F> void apply(const F unaryElementFunction); /** * Decomposes the given @a matrix into lower L and upper U matrix (in-place). * @param matrix The matrix to decompose into LU. */ static void LUDecomposition(DenseMatrix &matrix); /** * Computes the solution vector x to the system @a LU * x = @a b where @a LU is a matrix decomposed into L and U. * @param LU Matrix decomposed into lower L and upper U matrix. * @param b Right-hand side. * @return Solution vector x to the linear equation system LU * x = b. */ static Vector LUSolve(const DenseMatrix &LU, const Vector &b); /** * Computes @a A @a binaryOp @a B on the elements of matrix @a A and matrix @a B. * @param A * @param B * @param binaryOp Function handling (double, double) -> double * @return @a A @a binaryOp @a B. * @note @a A and @a B must have the same dimensions. */ template<typename L> static DenseMatrix binaryOperator(const DenseMatrix &A, const DenseMatrix &B, L binaryOp); /** * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template<typename L> void forElementsInRow(index row, L handle) const; /** * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template<typename L> void parallelForElementsInRow(index row, L handle) const; /** * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure). */ template<typename L> void forElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. */ template<typename L> void parallelForElementsInRowOrder(L handle) const; /** * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value). * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows()) time regardless of the number of * non-zeros actually present. */ template<typename L> void forNonZeroElementsInRow(index row, L handle) const; /** * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value) * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows()) sequential time regardless of the number * of non-zeros actually present. */ template<typename L> void parallelForNonZeroElementsInRow(index row, L handle) const; /** * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure). * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows() * numberOfColumns()) time regardless of the * number of non-zeros actually present. */ template<typename L> void forNonZeroElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows() * numberOfColumns()) sequential time regardless * of the number of non-zeros actually present. 
*/ template<typename L> void parallelForNonZeroElementsInRowOrder(L handle) const; }; template<typename F> void DenseMatrix::apply(const F unaryElementFunction) { #pragma omp parallel for for (omp_index k = 0; k < static_cast<omp_index>(entries.size()); ++k) { entries[k] = unaryElementFunction(entries[k]); } } template<typename L> inline DenseMatrix DenseMatrix::binaryOperator(const DenseMatrix &A, const DenseMatrix &B, L binaryOp) { assert(A.nRows == B.nRows && A.nCols == B.nCols); std::vector<double> resultEntries(A.numberOfRows() * A.numberOfColumns(), 0.0); #pragma omp parallel for for (omp_index i = 0; i < static_cast<omp_index>(A.numberOfRows()); ++i) { index offset = i * A.numberOfColumns(); for (index j = offset; j < offset + A.numberOfColumns(); ++j) { resultEntries[j] = binaryOp(A.entries[j], B.entries[j]); } } return DenseMatrix(A.numberOfRows(), A.numberOfColumns(), resultEntries); } template<typename L> inline void DenseMatrix::forElementsInRow(index i, L handle) const { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(j, entries[k]); } } template<typename L> inline void DenseMatrix::parallelForElementsInRow(index i, L handle) const { index offset = i * numberOfColumns(); #pragma omp parallel for for (omp_index j = 0; j < static_cast<omp_index>(numberOfColumns()); ++j) { handle(j, entries[offset + j]); } } template<typename L> inline void DenseMatrix::forElementsInRowOrder(L handle) const { for (index i = 0; i < nRows; ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } template<typename L> inline void DenseMatrix::parallelForElementsInRowOrder(L handle) const { #pragma omp parallel for for (omp_index i = 0; i < static_cast<omp_index>(nRows); ++i) { index offset = i * numberOfColumns(); for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) { handle(i, j, entries[k]); } } } template<typename L> inline void DenseMatrix::forNonZeroElementsInRow(index row, L handle) const { for (index j = 0, k = row * numberOfColumns(); j < numberOfColumns(); ++j, ++k) { if (entries[k] != getZero()) { handle(j, entries[k]); } } } template<typename L> inline void DenseMatrix::parallelForNonZeroElementsInRow(index row, L handle) const { #pragma omp parallel for for (omp_index j = 0; j < static_cast<omp_index>(numberOfColumns()); ++j) { index k = row * numberOfColumns() + j; if (entries[k] != getZero()) { handle(j, entries[k]); } } } template<typename L> inline void DenseMatrix::forNonZeroElementsInRowOrder(L handle) const { for (index i = 0; i < numberOfRows(); ++i) { for (index j = 0, k = i * numberOfColumns(); j < numberOfColumns(); ++j, ++k) { if (entries[k] != getZero()) { handle(i,j,entries[k]); } } } } template<typename L> inline void DenseMatrix::parallelForNonZeroElementsInRowOrder(L handle) const { #pragma omp parallel for for (omp_index i = 0; i < static_cast<omp_index>(numberOfRows()); ++i) { for (index j = 0, k = i * numberOfColumns(); j < numberOfColumns(); ++j, ++k) { if (entries[k] != getZero()) { handle(i,j,entries[k]); } } } } } /* namespace NetworKit */ #endif /* NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ */
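A short usage sketch of the interface declared above, solving a 2x2 system via the in-place LU routines. It assumes a NetworKit build where count/index and Vector behave as declared (in particular, that Vector is constructible from a std::vector<double> and indexable with operator[]); it is illustrative, not part of the library.

// Illustrative driver (hypothetical file): solve A*x = b with DenseMatrix.
#include "DenseMatrix.h"
#include <iostream>
#include <vector>

int main() {
    // A = [4 1; 2 3] stored row-major, b = [1; 2]
    NetworKit::DenseMatrix A(2, 2, std::vector<double>{4.0, 1.0, 2.0, 3.0});
    NetworKit::Vector b(std::vector<double>{1.0, 2.0});

    NetworKit::DenseMatrix LU = A;               // keep A intact; decompose a copy
    NetworKit::DenseMatrix::LUDecomposition(LU); // in-place LU factorization
    NetworKit::Vector x = NetworKit::DenseMatrix::LUSolve(LU, b);

    std::cout << x[0] << " " << x[1] << std::endl; // expected: 0.1 0.6
    return 0;
}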
/*
 * DenseMatrix.h
 *
 * Created on: Nov 25, 2015
 * Author: Michael Wegner (michael.wegner@student.kit.edu)
 */

#ifndef NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_
#define NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_

#include "../Globals.h"
#include "AlgebraicGlobals.h"
#include "Vector.h"

#include <cassert>
#include <vector>

namespace NetworKit {

/**
 * @ingroup algebraic
 * Represents a dense matrix. Use this matrix to run LU decompositions and LU solves.
 * Note that most matrices are rather sparse s.t. CSRMatrix might be a better representation.
 */
class DenseMatrix {
private:
    count nRows;
    count nCols;
    std::vector<double> entries;
    double zero;

public:
    /** Default constructor */
    DenseMatrix();

    /**
     * Constructs the DenseMatrix with size @a dimension x @a dimension.
     * @param dimension Defines how many rows and columns this matrix has.
     * @param zero The zero element (default is 0.0).
     */
    DenseMatrix(const count dimension, double zero = 0.0);

    /**
     * Constructs the DenseMatrix with size @a nRows x @a nCols.
     * @param nRows Number of rows.
     * @param nCols Number of columns.
     * @param zero The zero element (default is 0.0).
     */
    DenseMatrix(const count nRows, const count nCols, double zero = 0.0);

    /**
     * Constructs the @a dimension x @a dimension DenseMatrix from the nonzero elements specified in @a triplets.
     * @param dimension Defines how many rows and columns this matrix has.
     * @param triplets The nonzero elements.
     * @param zero The zero element (default is 0.0).
     */
    DenseMatrix(const count dimension, const std::vector<Triplet> &triplets, double zero = 0.0);

    /**
     * Constructs the @a nRows x @a nCols DenseMatrix from the nonzero elements specified in @a triplets.
     * @param nRows Defines how many rows this matrix has.
     * @param nCols Defines how many columns this matrix has.
     * @param triplets The nonzero elements.
     * @param zero The zero element (default is 0.0).
     */
    DenseMatrix(const count nRows, const count nCols, const std::vector<Triplet> &triplets, double zero = 0.0);

    /**
     * Constructs an instance of DenseMatrix given the number of rows (@a nRows) and the number of columns (@a nCols) and its
     * values (@a entries).
     * @param nRows Number of rows.
     * @param nCols Number of columns.
     * @param entries Entries of the matrix.
     * @param zero The zero element (default is 0.0).
     * @note The size of the @a entries vector should be equal to @a nRows * @a nCols.
     */
    DenseMatrix(const count nRows, const count nCols, const std::vector<double> &entries, double zero = 0.0);

    /** Default destructor */
    virtual ~DenseMatrix() = default;

    /** Default copy constructor */
    DenseMatrix(const DenseMatrix &other) = default;

    /** Default move constructor */
    DenseMatrix(DenseMatrix &&other) = default;

    /** Default move assignment operator */
    DenseMatrix& operator=(DenseMatrix &&other) = default;

    /** Default copy assignment operator */
    DenseMatrix& operator=(const DenseMatrix &other) = default;

    /**
     * @return Number of rows.
     */
    inline count numberOfRows() const {
        return nRows;
    }

    /**
     * @return Number of columns.
     */
    inline count numberOfColumns() const {
        return nCols;
    }

    /**
     * Returns the zero element of the matrix.
     */
    inline double getZero() const {
        return zero;
    }

    /**
     * @param i The row index.
     * @return Number of non-zeros in row @a i.
     * @note This function is linear in the number of columns of the matrix.
     */
    count nnzInRow(const index i) const;

    /**
     * @return Number of non-zeros in this matrix.
     * @note This function takes nRows * nCols operations.
     */
    count nnz() const;

    /**
     * @return Value at matrix position (i,j).
     */
    double operator()(const index i, const index j) const;

    /**
     * Set the matrix at position (@a i, @a j) to @a value.
     */
    void setValue(const index i, const index j, const double value);

    /**
     * @return Row @a i of this matrix as vector.
     */
    Vector row(const index i) const;

    /**
     * @return Column @a j of this matrix as vector.
     */
    Vector column(const index j) const;

    /**
     * @return The main diagonal of this matrix.
     */
    Vector diagonal() const;

    /**
     * Adds this matrix to @a other and returns the result.
     * @return The sum of this matrix and @a other.
     */
    DenseMatrix operator+(const DenseMatrix &other) const;

    /**
     * Adds @a other to this matrix.
     * @return Reference to this matrix.
     */
    DenseMatrix& operator+=(const DenseMatrix &other);

    /**
     * Subtracts @a other from this matrix and returns the result.
     * @return The difference of this matrix and @a other.
     */
    DenseMatrix operator-(const DenseMatrix &other) const;

    /**
     * Subtracts @a other from this matrix.
     * @return Reference to this matrix.
     */
    DenseMatrix& operator-=(const DenseMatrix &other);

    /**
     * Multiplies this matrix with a scalar specified in @a scalar and returns the result.
     * @return The result of multiplying this matrix with @a scalar.
     */
    DenseMatrix operator*(const double &scalar) const;

    /**
     * Multiplies this matrix with a scalar specified in @a scalar.
     * @return Reference to this matrix.
     */
    DenseMatrix& operator*=(const double &scalar);

    /**
     * Multiplies this matrix with @a vector and returns the result.
     * @return The result of multiplying this matrix with @a vector.
     */
    Vector operator*(const Vector &vector) const;

    /**
     * Multiplies this matrix with @a other and returns the result in a new matrix.
     * @return The result of multiplying this matrix with @a other.
     */
    DenseMatrix operator*(const DenseMatrix &other) const;

    /**
     * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix.
     * @return The result of dividing this matrix by @a divisor.
     */
    DenseMatrix operator/(const double &divisor) const;

    /**
     * Divides this matrix by a divisor specified in @a divisor.
     * @return Reference to this matrix.
     */
    DenseMatrix& operator/=(const double &divisor);

    /**
     * Returns the transpose of this matrix.
     */
    DenseMatrix transpose() const;

    /**
     * Extracts a matrix with rows and columns specified by @a rowIndices and @a columnIndices from this matrix.
     * The order of rows and columns is equal to the order in @a rowIndices and @a columnIndices. It is also
     * possible to specify a row or column more than once to get duplicates.
     * @param rowIndices
     * @param columnIndices
     */
    DenseMatrix extract(const std::vector<index> &rowIndices, const std::vector<index> &columnIndices) const;

    /**
     * Assign the contents of the matrix @a source to this matrix at rows and columns specified by @a rowIndices and
     * @a columnIndices. That is, entry (i,j) of @a source is assigned to entry (rowIndices[i], columnIndices[j]) of
     * this matrix. Note that the dimensions of @a rowIndices and @a columnIndices must coincide with the number of rows
     * and columns of @a source.
     * @param rowIndices
     * @param columnIndices
     * @param source
     */
    void assign(const std::vector<index> &rowIndices, const std::vector<index> &columnIndices, const DenseMatrix &source);

    /**
     * Applies the unary function @a unaryElementFunction to each value in the matrix. Note that it must hold that the
     * function applied to the zero element of this matrix returns the zero element.
     * @param unaryElementFunction
     */
    template<typename F>
    void apply(const F unaryElementFunction);

    /**
     * Decomposes the given @a matrix into lower L and upper U matrix (in-place).
     * @param matrix The matrix to decompose into LU.
     */
    static void LUDecomposition(DenseMatrix &matrix);

    /**
     * Computes the solution vector x to the system @a LU * x = @a b where @a LU is a matrix decomposed into L and U.
     * @param LU Matrix decomposed into lower L and upper U matrix.
     * @param b Right-hand side.
     * @return Solution vector x to the linear equation system LU * x = b.
     */
    static Vector LUSolve(const DenseMatrix &LU, const Vector &b);

    /**
     * Computes @a A @a binaryOp @a B on the elements of matrix @a A and matrix @a B.
     * @param A
     * @param B
     * @param binaryOp Function handling (double, double) -> double
     * @return @a A @a binaryOp @a B.
     * @note @a A and @a B must have the same dimensions.
     */
    template<typename L>
    static DenseMatrix binaryOperator(const DenseMatrix &A, const DenseMatrix &B, L binaryOp);

    /**
     * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value)
     */
    template<typename L>
    void forElementsInRow(index row, L handle) const;

    /**
     * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value)
     */
    template<typename L>
    void parallelForElementsInRow(index row, L handle) const;

    /**
     * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure).
     */
    template<typename L>
    void forElementsInRowOrder(L handle) const;

    /**
     * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix.
     */
    template<typename L>
    void parallelForElementsInRowOrder(L handle) const;

    /**
     * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value).
     * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows()) time regardless of the number of
     * non-zeros actually present.
     */
    template<typename L>
    void forNonZeroElementsInRow(index row, L handle) const;

    /**
     * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value)
     * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows()) sequential time regardless of the number
     * of non-zeros actually present.
     */
    template<typename L>
    void parallelForNonZeroElementsInRow(index row, L handle) const;

    /**
     * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure).
     * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows() * numberOfColumns()) time regardless of the
     * number of non-zeros actually present.
     */
    template<typename L>
    void forNonZeroElementsInRowOrder(L handle) const;

    /**
     * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix.
     * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows() * numberOfColumns()) sequential time regardless
     * of the number of non-zeros actually present.
     */
    template<typename L>
    void parallelForNonZeroElementsInRowOrder(L handle) const;
};

template<typename F>
void DenseMatrix::apply(const F unaryElementFunction) {
    for (omp_index k = 0; k < static_cast<omp_index>(entries.size()); ++k) {
        entries[k] = unaryElementFunction(entries[k]);
    }
}

template<typename L>
inline DenseMatrix DenseMatrix::binaryOperator(const DenseMatrix &A, const DenseMatrix &B, L binaryOp) {
    assert(A.nRows == B.nRows && A.nCols == B.nCols);

    std::vector<double> resultEntries(A.numberOfRows() * A.numberOfColumns(), 0.0);
    for (omp_index i = 0; i < static_cast<omp_index>(A.numberOfRows()); ++i) {
        index offset = i * A.numberOfColumns();
        for (index j = offset; j < offset + A.numberOfColumns(); ++j) {
            resultEntries[j] = binaryOp(A.entries[j], B.entries[j]);
        }
    }

    return DenseMatrix(A.numberOfRows(), A.numberOfColumns(), resultEntries);
}

template<typename L>
inline void DenseMatrix::forElementsInRow(index i, L handle) const {
    index offset = i * numberOfColumns();
    for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) {
        handle(j, entries[k]);
    }
}

template<typename L>
inline void DenseMatrix::parallelForElementsInRow(index i, L handle) const {
    index offset = i * numberOfColumns();
    for (omp_index j = 0; j < static_cast<omp_index>(numberOfColumns()); ++j) {
        handle(j, entries[offset + j]);
    }
}

template<typename L>
inline void DenseMatrix::forElementsInRowOrder(L handle) const {
    for (index i = 0; i < nRows; ++i) {
        index offset = i * numberOfColumns();
        for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) {
            handle(i, j, entries[k]);
        }
    }
}

template<typename L>
inline void DenseMatrix::parallelForElementsInRowOrder(L handle) const {
    for (omp_index i = 0; i < static_cast<omp_index>(nRows); ++i) {
        index offset = i * numberOfColumns();
        for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) {
            handle(i, j, entries[k]);
        }
    }
}

template<typename L>
inline void DenseMatrix::forNonZeroElementsInRow(index row, L handle) const {
    for (index j = 0, k = row * numberOfColumns(); j < numberOfColumns(); ++j, ++k) {
        if (entries[k] != getZero()) {
            handle(j, entries[k]);
        }
    }
}

template<typename L>
inline void DenseMatrix::parallelForNonZeroElementsInRow(index row, L handle) const {
    for (omp_index j = 0; j < static_cast<omp_index>(numberOfColumns()); ++j) {
        index k = row * numberOfColumns() + j;
        if (entries[k] != getZero()) {
            handle(j, entries[k]);
        }
    }
}

template<typename L>
inline void DenseMatrix::forNonZeroElementsInRowOrder(L handle) const {
    for (index i = 0; i < numberOfRows(); ++i) {
        for (index j = 0, k = i * numberOfColumns(); j < numberOfColumns(); ++j, ++k) {
            if (entries[k] != getZero()) {
                handle(i, j, entries[k]);
            }
        }
    }
}

template<typename L>
inline void DenseMatrix::parallelForNonZeroElementsInRowOrder(L handle) const {
    for (omp_index i = 0; i < static_cast<omp_index>(numberOfRows()); ++i) {
        for (index j = 0, k = i * numberOfColumns(); j < numberOfColumns(); ++j, ++k) {
            if (entries[k] != getZero()) {
                handle(i, j, entries[k]);
            }
        }
    }
}

} /* namespace NetworKit */

#endif /* NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ */
/*
 * DenseMatrix.h
 *
 * Created on: Nov 25, 2015
 * Author: Michael Wegner (michael.wegner@student.kit.edu)
 */

#ifndef NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_
#define NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_

#include "../Globals.h"
#include "AlgebraicGlobals.h"
#include "Vector.h"

#include <cassert>
#include <vector>

namespace NetworKit {

/**
 * @ingroup algebraic
 * Represents a dense matrix. Use this matrix to run LU decompositions and LU solves.
 * Note that most matrices are rather sparse s.t. CSRMatrix might be a better representation.
 */
class DenseMatrix {
private:
    count nRows;
    count nCols;
    std::vector<double> entries;
    double zero;

public:
    /** Default constructor */
    DenseMatrix();

    /**
     * Constructs the DenseMatrix with size @a dimension x @a dimension.
     * @param dimension Defines how many rows and columns this matrix has.
     * @param zero The zero element (default is 0.0).
     */
    DenseMatrix(const count dimension, double zero = 0.0);

    /**
     * Constructs the DenseMatrix with size @a nRows x @a nCols.
     * @param nRows Number of rows.
     * @param nCols Number of columns.
     * @param zero The zero element (default is 0.0).
     */
    DenseMatrix(const count nRows, const count nCols, double zero = 0.0);

    /**
     * Constructs the @a dimension x @a dimension DenseMatrix from the nonzero elements specified in @a triplets.
     * @param dimension Defines how many rows and columns this matrix has.
     * @param triplets The nonzero elements.
     * @param zero The zero element (default is 0.0).
     */
    DenseMatrix(const count dimension, const std::vector<Triplet> &triplets, double zero = 0.0);

    /**
     * Constructs the @a nRows x @a nCols DenseMatrix from the nonzero elements specified in @a triplets.
     * @param nRows Defines how many rows this matrix has.
     * @param nCols Defines how many columns this matrix has.
     * @param triplets The nonzero elements.
     * @param zero The zero element (default is 0.0).
     */
    DenseMatrix(const count nRows, const count nCols, const std::vector<Triplet> &triplets, double zero = 0.0);

    /**
     * Constructs an instance of DenseMatrix given the number of rows (@a nRows) and the number of columns (@a nCols) and its
     * values (@a entries).
     * @param nRows Number of rows.
     * @param nCols Number of columns.
     * @param entries Entries of the matrix.
     * @param zero The zero element (default is 0.0).
     * @note The size of the @a entries vector should be equal to @a nRows * @a nCols.
     */
    DenseMatrix(const count nRows, const count nCols, const std::vector<double> &entries, double zero = 0.0);

    /** Default destructor */
    virtual ~DenseMatrix() = default;

    /** Default copy constructor */
    DenseMatrix(const DenseMatrix &other) = default;

    /** Default move constructor */
    DenseMatrix(DenseMatrix &&other) = default;

    /** Default move assignment operator */
    DenseMatrix& operator=(DenseMatrix &&other) = default;

    /** Default copy assignment operator */
    DenseMatrix& operator=(const DenseMatrix &other) = default;

    /**
     * @return Number of rows.
     */
    inline count numberOfRows() const {
        return nRows;
    }

    /**
     * @return Number of columns.
     */
    inline count numberOfColumns() const {
        return nCols;
    }

    /**
     * Returns the zero element of the matrix.
     */
    inline double getZero() const {
        return zero;
    }

    /**
     * @param i The row index.
     * @return Number of non-zeros in row @a i.
     * @note This function is linear in the number of columns of the matrix.
     */
    count nnzInRow(const index i) const;

    /**
     * @return Number of non-zeros in this matrix.
     * @note This function takes nRows * nCols operations.
     */
    count nnz() const;

    /**
     * @return Value at matrix position (i,j).
     */
    double operator()(const index i, const index j) const;

    /**
     * Set the matrix at position (@a i, @a j) to @a value.
     */
    void setValue(const index i, const index j, const double value);

    /**
     * @return Row @a i of this matrix as vector.
     */
    Vector row(const index i) const;

    /**
     * @return Column @a j of this matrix as vector.
     */
    Vector column(const index j) const;

    /**
     * @return The main diagonal of this matrix.
     */
    Vector diagonal() const;

    /**
     * Adds this matrix to @a other and returns the result.
     * @return The sum of this matrix and @a other.
     */
    DenseMatrix operator+(const DenseMatrix &other) const;

    /**
     * Adds @a other to this matrix.
     * @return Reference to this matrix.
     */
    DenseMatrix& operator+=(const DenseMatrix &other);

    /**
     * Subtracts @a other from this matrix and returns the result.
     * @return The difference of this matrix and @a other.
     */
    DenseMatrix operator-(const DenseMatrix &other) const;

    /**
     * Subtracts @a other from this matrix.
     * @return Reference to this matrix.
     */
    DenseMatrix& operator-=(const DenseMatrix &other);

    /**
     * Multiplies this matrix with a scalar specified in @a scalar and returns the result.
     * @return The result of multiplying this matrix with @a scalar.
     */
    DenseMatrix operator*(const double &scalar) const;

    /**
     * Multiplies this matrix with a scalar specified in @a scalar.
     * @return Reference to this matrix.
     */
    DenseMatrix& operator*=(const double &scalar);

    /**
     * Multiplies this matrix with @a vector and returns the result.
     * @return The result of multiplying this matrix with @a vector.
     */
    Vector operator*(const Vector &vector) const;

    /**
     * Multiplies this matrix with @a other and returns the result in a new matrix.
     * @return The result of multiplying this matrix with @a other.
     */
    DenseMatrix operator*(const DenseMatrix &other) const;

    /**
     * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix.
     * @return The result of dividing this matrix by @a divisor.
     */
    DenseMatrix operator/(const double &divisor) const;

    /**
     * Divides this matrix by a divisor specified in @a divisor.
     * @return Reference to this matrix.
     */
    DenseMatrix& operator/=(const double &divisor);

    /**
     * Returns the transpose of this matrix.
     */
    DenseMatrix transpose() const;

    /**
     * Extracts a matrix with rows and columns specified by @a rowIndices and @a columnIndices from this matrix.
     * The order of rows and columns is equal to the order in @a rowIndices and @a columnIndices. It is also
     * possible to specify a row or column more than once to get duplicates.
     * @param rowIndices
     * @param columnIndices
     */
    DenseMatrix extract(const std::vector<index> &rowIndices, const std::vector<index> &columnIndices) const;

    /**
     * Assign the contents of the matrix @a source to this matrix at rows and columns specified by @a rowIndices and
     * @a columnIndices. That is, entry (i,j) of @a source is assigned to entry (rowIndices[i], columnIndices[j]) of
     * this matrix. Note that the dimensions of @a rowIndices and @a columnIndices must coincide with the number of rows
     * and columns of @a source.
     * @param rowIndices
     * @param columnIndices
     * @param source
     */
    void assign(const std::vector<index> &rowIndices, const std::vector<index> &columnIndices, const DenseMatrix &source);

    /**
     * Applies the unary function @a unaryElementFunction to each value in the matrix. Note that it must hold that the
     * function applied to the zero element of this matrix returns the zero element.
     * @param unaryElementFunction
     */
    template<typename F>
    void apply(const F unaryElementFunction);

    /**
     * Decomposes the given @a matrix into lower L and upper U matrix (in-place).
     * @param matrix The matrix to decompose into LU.
     */
    static void LUDecomposition(DenseMatrix &matrix);

    /**
     * Computes the solution vector x to the system @a LU * x = @a b where @a LU is a matrix decomposed into L and U.
     * @param LU Matrix decomposed into lower L and upper U matrix.
     * @param b Right-hand side.
     * @return Solution vector x to the linear equation system LU * x = b.
     */
    static Vector LUSolve(const DenseMatrix &LU, const Vector &b);

    /**
     * Computes @a A @a binaryOp @a B on the elements of matrix @a A and matrix @a B.
     * @param A
     * @param B
     * @param binaryOp Function handling (double, double) -> double
     * @return @a A @a binaryOp @a B.
     * @note @a A and @a B must have the same dimensions.
     */
    template<typename L>
    static DenseMatrix binaryOperator(const DenseMatrix &A, const DenseMatrix &B, L binaryOp);

    /**
     * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value)
     */
    template<typename L>
    void forElementsInRow(index row, L handle) const;

    /**
     * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value)
     */
    template<typename L>
    void parallelForElementsInRow(index row, L handle) const;

    /**
     * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure).
     */
    template<typename L>
    void forElementsInRowOrder(L handle) const;

    /**
     * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix.
     */
    template<typename L>
    void parallelForElementsInRowOrder(L handle) const;

    /**
     * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value).
     * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows()) time regardless of the number of
     * non-zeros actually present.
     */
    template<typename L>
    void forNonZeroElementsInRow(index row, L handle) const;

    /**
     * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value)
     * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows()) sequential time regardless of the number
     * of non-zeros actually present.
     */
    template<typename L>
    void parallelForNonZeroElementsInRow(index row, L handle) const;

    /**
     * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure).
     * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows() * numberOfColumns()) time regardless of the
     * number of non-zeros actually present.
     */
    template<typename L>
    void forNonZeroElementsInRowOrder(L handle) const;

    /**
     * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix.
     * @note This is a DenseMatrix! Therefore this operation needs O(numberOfRows() * numberOfColumns()) sequential time regardless
     * of the number of non-zeros actually present.
     */
    template<typename L>
    void parallelForNonZeroElementsInRowOrder(L handle) const;
};

template<typename F>
void DenseMatrix::apply(const F unaryElementFunction) {
#pragma omp parallel for
    for (omp_index k = 0; k < static_cast<omp_index>(entries.size()); ++k) {
        entries[k] = unaryElementFunction(entries[k]);
    }
}

template<typename L>
inline DenseMatrix DenseMatrix::binaryOperator(const DenseMatrix &A, const DenseMatrix &B, L binaryOp) {
    assert(A.nRows == B.nRows && A.nCols == B.nCols);

    std::vector<double> resultEntries(A.numberOfRows() * A.numberOfColumns(), 0.0);
#pragma omp parallel for
    for (omp_index i = 0; i < static_cast<omp_index>(A.numberOfRows()); ++i) {
        index offset = i * A.numberOfColumns();
        for (index j = offset; j < offset + A.numberOfColumns(); ++j) {
            resultEntries[j] = binaryOp(A.entries[j], B.entries[j]);
        }
    }

    return DenseMatrix(A.numberOfRows(), A.numberOfColumns(), resultEntries);
}

template<typename L>
inline void DenseMatrix::forElementsInRow(index i, L handle) const {
    index offset = i * numberOfColumns();
    for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) {
        handle(j, entries[k]);
    }
}

template<typename L>
inline void DenseMatrix::parallelForElementsInRow(index i, L handle) const {
    index offset = i * numberOfColumns();
#pragma omp parallel for
    for (omp_index j = 0; j < static_cast<omp_index>(numberOfColumns()); ++j) {
        handle(j, entries[offset + j]);
    }
}

template<typename L>
inline void DenseMatrix::forElementsInRowOrder(L handle) const {
    for (index i = 0; i < nRows; ++i) {
        index offset = i * numberOfColumns();
        for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) {
            handle(i, j, entries[k]);
        }
    }
}

template<typename L>
inline void DenseMatrix::parallelForElementsInRowOrder(L handle) const {
#pragma omp parallel for
    for (omp_index i = 0; i < static_cast<omp_index>(nRows); ++i) {
        index offset = i * numberOfColumns();
        for (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) {
            handle(i, j, entries[k]);
        }
    }
}

template<typename L>
inline void DenseMatrix::forNonZeroElementsInRow(index row, L handle) const {
    for (index j = 0, k = row * numberOfColumns(); j < numberOfColumns(); ++j, ++k) {
        if (entries[k] != getZero()) {
            handle(j, entries[k]);
        }
    }
}

template<typename L>
inline void DenseMatrix::parallelForNonZeroElementsInRow(index row, L handle) const {
#pragma omp parallel for
    for (omp_index j = 0; j < static_cast<omp_index>(numberOfColumns()); ++j) {
        index k = row * numberOfColumns() + j;
        if (entries[k] != getZero()) {
            handle(j, entries[k]);
        }
    }
}

template<typename L>
inline void DenseMatrix::forNonZeroElementsInRowOrder(L handle) const {
    for (index i = 0; i < numberOfRows(); ++i) {
        for (index j = 0, k = i * numberOfColumns(); j < numberOfColumns(); ++j, ++k) {
            if (entries[k] != getZero()) {
                handle(i, j, entries[k]);
            }
        }
    }
}

template<typename L>
inline void DenseMatrix::parallelForNonZeroElementsInRowOrder(L handle) const {
#pragma omp parallel for
    for (omp_index i = 0; i < static_cast<omp_index>(numberOfRows()); ++i) {
        for (index j = 0, k = i * numberOfColumns(); j < numberOfColumns(); ++j, ++k) {
            if (entries[k] != getZero()) {
                handle(i, j, entries[k]);
            }
        }
    }
}

} /* namespace NetworKit */

#endif /* NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ */
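One detail worth calling out in the parallel loops above: the loop counters are declared as omp_index with a static_cast from the unsigned count/index types. Older OpenMP specifications (and notably MSVC's implementation) require a signed integral loop variable in a `parallel for`, which is presumably why the signed alias exists. A minimal sketch of the pattern, with type aliases assumed to mirror what Globals.h provides:

// Sketch of the signed-counter pattern; count/index are assumed unsigned
// 64-bit in NetworKit, omp_index their signed counterpart for OpenMP loops.
#include <cstdint>
#include <vector>

using count = uint64_t;
using omp_index = int64_t; // signed type accepted by all OpenMP compilers

void scaleAll(std::vector<double> &data, double factor) {
    #pragma omp parallel for
    for (omp_index k = 0; k < static_cast<omp_index>(data.size()); ++k) {
        data[k] = factor * data[k]; // independent iterations, safe to parallelize
    }
}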
mixed_tentusscher_myo_epi_2004_S2_10.c
// Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium) // (AP + max:dvdt) #include <stdio.h> #include "mixed_tentusscher_myo_epi_2004_S2_10.h" GET_CELL_MODEL_DATA(init_cell_model_data) { if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { static bool first_call = true; if(first_call) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n"); first_call = false; } // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } // Initial conditions for TenTusscher myocardium if (mapping[sv_id] == 0) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } // Initial conditions for TenTusscher epicardium else { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.6190487792098,0.00127615275701324,0.780956535094998,0.780847063341448,0.000173448982750495,0.485618885325203,0.00292967767959199,0.999998364838194,1.91717709945144e-08,1.87830179933651e-05,0.999774468479350,1.00704023911659,0.999994520006527,4.64680265082926e-05,0.570657756232895,10.2852308259172,139.261705705374}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = (uint32_t )i; for (int j = 0; j < num_steps; ++j) { if (mapping[i] == 0) solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]); else solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_myo(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = 
sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* 
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] 
Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_epi(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={14.4075043407048,2.30190614350519e-05,0.000132186955734266,0.000460438593474590,0.230805741240155,0.128769301850520,0.167089340366410,4.76580224949500,0.0120157545262487,1.45704463630229,1089.95375481761,0.000516367300199849,0.468938665984628,0.0163624321716470,0.00234457045494790,4.25912616439814e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total 
current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) 
sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
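Two integration schemes coexist in the RHS functions above, and they are easy to miss because rDY_ stores updated states rather than time derivatives: the membrane voltage is advanced by explicit Euler (rDY_[0] = svolt + dt*(-sItot)), while every gating variable uses an update of the form INF - (INF - y)*exp(-dt/TAU). The latter is the Rush-Larsen scheme: the exact solution of the linear gate ODE dy/dt = (y_inf - y)/tau when y_inf and tau are held fixed over the step. A one-function sketch of that update:

#include <math.h>

/* Rush-Larsen step: exact solution of dy/dt = (y_inf - y)/tau over dt,
 * with y_inf and tau frozen at their values at the start of the step. */
static inline double rush_larsen_update(double y, double y_inf, double tau, double dt)
{
    return y_inf - (y_inf - y) * exp(-dt / tau);
}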
// Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium) // (AP + max:dvdt) #include <stdio.h> #include "mixed_tentusscher_myo_epi_2004_S2_10.h" GET_CELL_MODEL_DATA(init_cell_model_data) { if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { static bool first_call = true; if(first_call) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n"); first_call = false; } // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } // Initial conditions for TenTusscher myocardium if (mapping[sv_id] == 0) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } // Initial conditions for TenTusscher epicardium else { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.6190487792098,0.00127615275701324,0.780956535094998,0.780847063341448,0.000173448982750495,0.485618885325203,0.00292967767959199,0.999998364838194,1.91717709945144e-08,1.87830179933651e-05,0.999774468479350,1.00704023911659,0.999994520006527,4.64680265082926e-05,0.570657756232895,10.2852308259172,139.261705705374}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } uint32_t sv_id; int i; for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = (uint32_t )i; for (int j = 0; j < num_steps; ++j) { if (mapping[i] == 0) solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]); else solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_myo(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = 
sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- 
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] 
Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_epi(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={14.4075043407048,2.30190614350519e-05,0.000132186955734266,0.000460438593474590,0.230805741240155,0.128769301850520,0.167089340366410,4.76580224949500,0.0120157545262487,1.45704463630229,1089.95375481761,0.000516367300199849,0.468938665984628,0.0163624321716470,0.00234457045494790,4.25912616439814e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total 
current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) 
sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
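/*
 * Illustrative sketch (not part of the model source above): the gate updates
 * in RHS_cpu_myo/RHS_cpu_epi follow the Rush-Larsen scheme -- a gate y with
 * voltage-dependent steady state y_inf and time constant tau is advanced as
 * y_new = y_inf - (y_inf - y)*exp(-dt/tau), i.e. the exact solution of the
 * gate ODE with V frozen over the step -- while the voltage itself takes an
 * explicit Euler step, rDY_[0] = svolt + dt*(-sItot). Note that despite the
 * name, rDY_ holds next-state values, which the caller copies straight into
 * sv. The standalone toy program below (all toy_* names are hypothetical)
 * demonstrates just those two update rules on a one-gate model.
 * Build: cc -O2 rush_larsen_sketch.c -lm
 */
#include <math.h>
#include <stdio.h>

static double toy_y_inf(double v) { return 1.0 / (1.0 + exp(-(v + 40.0) / 5.0)); }
static double toy_tau(double v)   { return 1.0 + 9.0 * exp(-v * v / 800.0); }

int main(void) {
    double v = -85.0, y = 0.0, dt = 0.02;            /* mV, unitless, ms */
    for (int step = 0; step < 1000; step++) {
        double yi = toy_y_inf(v), ty = toy_tau(v);
        y = yi - (yi - y) * exp(-dt / ty);           /* Rush-Larsen gate step */
        double itot = 0.1 * y * (v + 20.0);          /* toy ionic current */
        v = v + dt * (-itot);                        /* explicit Euler on V */
    }
    printf("v = %g mV, y = %g\n", v, y);
    return 0;
}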
// Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium) // (AP + max:dvdt) #include <stdio.h> #include "mixed_tentusscher_myo_epi_2004_S2_10.h" GET_CELL_MODEL_DATA(init_cell_model_data) { if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { static bool first_call = true; if(first_call) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n"); first_call = false; } // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } // Initial conditions for TenTusscher myocardium if (mapping[sv_id] == 0) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } // Initial conditions for TenTusscher epicardium else { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.6190487792098,0.00127615275701324,0.780956535094998,0.780847063341448,0.000173448982750495,0.485618885325203,0.00292967767959199,0.999998364838194,1.91717709945144e-08,1.87830179933651e-05,0.999774468479350,1.00704023911659,0.999994520006527,4.64680265082926e-05,0.570657756232895,10.2852308259172,139.261705705374}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = (uint32_t )i; for (int j = 0; j < num_steps; ++j) { if (mapping[i] == 0) solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]); else solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_myo(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = 
sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* 
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] 
Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_epi(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={14.4075043407048,2.30190614350519e-05,0.000132186955734266,0.000460438593474590,0.230805741240155,0.128769301850520,0.167089340366410,4.76580224949500,0.0120157545262487,1.45704463630229,1089.95375481761,0.000516367300199849,0.468938665984628,0.0163624321716470,0.00234457045494790,4.25912616439814e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total 
current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) 
sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
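/*
 * Illustrative sketch: the only difference between the serial and OpenMP
 * variants of this model is the "#pragma omp parallel for private(sv_id)" on
 * the per-cell loop in solve_model_odes_cpu. Each iteration works on its own
 * NEQ-sized slice of sv (assuming distinct cell ids), so the iterations are
 * independent; sv_id, however, is assigned on every iteration and must be
 * privatized. A minimal compilable version of that pattern, with a
 * hypothetical relax() standing in for one ODE time step:
 * Build: cc -O2 -fopenmp percell_sketch.c
 */
#include <stdint.h>
#include <stdio.h>

#define NEQ 4
#define NCELLS 8

static void relax(double *state) {          /* stand-in for one ODE step */
    for (int k = 0; k < NEQ; k++) state[k] *= 0.5;
}

int main(void) {
    double sv[NCELLS * NEQ];
    for (int k = 0; k < NCELLS * NEQ; k++) sv[k] = 1.0;
    uint32_t sv_id;                          /* loop scratch: must be private */
    int i;
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < NCELLS; i++) {
        sv_id = (uint32_t) i;                /* real code: cells_to_solve[i] */
        relax(sv + sv_id * NEQ);             /* disjoint slices => no race */
    }
    printf("sv[0] = %g\n", sv[0]);
    return 0;
}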
GB_unop__abs_uint8_uint8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__abs_uint8_uint8) // op(A') function: GB (_unop_tran__abs_uint8_uint8) // C type: uint8_t // A type: uint8_t // cast: uint8_t cij = aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint8_t z = aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__abs_uint8_uint8) ( uint8_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; uint8_t z = aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; uint8_t z = aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__abs_uint8_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__abs_uint8_uint8) // op(A') function: GB (_unop_tran__abs_uint8_uint8) // C type: uint8_t // A type: uint8_t // cast: uint8_t cij = aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint8_t z = aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__abs_uint8_uint8) ( uint8_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; uint8_t z = aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; uint8_t z = aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__abs_uint8_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__abs_uint8_uint8) // op(A') function: GB (_unop_tran__abs_uint8_uint8) // C type: uint8_t // A type: uint8_t // cast: uint8_t cij = aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint8_t z = aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__abs_uint8_uint8) ( uint8_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; uint8_t z = aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; uint8_t z = aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__abs_uint8_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
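/*
 * Illustrative sketch: the three GB_unop variants above differ only in
 * whether the two apply loops carry the OpenMP pragma; the kernel itself is
 * the standard "dense or bitmap map" pattern these generated files share.
 * A self-contained version (apply_u8 is a hypothetical name; abs on uint8_t
 * is the identity, which is why the generated op is simply cij = aij):
 * Build: cc -O2 -fopenmp unop_sketch.c
 */
#include <stdint.h>
#include <stdio.h>

static void apply_u8(uint8_t *Cx, const uint8_t *Ax, const int8_t *Ab,
                     int64_t anz, int nthreads) {
    int64_t p;
    if (Ab == NULL) {                        /* dense: every entry present */
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0; p < anz; p++) {
            Cx[p] = Ax[p];
        }
    } else {                                 /* bitmap: Ab[p] flags presence */
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0; p < anz; p++) {
            if (!Ab[p]) continue;
            Cx[p] = Ax[p];
        }
    }
}

int main(void) {
    uint8_t a[4] = {1, 2, 3, 4}, c[4] = {0};
    int8_t  b[4] = {1, 0, 1, 1};
    apply_u8(c, a, b, 4, 2);
    printf("%u %u %u %u\n", c[0], c[1], c[2], c[3]);   /* 1 0 3 4 */
    return 0;
}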
task-taskgroup.c
/* * task-taskgroup.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run | FileCheck %s #include <omp.h> #include <stdio.h> #include <unistd.h> #include "ompt/ompt-signal.h" int main(int argc, char *argv[]) { int var = 0, a = 0; #pragma omp parallel num_threads(2) shared(var, a) #pragma omp master { #pragma omp taskgroup { #pragma omp task shared(var, a) { var++; OMPT_SIGNAL(a); } // Give other thread time to steal the task. OMPT_WAIT(a, 1); } var++; } fprintf(stderr, "DONE\n"); int error = (var != 2); return error; } // CHECK-NOT: ThreadSanitizer: data race // CHECK-NOT: ThreadSanitizer: reported // CHECK: DONE
/* * task-taskgroup.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run | FileCheck %s #include <omp.h> #include <stdio.h> #include <unistd.h> #include "ompt/ompt-signal.h" int main(int argc, char *argv[]) { int var = 0, a = 0; #pragma omp taskgroup { #pragma omp task shared(var, a) { var++; OMPT_SIGNAL(a); } // Give other thread time to steal the task. OMPT_WAIT(a, 1); } var++; fprintf(stderr, "DONE\n"); int error = (var != 2); return error; } // CHECK-NOT: ThreadSanitizer: data race // CHECK-NOT: ThreadSanitizer: reported // CHECK: DONE
/* * task-taskgroup.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run | FileCheck %s #include <omp.h> #include <stdio.h> #include <unistd.h> #include "ompt/ompt-signal.h" int main(int argc, char *argv[]) { int var = 0, a = 0; #pragma omp parallel num_threads(2) shared(var, a) #pragma omp master { #pragma omp taskgroup { #pragma omp task shared(var, a) { var++; OMPT_SIGNAL(a); } // Give other thread time to steal the task. OMPT_WAIT(a, 1); } var++; } fprintf(stderr, "DONE\n"); int error = (var != 2); return error; } // CHECK-NOT: ThreadSanitizer: data race // CHECK-NOT: ThreadSanitizer: reported // CHECK: DONE
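/*
 * Illustrative sketch: the test above is race free because a taskgroup region
 * does not end until every task created inside it has completed, so the
 * task's var++ is ordered before the var++ that follows the region. The same
 * synchronization, stripped of the Archer OMPT_SIGNAL/OMPT_WAIT harness:
 * Build: cc -O2 -fopenmp taskgroup_sketch.c
 */
#include <stdio.h>

int main(void) {
    int var = 0;
    #pragma omp parallel num_threads(2) shared(var)
    #pragma omp master
    {
        #pragma omp taskgroup
        {
            #pragma omp task shared(var)
            { var++; }
        }                   /* implicit wait: the child task is done here */
        var++;
    }
    printf("var = %d (expected 2)\n", var);
    return (var != 2);
}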
GB_binop__pow_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__pow_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint16) // C=scalar+B GB (_bind1st__pow_uint16) // C=scalar+B' GB (_bind1st_tran__pow_uint16) // C=A+scalar GB (_bind2nd__pow_uint16) // C=A'+scalar GB (_bind2nd_tran__pow_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = GB_pow_uint16 (aij, bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_pow_uint16 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_UINT16 || GxB_NO_POW_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pow_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__pow_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__pow_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_pow_uint16 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_pow_uint16 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint16 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const 
int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint16 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
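The _AemultB_02 kernel above compiles GB_emult_02_template.c twice, once per value of GB_FLIPPED, because pow is not commutative and has no flipped built-in variant; flipxy then selects which expansion runs. The following is a minimal standalone sketch of that dispatch, with hypothetical names (apply_binop, pow_u16) standing in for the template machinery and for GB_pow_uint16, whose real definition lives elsewhere in GraphBLAS.

// Minimal sketch of the flipxy dispatch used by the _AemultB_02 kernel above.
// apply_binop and pow_u16 are hypothetical stand-ins, not the GraphBLAS API;
// the real kernels get the same effect by compiling GB_emult_02_template.c
// twice, with GB_FLIPPED set to 1 and then to 0.
#include <stdint.h>
#include <stdio.h>

static uint16_t pow_u16 (uint16_t x, uint16_t y)    // stand-in for GB_pow_uint16
{
    uint16_t z = 1 ;
    while (y-- > 0) z = (uint16_t) (z * x) ;        // wraps mod 2^16
    return (z) ;
}

// apply z = f(a,b), or its flip z = f(b,a), over two arrays
static void apply_binop (uint16_t *Cx, const uint16_t *Ax, const uint16_t *Bx,
    int64_t n, int flipxy)
{
    if (flipxy)
    {
        // non-commutative op with no flipped variant: swap the arguments
        for (int64_t p = 0 ; p < n ; p++) Cx [p] = pow_u16 (Bx [p], Ax [p]) ;
    }
    else
    {
        for (int64_t p = 0 ; p < n ; p++) Cx [p] = pow_u16 (Ax [p], Bx [p]) ;
    }
}

int main (void)
{
    uint16_t Ax [2] = { 2, 3 }, Bx [2] = { 10, 2 }, Cx [2] ;
    apply_binop (Cx, Ax, Bx, 2, 1) ;                // flipped: 10^2 and 2^3
    printf ("%d %d\n", Cx [0], Cx [1]) ;            // prints 100 8
    return (0) ;
}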
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__pow_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint16) // C=scalar+B GB (_bind1st__pow_uint16) // C=scalar+B' GB (_bind1st_tran__pow_uint16) // C=A+scalar GB (_bind2nd__pow_uint16) // C=A'+scalar GB (_bind2nd_tran__pow_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = GB_pow_uint16 (aij, bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_pow_uint16 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_UINT16 || GxB_NO_POW_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pow_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__pow_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__pow_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_pow_uint16 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_pow_uint16 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint16 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to 
binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint16 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
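Every kernel body in this file is an #include of a shared template, specialized through macros such as GB_GETA, GB_GETB, and GB_BINOP that are (re)defined just before the include. Below is a self-contained sketch of that code-generation style, with the "template" condensed into a macro in the same file rather than a separate .c file; all names are illustrative, not the GraphBLAS API.

// Sketch of the macro-driven template pattern used by these generated
// kernels: one generic loop, specialized by per-operator macros. In
// GraphBLAS the template lives in a separate file (e.g. GB_add_template.c)
// that is #include'd after the macros are defined; here it is condensed into
// the EWISE_LOOP macro.
#include <stdint.h>
#include <stdio.h>

#define GETA(aij,Ax,p) uint16_t aij = Ax [p]
#define GETB(bij,Bx,p) uint16_t bij = Bx [p]

// the "template": a generic element-wise loop, specialized by BINOP
#define EWISE_LOOP(BINOP,Cx,Ax,Bx,n)            \
    for (int64_t p = 0 ; p < (n) ; p++)         \
    {                                           \
        GETA (aij, Ax, p) ;                     \
        GETB (bij, Bx, p) ;                     \
        BINOP (Cx [p], aij, bij) ;              \
    }

// two operator specializations of the same template
#define OP_PLUS(z,x,y) z = (uint16_t) ((x) + (y))
#define OP_MAX(z,x,y)  z = ((x) > (y)) ? (x) : (y)

int main (void)
{
    uint16_t Ax [3] = { 1, 7, 3 }, Bx [3] = { 5, 2, 3 }, Cx [3] ;
    EWISE_LOOP (OP_PLUS, Cx, Ax, Bx, 3) ;
    printf ("%d %d %d\n", Cx [0], Cx [1], Cx [2]) ;   // 6 9 6
    EWISE_LOOP (OP_MAX, Cx, Ax, Bx, 3) ;
    printf ("%d %d %d\n", Cx [0], Cx [1], Cx [2]) ;   // 5 7 3
    return (0) ;
}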
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__pow_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__pow_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint16) // C=scalar+B GB (_bind1st__pow_uint16) // C=scalar+B' GB (_bind1st_tran__pow_uint16) // C=A+scalar GB (_bind2nd__pow_uint16) // C=A'+scalar GB (_bind2nd_tran__pow_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = GB_pow_uint16 (aij, bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_pow_uint16 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_UINT16 || GxB_NO_POW_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pow_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__pow_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__pow_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_pow_uint16 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_pow_uint16 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint16 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const 
int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint16 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
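The _bind1st/_bind2nd kernels bind one operand of the binary operator to a scalar and sweep the other over every entry present in the bitmap, one iteration per position, under a statically scheduled omp parallel for. Here is a runnable miniature of the same loop shape, assuming a plain bitmap test in place of GBB and a direct array read in place of GBX; bind1st_pow and pow_u16 are hypothetical stand-ins.

// Miniature of the _bind1st kernel shape above: z = f(x, Bx[p]) for every
// entry present in the bitmap Bb, parallelized like the generated code.
// Compile with: cc -fopenmp ... (the pragma is ignored otherwise).
#include <stdint.h>
#include <stdio.h>

static uint16_t pow_u16 (uint16_t x, uint16_t y)
{
    uint16_t z = 1 ;
    while (y-- > 0) z = (uint16_t) (z * x) ;
    return (z) ;
}

static void bind1st_pow (uint16_t *Cx, uint16_t x, const uint16_t *Bx,
    const int8_t *Bb, int64_t bnz, int nthreads)
{
    int64_t p ;
    // iterations are independent: each one writes only Cx [p]
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (Bb != NULL && !Bb [p]) continue ;   // entry not present
        Cx [p] = pow_u16 (x, Bx [p]) ;
    }
}

int main (void)
{
    uint16_t Bx [4] = { 0, 1, 2, 3 }, Cx [4] = { 0, 0, 0, 0 } ;
    int8_t   Bb [4] = { 1, 1, 0, 1 } ;          // third entry is absent
    bind1st_pow (Cx, 2, Bx, Bb, 4, 4) ;
    printf ("%d %d %d %d\n", Cx [0], Cx [1], Cx [2], Cx [3]) ; // 1 2 0 8
    return (0) ;
}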
mp.c
#include <stdio.h> #define Const(x,y,z) x##y##z #define float Const(un,sign,ed) float x, s[1], a[1], e[1], d[1]; float A(float a, float b){return a&b?A(a^b,(a&b)<<1):a^b;} float P(float a, float b){return a?P(a/10,'-')+putchar(a%10+48)-48:0;} float G(float a, float b){for(;b;b^=a^=b^=a%=b);return !--a;} float F(float a, float b){return b?G(a,b)+F(a,b-1):0;} float S(float a, float b){return a?a<b?0:S(A(a,1+~b),b+2):1;} int main() { *d=25; char str[10][10] = { ":Ugly", ":Good", ":Bad"}; int result[200005]; #pragma omp parallel for private(x) schedule(dynamic, 8) for( x=1; x<=200000; x++ ) { int t, X = x, sum; for( sum=0; X; X/=10 ) sum += X % 10; float y = sum; if ( x % y ) t = 0; else if ( S(F(x,x),1) ) t = 1; else t = 2; result[x] = t; } for( x=1; x<=200000; x++ ) { printf("%d", x); puts( str[result[x]] ); } printf("Who's %d?\n", (*s)[a][e][e][d]); return 0; }
#include <stdio.h> #define Const(x,y,z) x##y##z #define float Const(un,sign,ed) float x, s[1], a[1], e[1], d[1]; float A(float a, float b) { return a & b ? A(a ^ b, (a & b) << 1) : a ^ b; } float P(float a, float b) { return a ? P(a / 10, '-') + putchar(a % 10 + 48) - 48 : 0; } float G(float a, float b) { for (; b; b ^= a ^= b ^= a %= b); return !--a; } float F(float a, float b) { return b ? G(a, b) + F(a, b - 1) : 0; } float S(float a, float b) { return a ? a < b ? 0 : S(A(a, 1 + ~b), b + 2) : 1; } int main() { *d = 25; char str[10][10] = {":Ugly", ":Good", ":Bad"}; int result[200005]; for (x = 1; x <= 200000; x++) { int t, X = x, sum; for (sum = 0; X; X /= 10) sum += X % 10; float y = sum; if (x % y) t = 0; else if (S(F(x, x), 1)) t = 1; else t = 2; result[x] = t; } for (x = 1; x <= 200000; x++) { printf("%d", x); puts(str[result[x]]); } printf("Who's %d?\n", (*s)[a][e][e][d]); return 0; }
#include <stdio.h> #define Const(x,y,z) x##y##z #define float Const(un,sign,ed) float x, s[1], a[1], e[1], d[1]; float A(float a, float b) { return a & b ? A(a ^ b, (a & b) << 1) : a ^ b; } float P(float a, float b) { return a ? P(a / 10, '-') + putchar(a % 10 + 48) - 48 : 0; } float G(float a, float b) { for (; b; b ^= a ^= b ^= a %= b); return !--a; } float F(float a, float b) { return b ? G(a, b) + F(a, b - 1) : 0; } float S(float a, float b) { return a ? a < b ? 0 : S(A(a, 1 + ~b), b + 2) : 1; } int main() { *d = 25; char str[10][10] = {":Ugly", ":Good", ":Bad"}; int result[200005]; #pragma omp parallel for private(x) schedule(dynamic, 8) for (x = 1; x <= 200000; x++) { int t, X = x, sum; for (sum = 0; X; X /= 10) sum += X % 10; float y = sum; if (x % y) t = 0; else if (S(F(x, x), 1)) t = 1; else t = 2; result[x] = t; } for (x = 1; x <= 200000; x++) { printf("%d", x); puts(str[result[x]]); } printf("Who's %d?\n", (*s)[a][e][e][d]); return 0; }
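In mp.c the loop index x is a file-scope variable (and float is macro-rewritten to unsigned), so the OpenMP variant must name it in private(x); the loop is parallelizable because each iteration writes only its own result[x], and schedule(dynamic, 8) absorbs the very uneven per-iteration cost of what appears to be a totient and perfect-square computation. A sketch of the more idiomatic shape follows: it declares the index inside the for statement so it is automatically private, with no data-sharing clause needed. classify is a hypothetical stand-in for part of the per-number work.

// Sketch of the idiomatic alternative to mp.c's "#pragma omp parallel for
// private(x)": an index declared inside the for statement is automatically
// private to each thread.  classify() is a hypothetical stand-in for the
// digit-sum test only, not the full mp.c computation.
#include <stdio.h>

#define N 200000
static int result [N + 5] ;

static int classify (unsigned x)
{
    unsigned sum = 0 ;
    for (unsigned X = x ; X ; X /= 10) sum += X % 10 ;
    return (x % sum) ? 0 : 1 ;      // 0 if the digit sum does not divide x
}

int main (void)
{
    // dynamic schedule: iteration costs vary widely, so hand out small chunks
    #pragma omp parallel for schedule(dynamic, 8)
    for (unsigned x = 1 ; x <= N ; x++)
    {
        result [x] = classify (x) ; // disjoint writes: race-free
    }
    for (unsigned x = 1 ; x <= N ; x++) printf ("%u:%d\n", x, result [x]) ;
    return (0) ;
}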
GB_kroner.c
//------------------------------------------------------------------------------ // GB_kroner: Kronecker product, C = kron (A,B) //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // C = kron(A,B) where op determines the binary multiplier to use. The type of // A and B are compatible with the x and y inputs of z=op(x,y), but can be // different. The type of C is the type of z. C is hypersparse if either A // or B are hypersparse. // FUTURE: GB_kron would be faster with built-in types and operators. // FUTURE: at most one thread is used for each vector of C=kron(A,B). The // matrix C is normally very large, but if both A and B are n-by-1, then C is // n^2-by-1 and only a single thread is used. A better method for this case // would construct vectors of C in parallel. // FUTURE: each vector C(:,k) takes O(nnz(C(:,k))) work, but this is not // accounted for in the parallel load-balancing. #include "GB_kron.h" GrB_Info GB_kroner // C = kron (A,B) ( GrB_Matrix *Chandle, // output matrix const bool C_is_csc, // desired format of C const GrB_BinaryOp op, // multiply operator const GrB_Matrix A, // input matrix const GrB_Matrix B, // input matrix GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (Chandle != NULL) ; ASSERT_OK (GB_check (A, "A for kron (A,B)", GB0)) ; ASSERT_OK (GB_check (B, "B for kron (A,B)", GB0)) ; ASSERT_OK (GB_check (op, "op for kron (A,B)", GB0)) ; ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ; ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ; //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GrB_Info info ; (*Chandle) = NULL ; const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = A->h ; const int64_t *restrict Ai = A->i ; const GB_void *restrict Ax = A->x ; const int64_t asize = A->type->size ; const int64_t avlen = A->vlen ; const int64_t avdim = A->vdim ; int64_t anvec = A->nvec ; int64_t anz = GB_NNZ (A) ; const int64_t *restrict Bp = B->p ; const int64_t *restrict Bh = B->h ; const int64_t *restrict Bi = B->i ; const GB_void *restrict Bx = B->x ; const int64_t bsize = B->type->size ; const int64_t bvlen = B->vlen ; const int64_t bvdim = B->vdim ; int64_t bnvec = B->nvec ; int64_t bnz = GB_NNZ (B) ; //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- double work = ((double) anz) * ((double) bnz) + (((double) anvec) * ((double) bnvec)) ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (work, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // allocate the output matrix C //-------------------------------------------------------------------------- // C has the same type as z for the multiply operator, z=op(x,y) GrB_Index cvlen, cvdim, cnzmax, cnvec ; bool ok = GB_Index_multiply (&cvlen, avlen, bvlen) ; ok = ok & GB_Index_multiply (&cvdim, avdim, bvdim) ; ok = ok & GB_Index_multiply (&cnzmax, anz, bnz) ; ok = ok 
& GB_Index_multiply (&cnvec, anvec, bnvec) ; ASSERT (ok) ; // C is hypersparse if either A or B are hypersparse bool C_is_hyper = (cvdim > 1) && (A->is_hyper || B->is_hyper) ; GrB_Matrix C = NULL ; // allocate a new header for C GB_CREATE (&C, op->ztype, (int64_t) cvlen, (int64_t) cvdim, GB_Ap_malloc, C_is_csc, GB_SAME_HYPER_AS (C_is_hyper), B->hyper_ratio, cnvec, cnzmax, true, Context) ; if (info != GrB_SUCCESS) { // out of memory return (info) ; } //-------------------------------------------------------------------------- // get C //-------------------------------------------------------------------------- int64_t *restrict Cp = C->p ; int64_t *restrict Ch = C->h ; int64_t *restrict Ci = C->i ; GB_void *restrict Cx = C->x ; const int64_t csize = C->type->size ; GxB_binary_function fmult = op->function ; GB_cast_function cast_A = GB_cast_factory (op->xtype->code, A->type->code), cast_B = GB_cast_factory (op->ytype->code, B->type->code) ; //-------------------------------------------------------------------------- // compute the column counts of C, and C->h if C is hypersparse //-------------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(guided) collapse(2) for (int64_t kA = 0 ; kA < anvec ; kA++) { for (int64_t kB = 0 ; kB < bnvec ; kB++) { // get A(:,jA), the (kA)th vector of A int64_t jA = (Ah == NULL) ? kA : Ah [kA] ; int64_t aknz = Ap [kA+1] - Ap [kA] ; // get B(:,jB), the (kB)th vector of B int64_t jB = (Bh == NULL) ? kB : Bh [kB] ; int64_t bknz = Bp [kB+1] - Bp [kB] ; // determine # entries in C(:,jC), the (kC)th vector of C int64_t kC = kA * bnvec + kB ; Cp [kC] = aknz * bknz ; if (C_is_hyper) { Ch [kC] = jA * bvdim + jB ; } } } //-------------------------------------------------------------------------- // replace Cp with its cumulative sum //-------------------------------------------------------------------------- GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nthreads) ; if (C_is_hyper) C->nvec = cnvec ; C->magic = GB_MAGIC ; //-------------------------------------------------------------------------- // C = kron (A,B) //-------------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(guided) collapse(2) for (int64_t kA = 0 ; kA < anvec ; kA++) { for (int64_t kB = 0 ; kB < bnvec ; kB++) { // get B(:,jB), the (kB)th vector of B int64_t pB_start = Bp [kB] ; int64_t pB_end = Bp [kB+1] ; int64_t bknz = pB_start - pB_end ; if (bknz == 0) continue ; GB_void bwork [bsize] ; // get C(:,jC), the (kC)th vector of C int64_t kC = kA * bnvec + kB ; int64_t pC = Cp [kC] ; // get A(:,jA), the (kA)th vector of A int64_t pA_start = Ap [kA] ; int64_t pA_end = Ap [kA+1] ; GB_void awork [asize] ; for (int64_t pA = pA_start ; pA < pA_end ; pA++) { // awork = A(iA,jA), typecasted to op->xtype int64_t iA = Ai [pA] ; int64_t iAblock = iA * bvlen ; cast_A (awork, Ax +(pA*asize), asize) ; for (int64_t pB = pB_start ; pB < pB_end ; pB++) { // bwork = B(iB,jB), typecasted to op->ytype int64_t iB = Bi [pB] ; cast_B (bwork, Bx +(pB*bsize), bsize) ; // C(iC,jC) = A(iA,jA) * B(iB,jB) int64_t iC = iAblock + iB ; Ci [pC] = iC ; fmult (Cx +(pC*csize), awork, bwork) ; pC++ ; } } } } //-------------------------------------------------------------------------- // remove empty vectors from C, if hypersparse //-------------------------------------------------------------------------- if (C_is_hyper && C->nvec_nonempty < cnvec) { // create new Cp_new and Ch_new arrays, with no empty 
vectors int64_t *restrict Cp_new = NULL ; int64_t *restrict Ch_new = NULL ; int64_t nvec_new ; info = GB_hyper_prune (&Cp_new, &Ch_new, &nvec_new, C->p, C->h, cnvec, Context) ; if (info != GrB_SUCCESS) { // out of memory GB_MATRIX_FREE (&C) ; return (info) ; } // transplant the new hyperlist into C GB_FREE_MEMORY (C->p, cnvec+1, sizeof (int64_t)) ; GB_FREE_MEMORY (C->h, cnvec, sizeof (int64_t)) ; C->p = Cp_new ; C->h = Ch_new ; C->nvec = nvec_new ; C->plen = nvec_new ; ASSERT (C->nvec == C->nvec_nonempty) ; } ASSERT (C->nvec_nonempty == GB_nvec_nonempty (C, Context)) ; //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- ASSERT_OK (GB_check (C, "C=kron(A,B)", GB0)) ; (*Chandle) = C ; return (GrB_SUCCESS) ; }
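The numerical loop above places each Kronecker entry at row iC = iA*bvlen + iB and, for hypersparse C, column jC = jA*bvdim + jB, with vector kC = kA*bnvec + kB holding exactly aknz*bknz entries. A dense, column-major miniature that exercises the same index arithmetic follows; kron_dense is a hypothetical helper, and the real routine of course works vector-by-vector on sparse structures instead.

// Dense miniature of the index arithmetic in GB_kroner above:
// C(iA*bm + iB, jA*bn + jB) = A(iA,jA) * B(iB,jB).  Column-major storage
// mirrors GraphBLAS's CSC orientation.
#include <stdio.h>

static void kron_dense (double *C, const double *A, int am, int an,
    const double *B, int bm, int bn)
{
    for (int jA = 0 ; jA < an ; jA++)
      for (int jB = 0 ; jB < bn ; jB++)
        for (int iA = 0 ; iA < am ; iA++)
          for (int iB = 0 ; iB < bm ; iB++)
          {
            int iC = iA * bm + iB ;     // row offset, as in Ci [pC] above
            int jC = jA * bn + jB ;     // column offset, as in Ch [kC] above
            C [iC + (size_t) jC * (am * bm)] =
                A [iA + (size_t) jA * am] * B [iB + (size_t) jB * bm] ;
          }
}

int main (void)
{
    double A [4] = { 1, 3, 2, 4 } ;     // 2x2, column-major: [1 2; 3 4]
    double B [2] = { 0, 5 } ;           // 2x1: [0; 5]
    double C [8] ;                      // kron(A,B) is (2*2)-by-(2*1) = 4x2
    kron_dense (C, A, 2, 2, B, 2, 1) ;
    for (int i = 0 ; i < 4 ; i++)       // prints [0 0; 5 10; 0 0; 15 20]
        printf ("%g %g\n", C [i], C [i + 4]) ;
    return (0) ;
}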
// ------------------------------------------------------------------------------ //GB_kroner:Kronecker product, C = kron(A, B) // ------------------------------------------------------------------------------ //SuiteSparse:GraphBLAS, Timothy A.Davis, (c) 2017 - 2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS / Doc / License.txt for license . // ------------------------------------------------------------------------------ //C = kron(A, B) where op determines the binary multiplier to use.The type of // A and B are compatible with the x and y inputs of z = op(x, y), but can be // different.The type of C is the type of z.C is hypersparse if either A // or B are hypersparse. // FUTURE: GB_kron would be faster with built - in types and operators. // FUTURE: at most one thread is used for each vector of C = kron(A, B) .The // matrix C is normally very large, but if both A and B are n - by - 1, then C is // n ^ 2 - by - 1 and only a single thread is used.A better method for this case //would construct vectors of C in parallel. // FUTURE: each vector C(:,k) takes O(nnz(C(:, k))) work, but this is not // accounted for in the parallel load - balancing. #include "GB_kron.h" GrB_Info GB_kroner // C = kron(A, B) ( GrB_Matrix * Chandle, //output matrix const bool C_is_csc, //desired format of C const GrB_BinaryOp op, //multiply operator const GrB_Matrix A, //input matrix const GrB_Matrix B, //input matrix GB_Context Context ) { //-------------------------------------------------------------------------- //check inputs // -------------------------------------------------------------------------- ASSERT(Chandle != NULL); ASSERT_OK(GB_check(A, "A for kron (A,B)", GB0)); ASSERT_OK(GB_check(B, "B for kron (A,B)", GB0)); ASSERT_OK(GB_check(op, "op for kron (A,B)", GB0)); ASSERT(!GB_PENDING(A)); ASSERT(!GB_ZOMBIES(A)); ASSERT(!GB_PENDING(B)); ASSERT(!GB_ZOMBIES(B)); //-------------------------------------------------------------------------- //get inputs // -------------------------------------------------------------------------- GrB_Info info; (*Chandle) = NULL; const int64_t *restrict Ap = A->p; const int64_t *restrict Ah = A->h; const int64_t *restrict Ai = A->i; const GB_void *restrict Ax = A->x; const int64_t asize = A->type->size; const int64_t avlen = A->vlen; const int64_t avdim = A->vdim; int64_t anvec = A->nvec; int64_t anz = GB_NNZ(A); const int64_t *restrict Bp = B->p; const int64_t *restrict Bh = B->h; const int64_t *restrict Bi = B->i; const GB_void *restrict Bx = B->x; const int64_t bsize = B->type->size; const int64_t bvlen = B->vlen; const int64_t bvdim = B->vdim; int64_t bnvec = B->nvec; int64_t bnz = GB_NNZ(B); //-------------------------------------------------------------------------- //determine the number of threads to use // -------------------------------------------------------------------------- double work = ((double)anz) * ((double)bnz) + (((double)anvec) * ((double)bnvec)); GB_GET_NTHREADS_MAX(nthreads_max, chunk, Context); int nthreads = GB_nthreads(work, chunk, nthreads_max); //-------------------------------------------------------------------------- //allocate the output matrix C // -------------------------------------------------------------------------- //C has the same type as z for the multiply operator, z = op(x, y) GrB_Index cvlen, cvdim, cnzmax, cnvec; bool ok = GB_Index_multiply(&cvlen, avlen, bvlen); ok = ok & GB_Index_multiply(&cvdim, avdim, bvdim); ok = ok & GB_Index_multiply(&cnzmax, anz, bnz); ok = ok & GB_Index_multiply(&cnvec, anvec, 
bnvec); ASSERT(ok); //C is hypersparse if either A or B are hypersparse bool C_is_hyper = (cvdim > 1) &&(A->is_hyper || B->is_hyper); GrB_Matrix C = NULL; //allocate a new header for C GB_CREATE(&C, op->ztype, (int64_t) cvlen, (int64_t) cvdim, GB_Ap_malloc, C_is_csc, GB_SAME_HYPER_AS(C_is_hyper), B->hyper_ratio, cnvec, cnzmax, true, Context); if (info != GrB_SUCCESS) { //out of memory return (info); } //-------------------------------------------------------------------------- //get C // -------------------------------------------------------------------------- int64_t * restrict Cp = C->p; int64_t *restrict Ch = C->h; int64_t *restrict Ci = C->i; GB_void *restrict Cx = C->x; const int64_t csize = C->type->size; GxB_binary_function fmult = op->function; GB_cast_function cast_A = GB_cast_factory(op->xtype->code, A->type->code), cast_B = GB_cast_factory(op->ytype->code, B->type->code); //-------------------------------------------------------------------------- //compute the column counts of C, and C->h if C is hypersparse // -------------------------------------------------------------------------- for (int64_t kA = 0; kA < anvec; kA++) { for (int64_t kB = 0; kB < bnvec; kB++) { //get A(:,jA), the(kA) th vector of A int64_t jA = (Ah == NULL) ? kA : Ah[kA]; int64_t aknz = Ap[kA + 1] - Ap[kA]; //get B(:,jB), the(kB) th vector of B int64_t jB = (Bh == NULL) ? kB : Bh[kB]; int64_t bknz = Bp[kB + 1] - Bp[kB]; //determine #entries in C(:,jC), the (kC)th vector of C int64_t kC = kA * bnvec + kB; Cp[kC] = aknz * bknz; if (C_is_hyper) { Ch[kC] = jA * bvdim + jB; } } } //-------------------------------------------------------------------------- //replace Cp with its cumulative sum // -------------------------------------------------------------------------- GB_cumsum(Cp, cnvec, &(C->nvec_nonempty), nthreads); if (C_is_hyper) C->nvec = cnvec; C->magic = GB_MAGIC; //-------------------------------------------------------------------------- //C = kron(A, B) // -------------------------------------------------------------------------- for (int64_t kA = 0; kA < anvec; kA++) { for (int64_t kB = 0; kB < bnvec; kB++) { //get B(:,jB), the(kB) th vector of B int64_t pB_start = Bp[kB]; int64_t pB_end = Bp[kB + 1]; int64_t bknz = pB_start - pB_end; if (bknz == 0) continue; GB_void bwork[bsize]; //get C(:,jC), the(kC) th vector of C int64_t kC = kA * bnvec + kB; int64_t pC = Cp[kC]; //get A(:,jA), the(kA) th vector of A int64_t pA_start = Ap[kA]; int64_t pA_end = Ap[kA + 1]; GB_void awork[asize]; for (int64_t pA = pA_start; pA < pA_end; pA++) { //awork = A(iA, jA), typecasted to op->xtype int64_t iA = Ai[pA]; int64_t iAblock = iA * bvlen; cast_A(awork, Ax + (pA * asize), asize); for (int64_t pB = pB_start; pB < pB_end; pB++) { //bwork = B(iB, jB), typecasted to op->ytype int64_t iB = Bi[pB]; cast_B(bwork, Bx + (pB * bsize), bsize); //C(iC, jC) = A(iA, jA) * B(iB, jB) int64_t iC = iAblock + iB; Ci[pC] = iC; fmult(Cx + (pC * csize), awork, bwork); pC++; } } } } //-------------------------------------------------------------------------- //remove empty vectors from C, if hypersparse //-------------------------------------------------------------------------- if (C_is_hyper && C->nvec_nonempty < cnvec) { //create new Cp_new and Ch_new arrays, with no empty vectors int64_t * restrict Cp_new = NULL; int64_t *restrict Ch_new = NULL; int64_t nvec_new; info = GB_hyper_prune(&Cp_new, &Ch_new, &nvec_new, C->p, C->h, cnvec, Context); if (info != GrB_SUCCESS) { //out of memory GB_MATRIX_FREE(&C); return (info); } //transplant 
the new hyperlist into C GB_FREE_MEMORY(C->p, cnvec + 1, sizeof(int64_t)); GB_FREE_MEMORY(C->h, cnvec, sizeof(int64_t)); C->p = Cp_new; C->h = Ch_new; C->nvec = nvec_new; C->plen = nvec_new; ASSERT(C->nvec == C->nvec_nonempty); } ASSERT(C->nvec_nonempty == GB_nvec_nonempty(C, Context)); //-------------------------------------------------------------------------- //return result // -------------------------------------------------------------------------- ASSERT_OK(GB_check(C, "C=kron(A,B)", GB0)); (*Chandle) = C; return (GrB_SUCCESS); }
// ------------------------------------------------------------------------------ //GB_kroner:Kronecker product, C = kron(A, B) // ------------------------------------------------------------------------------ //SuiteSparse:GraphBLAS, Timothy A.Davis, (c) 2017 - 2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS / Doc / License.txt for license . // ------------------------------------------------------------------------------ //C = kron(A, B) where op determines the binary multiplier to use.The type of // A and B are compatible with the x and y inputs of z = op(x, y), but can be // different.The type of C is the type of z.C is hypersparse if either A // or B are hypersparse. // FUTURE: GB_kron would be faster with built - in types and operators. // FUTURE: at most one thread is used for each vector of C = kron(A, B) .The // matrix C is normally very large, but if both A and B are n - by - 1, then C is // n ^ 2 - by - 1 and only a single thread is used.A better method for this case //would construct vectors of C in parallel. // FUTURE: each vector C(:,k) takes O(nnz(C(:, k))) work, but this is not // accounted for in the parallel load - balancing. #include "GB_kron.h" GrB_Info GB_kroner // C = kron(A, B) ( GrB_Matrix * Chandle, //output matrix const bool C_is_csc, //desired format of C const GrB_BinaryOp op, //multiply operator const GrB_Matrix A, //input matrix const GrB_Matrix B, //input matrix GB_Context Context ) { //-------------------------------------------------------------------------- //check inputs // -------------------------------------------------------------------------- ASSERT(Chandle != NULL); ASSERT_OK(GB_check(A, "A for kron (A,B)", GB0)); ASSERT_OK(GB_check(B, "B for kron (A,B)", GB0)); ASSERT_OK(GB_check(op, "op for kron (A,B)", GB0)); ASSERT(!GB_PENDING(A)); ASSERT(!GB_ZOMBIES(A)); ASSERT(!GB_PENDING(B)); ASSERT(!GB_ZOMBIES(B)); //-------------------------------------------------------------------------- //get inputs // -------------------------------------------------------------------------- GrB_Info info; (*Chandle) = NULL; const int64_t *restrict Ap = A->p; const int64_t *restrict Ah = A->h; const int64_t *restrict Ai = A->i; const GB_void *restrict Ax = A->x; const int64_t asize = A->type->size; const int64_t avlen = A->vlen; const int64_t avdim = A->vdim; int64_t anvec = A->nvec; int64_t anz = GB_NNZ(A); const int64_t *restrict Bp = B->p; const int64_t *restrict Bh = B->h; const int64_t *restrict Bi = B->i; const GB_void *restrict Bx = B->x; const int64_t bsize = B->type->size; const int64_t bvlen = B->vlen; const int64_t bvdim = B->vdim; int64_t bnvec = B->nvec; int64_t bnz = GB_NNZ(B); //-------------------------------------------------------------------------- //determine the number of threads to use // -------------------------------------------------------------------------- double work = ((double)anz) * ((double)bnz) + (((double)anvec) * ((double)bnvec)); GB_GET_NTHREADS_MAX(nthreads_max, chunk, Context); int nthreads = GB_nthreads(work, chunk, nthreads_max); //-------------------------------------------------------------------------- //allocate the output matrix C // -------------------------------------------------------------------------- //C has the same type as z for the multiply operator, z = op(x, y) GrB_Index cvlen, cvdim, cnzmax, cnvec; bool ok = GB_Index_multiply(&cvlen, avlen, bvlen); ok = ok & GB_Index_multiply(&cvdim, avdim, bvdim); ok = ok & GB_Index_multiply(&cnzmax, anz, bnz); ok = ok & GB_Index_multiply(&cnvec, anvec, 
bnvec); ASSERT(ok); //C is hypersparse if either A or B are hypersparse bool C_is_hyper = (cvdim > 1) &&(A->is_hyper || B->is_hyper); GrB_Matrix C = NULL; //allocate a new header for C GB_CREATE(&C, op->ztype, (int64_t) cvlen, (int64_t) cvdim, GB_Ap_malloc, C_is_csc, GB_SAME_HYPER_AS(C_is_hyper), B->hyper_ratio, cnvec, cnzmax, true, Context); if (info != GrB_SUCCESS) { //out of memory return (info); } //-------------------------------------------------------------------------- //get C // -------------------------------------------------------------------------- int64_t * restrict Cp = C->p; int64_t *restrict Ch = C->h; int64_t *restrict Ci = C->i; GB_void *restrict Cx = C->x; const int64_t csize = C->type->size; GxB_binary_function fmult = op->function; GB_cast_function cast_A = GB_cast_factory(op->xtype->code, A->type->code), cast_B = GB_cast_factory(op->ytype->code, B->type->code); //-------------------------------------------------------------------------- //compute the column counts of C, and C->h if C is hypersparse // -------------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(guided) collapse(2) for (int64_t kA = 0; kA < anvec; kA++) { for (int64_t kB = 0; kB < bnvec; kB++) { //get A(:,jA), the(kA) th vector of A int64_t jA = (Ah == NULL) ? kA : Ah[kA]; int64_t aknz = Ap[kA + 1] - Ap[kA]; //get B(:,jB), the(kB) th vector of B int64_t jB = (Bh == NULL) ? kB : Bh[kB]; int64_t bknz = Bp[kB + 1] - Bp[kB]; //determine #entries in C(:,jC), the (kC)th vector of C int64_t kC = kA * bnvec + kB; Cp[kC] = aknz * bknz; if (C_is_hyper) { Ch[kC] = jA * bvdim + jB; } } } //-------------------------------------------------------------------------- //replace Cp with its cumulative sum // -------------------------------------------------------------------------- GB_cumsum(Cp, cnvec, &(C->nvec_nonempty), nthreads); if (C_is_hyper) C->nvec = cnvec; C->magic = GB_MAGIC; //-------------------------------------------------------------------------- //C = kron(A, B) // -------------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(guided) collapse(2) for (int64_t kA = 0; kA < anvec; kA++) { for (int64_t kB = 0; kB < bnvec; kB++) { //get B(:,jB), the(kB) th vector of B int64_t pB_start = Bp[kB]; int64_t pB_end = Bp[kB + 1]; int64_t bknz = pB_start - pB_end; if (bknz == 0) continue; GB_void bwork[bsize]; //get C(:,jC), the(kC) th vector of C int64_t kC = kA * bnvec + kB; int64_t pC = Cp[kC]; //get A(:,jA), the(kA) th vector of A int64_t pA_start = Ap[kA]; int64_t pA_end = Ap[kA + 1]; GB_void awork[asize]; for (int64_t pA = pA_start; pA < pA_end; pA++) { //awork = A(iA, jA), typecasted to op->xtype int64_t iA = Ai[pA]; int64_t iAblock = iA * bvlen; cast_A(awork, Ax + (pA * asize), asize); for (int64_t pB = pB_start; pB < pB_end; pB++) { //bwork = B(iB, jB), typecasted to op->ytype int64_t iB = Bi[pB]; cast_B(bwork, Bx + (pB * bsize), bsize); //C(iC, jC) = A(iA, jA) * B(iB, jB) int64_t iC = iAblock + iB; Ci[pC] = iC; fmult(Cx + (pC * csize), awork, bwork); pC++; } } } } //-------------------------------------------------------------------------- //remove empty vectors from C, if hypersparse //-------------------------------------------------------------------------- if (C_is_hyper && C->nvec_nonempty < cnvec) { //create new Cp_new and Ch_new arrays, with no empty vectors int64_t * restrict Cp_new = NULL; int64_t *restrict Ch_new = NULL; int64_t nvec_new; info = 
GB_hyper_prune(&Cp_new, &Ch_new, &nvec_new, C->p, C->h, cnvec, Context); if (info != GrB_SUCCESS) { //out of memory GB_MATRIX_FREE(&C); return (info); } //transplant the new hyperlist into C GB_FREE_MEMORY(C->p, cnvec + 1, sizeof(int64_t)); GB_FREE_MEMORY(C->h, cnvec, sizeof(int64_t)); C->p = Cp_new; C->h = Ch_new; C->nvec = nvec_new; C->plen = nvec_new; ASSERT(C->nvec == C->nvec_nonempty); } ASSERT(C->nvec_nonempty == GB_nvec_nonempty(C, Context)); //-------------------------------------------------------------------------- //return result // -------------------------------------------------------------------------- ASSERT_OK(GB_check(C, "C=kron(A,B)", GB0)); (*Chandle) = C; return (GrB_SUCCESS); }
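Before allocating C, the routine forms cvlen, cvdim, cnzmax, and cnvec through GB_Index_multiply, which reports failure rather than silently wrapping, since Kronecker dimensions grow multiplicatively. Below is a hedged stand-in for that kind of checked product; the real GB_Index_multiply lives elsewhere in GraphBLAS and also enforces GraphBLAS's index limit, while this sketch only detects 64-bit wraparound.

// Hedged stand-in for the overflow-checked products used above
// (GB_Index_multiply): Kronecker dimensions are products, so they can
// overflow 64 bits long before memory runs out.
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool index_multiply (uint64_t *c, uint64_t a, uint64_t b)
{
    *c = 0 ;
    if (a == 0 || b == 0) return (true) ;       // zero never overflows
    if (a > UINT64_MAX / b) return (false) ;    // a*b would wrap around
    *c = a * b ;
    return (true) ;
}

int main (void)
{
    uint64_t cvlen ;
    bool ok = index_multiply (&cvlen, 1ULL << 40, 1ULL << 20) ;
    printf ("ok=%d cvlen=%llu\n", ok, (unsigned long long) cvlen) ; // ok=1
    ok = index_multiply (&cvlen, 1ULL << 40, 1ULL << 30) ;          // 2^70 wraps
    printf ("ok=%d\n", ok) ;                                        // ok=0
    return (0) ;
}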
GB_binop__isle_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_fp64) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__isle_fp64) // A.*B function (eWiseMult): GB (_AemultB_03__isle_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_fp64) // A*D function (colscale): GB (_AxD__isle_fp64) // D*A function (rowscale): GB (_DxB__isle_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__isle_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__isle_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_fp64) // C=scalar+B GB (_bind1st__isle_fp64) // C=scalar+B' GB (_bind1st_tran__isle_fp64) // C=A+scalar GB (_bind2nd__isle_fp64) // C=A'+scalar GB (_bind2nd_tran__isle_fp64) // C type: double // A type: double // B,b type: double // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_FP64 || GxB_NO_ISLE_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isle_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_fp64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_fp64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isle_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isle_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; 
#else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; double bij = Bx [p] ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = Ax [p] ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
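After the GB_GETB and GB_BINOP macros expand, the _bind1st__isle_fp64 worker above reduces to a flat comparison loop over the matrix values. A minimal standalone sketch of that expanded loop, assuming a full matrix (so the GBB bitmap test from the generated code is dropped); the function name is mine:

#include <stdint.h>

/* Sketch of the expanded bind1st loop for the ISLE (<=) operator on
   double: Cx [p] = (x <= Bx [p]).  The comparison result (0 or 1) is
   stored as a double, so ISLE yields 0.0 or 1.0. */
void bind1st_isle_fp64_sketch (double *Cx, double x,
                               const double *Bx, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (x <= Bx [p]) ;
    }
}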
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_fp64) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__isle_fp64) // A.*B function (eWiseMult): GB (_AemultB_03__isle_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_fp64) // A*D function (colscale): GB (_AxD__isle_fp64) // D*A function (rowscale): GB (_DxB__isle_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__isle_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__isle_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_fp64) // C=scalar+B GB (_bind1st__isle_fp64) // C=scalar+B' GB (_bind1st_tran__isle_fp64) // C=A+scalar GB (_bind2nd__isle_fp64) // C=A'+scalar GB (_bind2nd_tran__isle_fp64) // C type: double // A type: double // B,b type: double // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_FP64 || GxB_NO_ISLE_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isle_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_fp64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_fp64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isle_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isle_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; 
#else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; double bij = Bx [p] ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = Ax [p] ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
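The #if GB_DISABLE / return (GrB_NO_VALUE) pattern seen in every worker above implies a caller-side fallback: a kernel compiled out via the GxB_NO_* controls reports GrB_NO_VALUE, and the caller switches to a generic, typecasting path. A hedged sketch of that dispatch, with hypothetical stand-in kernels (only GrB_Info, GrB_NO_VALUE, and GrB_SUCCESS are the real GraphBLAS names):

#include <stdint.h>
#include "GraphBLAS.h"   /* public header: GrB_Info, GrB_NO_VALUE, GrB_SUCCESS */

/* Hypothetical stand-in for a generated worker that was compiled out. */
static GrB_Info hard_coded_kernel (double *Cx, const double *Bx, int64_t anz)
{
    (void) Cx ; (void) Bx ; (void) anz ;
    return (GrB_NO_VALUE) ;             /* as if GB_DISABLE were true */
}

/* Hypothetical stand-in for the generic fallback kernel. */
static GrB_Info generic_kernel (double *Cx, const double *Bx, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++) Cx [p] = Bx [p] ;
    return (GrB_SUCCESS) ;
}

/* The dispatch pattern implied by GB_DISABLE: GrB_NO_VALUE from a
   disabled kernel means "use the generic path instead". */
static GrB_Info apply_kernel (double *Cx, const double *Bx, int64_t anz)
{
    GrB_Info info = hard_coded_kernel (Cx, Bx, anz) ;
    return (info == GrB_NO_VALUE) ? generic_kernel (Cx, Bx, anz) : info ;
}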
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_fp64) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__isle_fp64) // A.*B function (eWiseMult): GB (_AemultB_03__isle_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_fp64) // A*D function (colscale): GB (_AxD__isle_fp64) // D*A function (rowscale): GB (_DxB__isle_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__isle_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__isle_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_fp64) // C=scalar+B GB (_bind1st__isle_fp64) // C=scalar+B' GB (_bind1st_tran__isle_fp64) // C=A+scalar GB (_bind2nd__isle_fp64) // C=A'+scalar GB (_bind2nd_tran__isle_fp64) // C type: double // A type: double // B,b type: double // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_FP64 || GxB_NO_ISLE_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isle_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_fp64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_fp64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isle_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isle_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; 
#else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; double bij = Bx [p] ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = Ax [p] ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
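The workers above instantiate one loop template per type/operator pair through macros such as GB_BINOP and GB_GETA. A toy, self-contained version of that technique; every name here is illustrative rather than a GraphBLAS macro:

#include <stdio.h>
#include <stdint.h>

/* Define the type and operator as macros, then let one generic loop
   body expand into a type-specific kernel. */
#define MY_TYPE   double
#define MY_BINOP(z,x,y)  z = ((x) <= (y))

static void emult_dense (MY_TYPE *Cx, const MY_TYPE *Ax,
                         const MY_TYPE *Bx, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++)
    {
        MY_BINOP (Cx [p], Ax [p], Bx [p]) ;  /* Cx[p] = (Ax[p] <= Bx[p]) */
    }
}

int main (void)
{
    MY_TYPE A [3] = { 1.0, 2.0, 3.0 }, B [3] = { 2.0, 2.0, 2.0 }, C [3] ;
    emult_dense (C, A, B, 3) ;
    printf ("%g %g %g\n", C [0], C [1], C [2]) ;   /* prints 1 1 0 */
    return (0) ;
}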
schedule.c
/* cc -lm t4.c -qsmp */ #include <stdio.h> #include <stdlib.h> #include <omp.h> #include <math.h> #include <sys/time.h> #include <unistd.h> #define FLT double /* utility routines */ void my_bar(); void explain(char astr[]); FLT system_clock(FLT *x); void start_time(); FLT end_time(); /* array used to determine how much work each thread performs */ int *dist,idid; FLT st; /* routine to reset dist */ void zero(int j); /* work routines */ void all_fast(); void zero_slow(); void a_slow(int i); void all_fast() { int k; k=omp_get_thread_num(); dist[k]++; } void zero_slow() { int k; FLT x,y; k=omp_get_thread_num(); dist[k]++; if(k == 0) { x=system_clock((FLT*)0); y=x+1; while(x < y) { x=system_clock((FLT*)0); } } } void imbalance (int i) { int k; FLT x,y; k=omp_get_thread_num(); dist[k]++; if(i == 1) { idid=k; x=system_clock((FLT*)0); y=x+1; while(x < y) { x=system_clock((FLT*)0); } } else { x=system_clock((FLT*)0); y=x+0.01; while(x < y) { x=system_clock((FLT*)0); } } } main() { int i,k,max_threads,total; max_threads=omp_get_max_threads(); printf("max threads = %d\n",max_threads); dist=(int*)malloc(max_threads*sizeof(int)); zero(max_threads); total=0; explain("report the % of iterations for each thread"); explain("for a set of loops"); explain("******"); explain("default scheduling"); explain("for a subroutine with little work"); k=max_threads*100; start_time(); #pragma omp parallel for for( i=1;i<=k;i++) { all_fast(); } my_bar(); for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n\n",total, end_time()); total=0; zero(max_threads); explain("default scheduling"); explain("for a subroutine with thread 0 given 1 second of work"); k=max_threads*4; start_time(); #pragma omp parallel for for( i=1;i<=k;i++) { zero_slow(); } my_bar(); for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n\n",total, end_time()); total=0; zero(max_threads); explain("schedule(static,1)"); explain("for a subroutine with thread 0 given 1 second of work"); start_time(); #pragma omp parallel for schedule(static,1) for( i=1;i<=k;i++) { zero_slow(); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n\n",total, end_time()); total=0; zero(max_threads); explain("schedule(static,2)"); explain("for a subroutine with thread 0 given 1 second of work"); start_time(); #pragma omp parallel for schedule(static,2) for( i=1;i<=k;i++) { zero_slow(); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n\n",total, end_time()); total=0; zero(max_threads); explain("schedule(dynamic,1)"); explain("for a subroutine with thread 0 given 1 second of work"); start_time(); #pragma omp parallel for schedule(dynamic,1) for( i=1;i<=k;i++) { zero_slow(); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n\n",total, end_time()); total=0; zero(max_threads); explain("schedule(dynamic,2)"); explain("for a subroutine with thread 0 given 1 second of work"); start_time(); #pragma omp parallel for schedule(dynamic,2) for( i=1;i<=k;i++) { zero_slow(); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total 
iterations: %d time %10.2f\n\n",total, end_time()); total=0; zero(max_threads); explain("schedule(dynamic,4)"); explain("for a subroutine with thread 0 given 1 second of work"); start_time(); #pragma omp parallel for schedule(dynamic,4) for( i=1;i<=k;i++) { zero_slow(); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n\n",total, end_time()); total=0; zero(max_threads); explain("default scheduling"); explain("for an imbalanced subroutine"); k=max_threads*100; start_time(); #pragma omp parallel for for( i=1;i<=k;i++) { imbalance(i); } my_bar(); for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n",total, end_time()); printf(" thread %d did the slow iteration\n\n",idid); total=0; zero(max_threads); explain("default scheduling"); explain("for an imbalanced subroutine"); start_time(); #pragma omp parallel for for( i=1;i<=k;i++) { imbalance(i); } my_bar(); for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n",total, end_time()); printf(" thread %d did the slow iteration\n\n",idid); total=0; zero(max_threads); explain("schedule(static,1)"); explain("for an imbalanced subroutine"); start_time(); #pragma omp parallel for schedule(static,1) for( i=1;i<=k;i++) { imbalance(i); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n",total, end_time()); printf(" thread %d did the slow iteration\n\n",idid); total=0; zero(max_threads); explain("schedule(static,2)"); explain("for an imbalanced subroutine"); start_time(); #pragma omp parallel for schedule(static,2) for( i=1;i<=k;i++) { imbalance(i); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n",total, end_time()); printf(" thread %d did the slow iteration\n\n",idid); total=0; zero(max_threads); explain("schedule(dynamic,1)"); explain("for an imbalanced subroutine"); start_time(); #pragma omp parallel for schedule(dynamic,1) for( i=1;i<=k;i++) { imbalance(i); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n",total, end_time()); printf(" thread %d did the slow iteration\n\n",idid); total=0; zero(max_threads); explain("schedule(dynamic,2)"); explain("for an imbalanced subroutine"); start_time(); #pragma omp parallel for schedule(dynamic,2) for( i=1;i<=k;i++) { imbalance(i); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n",total, end_time()); printf(" thread %d did the slow iteration\n\n",idid); total=0; zero(max_threads); explain("schedule(dynamic,4)"); explain("for an imbalanced subroutine"); start_time(); #pragma omp parallel for schedule(dynamic,4) for( i=1;i<=k;i++) { imbalance(i); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n",total, end_time()); printf(" thread %d did the slow iteration\n\n",idid); total=0; my_bar(); } void my_bar() { #pragma omp barrier fflush(stdout); #pragma omp barrier } void explain(char astr[]){ printf("****** %s\n",astr); }
FLT system_clock(FLT *x) { FLT t; FLT six=1.0e-6; struct timeval tb; struct timezone tz; gettimeofday(&tb,&tz); t=(FLT)tb.tv_sec+((FLT)tb.tv_usec)*six; if(x){ *x=t; } return(t); } void zero(int j) { int i; for( i=0;i<j;i++) { dist[i]=0; } } void start_time() { st=system_clock((FLT*)0); } FLT end_time() { return (system_clock((FLT*)0)-st); }
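schedule.c builds its timer from gettimeofday(); since the program already requires OpenMP, omp_get_wtime() would give the same wall-clock seconds with less code. A small sketch of that alternative (the loop body is a throwaway workload):

#include <stdio.h>
#include <omp.h>

/* Sketch: timing a region with omp_get_wtime(), which returns elapsed
   wall-clock seconds as a double, like the gettimeofday()-based
   system_clock()/start_time()/end_time() trio above. */
int main (void)
{
    double t0 = omp_get_wtime () ;
    double s = 0.0 ;
    #pragma omp parallel for reduction(+:s) schedule(static)
    for (int i = 0 ; i < 100000000 ; i++) { s += 1.0e-8 ; }
    printf ("sum %.3f in %.3f s\n", s, omp_get_wtime () - t0) ;
    return 0 ;
}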
/* cc -lm t4.c -qsmp */ #include <stdio.h> #include <stdlib.h> #include <omp.h> #include <math.h> #include <sys/time.h> #include <unistd.h> #define FLT double /* utility routines */ void my_bar(); void explain(char astr[]); FLT system_clock(FLT *x); void start_time(); FLT end_time(); /* array used to determine how much work each thread performs */ int *dist,idid; FLT st; /* routine to reset dist */ void zero(int j); /* work routines */ void all_fast(); void zero_slow(); void a_slow(int i); void all_fast() { int k; k=omp_get_thread_num(); dist[k]++; } void zero_slow() { int k; FLT x,y; k=omp_get_thread_num(); dist[k]++; if(k == 0) { x=system_clock((FLT*)0); y=x+1; while(x < y) { x=system_clock((FLT*)0); } } } void imbalance (int i) { int k; FLT x,y; k=omp_get_thread_num(); dist[k]++; if(i == 1) { idid=k; x=system_clock((FLT*)0); y=x+1; while(x < y) { x=system_clock((FLT*)0); } } else { x=system_clock((FLT*)0); y=x+0.01; while(x < y) { x=system_clock((FLT*)0); } } } main() { int i,k,max_threads,total; max_threads=omp_get_max_threads(); printf("max threads = %d\n",max_threads); dist=(int*)malloc(max_threads*sizeof(int)); zero(max_threads); total=0; explain("report the % of iterations for each thread"); explain("for a set of loops"); explain("******"); explain("default scheduling"); explain("for a subroutine with little work"); k=max_threads*100; start_time(); for( i=1;i<=k;i++) { all_fast(); } my_bar(); for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n\n",total, end_time()); total=0; zero(max_threads); explain("default scheduling"); explain("for a subroutine with thread 0 given 1 second of work"); k=max_threads*4; start_time(); for( i=1;i<=k;i++) { zero_slow(); } my_bar(); for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n\n",total, end_time()); total=0; zero(max_threads); explain("schedule(static,1)"); explain("for a subroutine with thread 0 given 1 second of work"); start_time(); for( i=1;i<=k;i++) { zero_slow(); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n\n",total, end_time()); total=0; zero(max_threads); explain("schedule(static,2)"); explain("for a subroutine with thread 0 given 1 second of work"); start_time(); for( i=1;i<=k;i++) { zero_slow(); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n\n",total, end_time()); total=0; zero(max_threads); explain("schedule(dynamic,1)"); explain("for a subroutine with thread 0 given 1 second of work"); start_time(); for( i=1;i<=k;i++) { zero_slow(); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n\n",total, end_time()); total=0; zero(max_threads); explain("schedule(dynamic,2)"); explain("for a subroutine with thread 0 given 1 second of work"); start_time(); for( i=1;i<=k;i++) { zero_slow(); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n\n",total, end_time()); total=0; zero(max_threads); explain("schedule(dynamic,4)"); explain("for a subroutine with thread 0 given 1 second of work"); start_time(); for( i=1;i<=k;i++) { zero_slow(); } 
for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n\n",total, end_time()); total=0; zero(max_threads); explain("default scheduling"); explain("for an imbalanced subroutine"); k=max_threads*100; start_time(); for( i=1;i<=k;i++) { imbalance(i); } my_bar(); for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n",total, end_time()); printf(" thread %d did the slow iteration\n\n",idid); total=0; zero(max_threads); explain("default scheduling"); explain("for an imbalanced subroutine"); start_time(); for( i=1;i<=k;i++) { imbalance(i); } my_bar(); for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n",total, end_time()); printf(" thread %d did the slow iteration\n\n",idid); total=0; zero(max_threads); explain("schedule(static,1)"); explain("for an imbalanced subroutine"); start_time(); for( i=1;i<=k;i++) { imbalance(i); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n",total, end_time()); printf(" thread %d did the slow iteration\n\n",idid); total=0; zero(max_threads); explain("schedule(static,2)"); explain("for an imbalanced subroutine"); start_time(); for( i=1;i<=k;i++) { imbalance(i); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n",total, end_time()); printf(" thread %d did the slow iteration\n\n",idid); total=0; zero(max_threads); explain("schedule(dynamic,1)"); explain("for an imbalanced subroutine"); start_time(); for( i=1;i<=k;i++) { imbalance(i); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n",total, end_time()); printf(" thread %d did the slow iteration\n\n",idid); total=0; zero(max_threads); explain("schedule(dynamic,2)"); explain("for an imbalanced subroutine"); start_time(); for( i=1;i<=k;i++) { imbalance(i); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n",total, end_time()); printf(" thread %d did the slow iteration\n\n",idid); total=0; zero(max_threads); explain("schedule(dynamic,4)"); explain("for an imbalanced subroutine"); start_time(); for( i=1;i<=k;i++) { imbalance(i); } for( i=0;i<max_threads;i++) { printf("%d %6.2f %%\n",i,100.0*(FLT)dist[i]/((FLT)k)); total=total+dist[i]; } printf(" total iterations: %d time %10.2f\n",total, end_time()); printf(" thread %d did the slow iteration\n\n",idid); total=0; my_bar(); } void my_bar() { fflush(stdout); } void explain(char astr[]){ printf("****** %s\n",astr); } FLT system_clock(FLT *x) { FLT t; FLT six=1.0e-6; struct timeval tb; struct timezone tz; gettimeofday(&tb,&tz); t=(FLT)tb.tv_sec+((FLT)tb.tv_usec)*six; if(x){ *x=t; } return(t); } void zero(int j) { int i; for( i=0;i<j;i++) { dist[i]=0; } } void start_time() { st=system_clock((FLT*)0); } FLT end_time() { return (system_clock((FLT*)0)-st); }
/* cc -lm t4.c -qsmp  (with gcc: cc -fopenmp -lm t4.c) */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <math.h>
#include <sys/time.h>
#include <unistd.h>

#define FLT double

/* utility routines */
void my_bar();
void explain(char astr[]);
FLT system_clock(FLT *x);
void start_time();
FLT end_time();

/* array used to determine how much work each thread performs */
int *dist, idid;
FLT st;

/* routine to reset dist */
void zero(int j);

/* work routines */
void all_fast();
void zero_slow();
void imbalance(int i);

void all_fast()
{
  int k;
  k = omp_get_thread_num();
  dist[k]++;
}

void zero_slow()
{
  int k;
  FLT x, y;
  k = omp_get_thread_num();
  dist[k]++;
  if (k == 0) {                 /* thread 0 spins for one second */
    x = system_clock((FLT *)0);
    y = x + 1;
    while (x < y)
      x = system_clock((FLT *)0);
  }
}

void imbalance(int i)
{
  int k;
  FLT x, y;
  k = omp_get_thread_num();
  dist[k]++;
  if (i == 1) {                 /* iteration 1 spins for one second */
    idid = k;
    x = system_clock((FLT *)0);
    y = x + 1;
    while (x < y)
      x = system_clock((FLT *)0);
  }
  else {                        /* every other iteration spins 0.01 seconds */
    x = system_clock((FLT *)0);
    y = x + 0.01;
    while (x < y)
      x = system_clock((FLT *)0);
  }
}

int main()
{
  int i, k, max_threads, total;

  max_threads = omp_get_max_threads();
  printf("max threads = %d\n", max_threads);
  dist = (int *)malloc(max_threads * sizeof(int));
  zero(max_threads);
  total = 0;
  explain("report the % of iterations for each thread");
  explain("for a set of loops");
  explain("******");

  explain("default scheduling");
  explain("for a subroutine with little work");
  k = max_threads * 100;
  start_time();
#pragma omp parallel for
  for (i = 1; i <= k; i++)
    all_fast();
  my_bar();
  for (i = 0; i < max_threads; i++) {
    printf("%d %6.2f %%\n", i, 100.0 * (FLT)dist[i] / ((FLT)k));
    total = total + dist[i];
  }
  printf(" total iterations: %d time %10.2f\n\n", total, end_time());
  total = 0;
  zero(max_threads);

  explain("default scheduling");
  explain("for a subroutine with thread 0 given 1 second of work");
  k = max_threads * 4;
  start_time();
#pragma omp parallel for
  for (i = 1; i <= k; i++)
    zero_slow();
  my_bar();
  for (i = 0; i < max_threads; i++) {
    printf("%d %6.2f %%\n", i, 100.0 * (FLT)dist[i] / ((FLT)k));
    total = total + dist[i];
  }
  printf(" total iterations: %d time %10.2f\n\n", total, end_time());
  total = 0;
  zero(max_threads);

  explain("schedule(static,1)");
  explain("for a subroutine with thread 0 given 1 second of work");
  start_time();
#pragma omp parallel for schedule(static,1)
  for (i = 1; i <= k; i++)
    zero_slow();
  for (i = 0; i < max_threads; i++) {
    printf("%d %6.2f %%\n", i, 100.0 * (FLT)dist[i] / ((FLT)k));
    total = total + dist[i];
  }
  printf(" total iterations: %d time %10.2f\n\n", total, end_time());
  total = 0;
  zero(max_threads);

  explain("schedule(static,2)");
  explain("for a subroutine with thread 0 given 1 second of work");
  start_time();
#pragma omp parallel for schedule(static,2)
  for (i = 1; i <= k; i++)
    zero_slow();
  for (i = 0; i < max_threads; i++) {
    printf("%d %6.2f %%\n", i, 100.0 * (FLT)dist[i] / ((FLT)k));
    total = total + dist[i];
  }
  printf(" total iterations: %d time %10.2f\n\n", total, end_time());
  total = 0;
  zero(max_threads);

  explain("schedule(dynamic,1)");
  explain("for a subroutine with thread 0 given 1 second of work");
  start_time();
#pragma omp parallel for schedule(dynamic,1)
  for (i = 1; i <= k; i++)
    zero_slow();
  for (i = 0; i < max_threads; i++) {
    printf("%d %6.2f %%\n", i, 100.0 * (FLT)dist[i] / ((FLT)k));
    total = total + dist[i];
  }
  printf(" total iterations: %d time %10.2f\n\n", total, end_time());
  total = 0;
  zero(max_threads);

  explain("schedule(dynamic,2)");
  explain("for a subroutine with thread 0 given 1 second of work");
  start_time();
#pragma omp parallel for schedule(dynamic,2)
  for (i = 1; i <= k; i++)
    zero_slow();
  for (i = 0; i < max_threads; i++) {
    printf("%d %6.2f %%\n", i, 100.0 * (FLT)dist[i] / ((FLT)k));
    total = total + dist[i];
  }
  printf(" total iterations: %d time %10.2f\n\n", total, end_time());
  total = 0;
  zero(max_threads);

  explain("schedule(dynamic,4)");
  explain("for a subroutine with thread 0 given 1 second of work");
  start_time();
#pragma omp parallel for schedule(dynamic,4)
  for (i = 1; i <= k; i++)
    zero_slow();
  for (i = 0; i < max_threads; i++) {
    printf("%d %6.2f %%\n", i, 100.0 * (FLT)dist[i] / ((FLT)k));
    total = total + dist[i];
  }
  printf(" total iterations: %d time %10.2f\n\n", total, end_time());
  total = 0;
  zero(max_threads);

  explain("default scheduling");
  explain("for an imbalanced subroutine");
  k = max_threads * 100;
  start_time();
#pragma omp parallel for
  for (i = 1; i <= k; i++)
    imbalance(i);
  my_bar();
  for (i = 0; i < max_threads; i++) {
    printf("%d %6.2f %%\n", i, 100.0 * (FLT)dist[i] / ((FLT)k));
    total = total + dist[i];
  }
  printf(" total iterations: %d time %10.2f\n", total, end_time());
  printf(" thread %d did the slow iteration\n\n", idid);
  total = 0;
  zero(max_threads);

  /* run the same default-schedule test a second time */
  explain("default scheduling");
  explain("for an imbalanced subroutine");
  start_time();
#pragma omp parallel for
  for (i = 1; i <= k; i++)
    imbalance(i);
  my_bar();
  for (i = 0; i < max_threads; i++) {
    printf("%d %6.2f %%\n", i, 100.0 * (FLT)dist[i] / ((FLT)k));
    total = total + dist[i];
  }
  printf(" total iterations: %d time %10.2f\n", total, end_time());
  printf(" thread %d did the slow iteration\n\n", idid);
  total = 0;
  zero(max_threads);

  explain("schedule(static,1)");
  explain("for an imbalanced subroutine");
  start_time();
#pragma omp parallel for schedule(static,1)
  for (i = 1; i <= k; i++)
    imbalance(i);
  for (i = 0; i < max_threads; i++) {
    printf("%d %6.2f %%\n", i, 100.0 * (FLT)dist[i] / ((FLT)k));
    total = total + dist[i];
  }
  printf(" total iterations: %d time %10.2f\n", total, end_time());
  printf(" thread %d did the slow iteration\n\n", idid);
  total = 0;
  zero(max_threads);

  explain("schedule(static,2)");
  explain("for an imbalanced subroutine");
  start_time();
#pragma omp parallel for schedule(static,2)
  for (i = 1; i <= k; i++)
    imbalance(i);
  for (i = 0; i < max_threads; i++) {
    printf("%d %6.2f %%\n", i, 100.0 * (FLT)dist[i] / ((FLT)k));
    total = total + dist[i];
  }
  printf(" total iterations: %d time %10.2f\n", total, end_time());
  printf(" thread %d did the slow iteration\n\n", idid);
  total = 0;
  zero(max_threads);

  explain("schedule(dynamic,1)");
  explain("for an imbalanced subroutine");
  start_time();
#pragma omp parallel for schedule(dynamic,1)
  for (i = 1; i <= k; i++)
    imbalance(i);
  for (i = 0; i < max_threads; i++) {
    printf("%d %6.2f %%\n", i, 100.0 * (FLT)dist[i] / ((FLT)k));
    total = total + dist[i];
  }
  printf(" total iterations: %d time %10.2f\n", total, end_time());
  printf(" thread %d did the slow iteration\n\n", idid);
  total = 0;
  zero(max_threads);

  explain("schedule(dynamic,2)");
  explain("for an imbalanced subroutine");
  start_time();
#pragma omp parallel for schedule(dynamic,2)
  for (i = 1; i <= k; i++)
    imbalance(i);
  for (i = 0; i < max_threads; i++) {
    printf("%d %6.2f %%\n", i, 100.0 * (FLT)dist[i] / ((FLT)k));
    total = total + dist[i];
  }
  printf(" total iterations: %d time %10.2f\n", total, end_time());
  printf(" thread %d did the slow iteration\n\n", idid);
  total = 0;
  zero(max_threads);

  explain("schedule(dynamic,4)");
  explain("for an imbalanced subroutine");
  start_time();
#pragma omp parallel for schedule(dynamic,4)
  for (i = 1; i <= k; i++)
    imbalance(i);
  for (i = 0; i < max_threads; i++) {
    printf("%d %6.2f %%\n", i, 100.0 * (FLT)dist[i] / ((FLT)k));
    total = total + dist[i];
  }
  printf(" total iterations: %d time %10.2f\n", total, end_time());
  printf(" thread %d did the slow iteration\n\n", idid);
  total = 0;
  my_bar();
  return 0;
}

void my_bar()
{
#pragma omp barrier
  fflush(stdout);
#pragma omp barrier
}

void explain(char astr[])
{
  printf("****** %s\n", astr);
}

FLT system_clock(FLT *x)
{
  FLT t;
  FLT six = 1.0e-6;
  struct timeval tb;
  struct timezone tz;
  gettimeofday(&tb, &tz);
  t = (FLT)tb.tv_sec + ((FLT)tb.tv_usec) * six;
  if (x) {
    *x = t;
  }
  return (t);
}

void zero(int j)
{
  int i;
  for (i = 0; i < j; i++) {
    dist[i] = 0;
  }
}

void start_time()
{
  st = system_clock((FLT *)0);
}

FLT end_time()
{
  return (system_clock((FLT *)0) - st);
}
GB_unaryop__identity_bool_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_bool_uint32 // op(A') function: GB_tran__identity_bool_uint32 // C type: bool // A type: uint32_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ uint32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ bool z = (bool) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_bool_uint32 ( bool *Cx, // Cx and Ax may be aliased uint32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_bool_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_bool_uint32 // op(A') function: GB_tran__identity_bool_uint32 // C type: bool // A type: uint32_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ uint32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ bool z = (bool) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_bool_uint32 ( bool *Cx, // Cx and Ax may be aliased uint32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_bool_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_bool_uint32 // op(A') function: GB_tran__identity_bool_uint32 // C type: bool // A type: uint32_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ uint32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ bool z = (bool) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_bool_uint32 ( bool *Cx, // Cx and Ax may be aliased uint32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_bool_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
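A hand-expanded sketch of what the macros reduce to inside the parallel loop of the omp variant above; the standalone function name is illustrative, not part of the generated file.

#include <stdbool.h>
#include <stdint.h>

/* sketch: GB_CAST_OP(p,p) expanded by hand */
void unop_identity_bool_uint32_sketch(bool *Cx, uint32_t *Ax,
                                      int64_t anz, int nthreads)
{
  int64_t p;
#pragma omp parallel for num_threads(nthreads) schedule(static)
  for (p = 0; p < anz; p++)
  {
    uint32_t aij = Ax[p];   /* GB_GETA:    aij = Ax [pA] */
    bool z = (bool) aij;    /* GB_CASTING: any nonzero value casts to true */
    Cx[p] = z;              /* GB_OP:      identity */
  }
}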
enhance.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE N N H H AAA N N CCCC EEEEE % % E NN N H H A A NN N C E % % EEE N N N HHHHH AAAAA N N N C EEE % % E N NN H H A A N NN C E % % EEEEE N N H H A A N N CCCC EEEEE % % % % % % MagickCore Image Enhancement Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoGammaImage() extract the 'mean' from the image and adjust the image % to try make set its gamma appropriately. % % The format of the AutoGammaImage method is: % % MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-level % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType AutoGammaImage(Image *image, ExceptionInfo *exception) { double gamma, log_mean, mean, sans; MagickStatusType status; register ssize_t i; log_mean=log(0.5); if (image->channel_mask == DefaultChannels) { /* Apply gamma correction equally across all given channels. 
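    For example, with log_mean = log(0.5): a mid-gray image (mean =
    QuantumRange/2) gives gamma = log(0.5)/log(0.5) = 1.0 and is left
    unchanged, while a darker image with mean = QuantumRange/4 gives
    gamma = log(0.25)/log(0.5) = 2.0, pulling the mean toward mid-gray.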
*/ (void) GetImageMean(image,&mean,&sans,exception); gamma=log(mean*QuantumScale)/log_mean; return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception)); } /* Auto-gamma each channel separately. */ status=MagickTrue; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ChannelType channel_mask; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i)); status=GetImageMean(image,&mean,&sans,exception); gamma=log(mean*QuantumScale)/log_mean; status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception); (void) SetImageChannelMask(image,channel_mask); if (status == MagickFalse) break; } return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoLevelImage() adjusts the levels of a particular image channel by % scaling the minimum and maximum values to the full quantum range. % % The format of the LevelImage method is: % % MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-level % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType AutoLevelImage(Image *image, ExceptionInfo *exception) { return(MinMaxStretchImage(image,0.0,0.0,1.0,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B r i g h t n e s s C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BrightnessContrastImage() changes the brightness and/or contrast of an % image. It converts the brightness and contrast parameters into slope and % intercept and calls a polynomical function to apply to the image. % % The format of the BrightnessContrastImage method is: % % MagickBooleanType BrightnessContrastImage(Image *image, % const double brightness,const double contrast,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o brightness: the brightness percent (-100 .. 100). % % o contrast: the contrast percent (-100 .. 100). % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType BrightnessContrastImage(Image *image, const double brightness,const double contrast,ExceptionInfo *exception) { #define BrightnessContastImageTag "BrightnessContast/Image" double alpha, coefficients[2], intercept, slope; MagickBooleanType status; /* Compute slope and intercept. 
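    For example, brightness = contrast = 0 yields slope = tan(MagickPI/4) =
    1.0 and intercept = 0.0, the identity polynomial; positive contrast
    steepens the slope about the midpoint, while brightness shifts the
    intercept.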
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); alpha=contrast; slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0)); if (slope < 0.0) slope=0.0; intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope); coefficients[0]=slope; coefficients[1]=intercept; status=FunctionImage(image,PolynomialFunction,2,coefficients,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C L A H E I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CLAHEImage() is a variant of adaptive histogram equalization in which the % contrast amplification is limited, so as to reduce this problem of noise % amplification. % % Adapted from implementation by Karel Zuiderveld, karel@cv.ruu.nl in % "Graphics Gems IV", Academic Press, 1994. % % The format of the CLAHEImage method is: % % MagickBooleanType CLAHEImage(Image *image,const size_t width, % const size_t height,const size_t number_bins,const double clip_limit, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the tile divisions to use in horizontal direction. % % o height: the height of the tile divisions to use in vertical direction. % % o number_bins: number of bins for histogram ("dynamic range"). % % o clip_limit: contrast limit for localised changes in contrast. A limit % less than 1 results in standard non-contrast limited AHE. % % o exception: return any errors or warnings in this structure. % */ typedef struct _RangeInfo { unsigned short min, max; } RangeInfo; static void ClipCLAHEHistogram(const double clip_limit,const size_t number_bins, size_t *histogram) { #define NumberCLAHEGrays (65536) register ssize_t i; size_t cumulative_excess, previous_excess, step; ssize_t excess; /* Compute total number of excess pixels. */ cumulative_excess=0; for (i=0; i < (ssize_t) number_bins; i++) { excess=(ssize_t) histogram[i]-(ssize_t) clip_limit; if (excess > 0) cumulative_excess+=excess; } /* Clip histogram and redistribute excess pixels across all bins. */ step=cumulative_excess/number_bins; excess=(ssize_t) (clip_limit-step); for (i=0; i < (ssize_t) number_bins; i++) { if ((double) histogram[i] > clip_limit) histogram[i]=(size_t) clip_limit; else if ((ssize_t) histogram[i] > excess) { cumulative_excess-=histogram[i]-excess; histogram[i]=(size_t) clip_limit; } else { cumulative_excess-=step; histogram[i]+=step; } } /* Redistribute remaining excess. */ do { register size_t *p; size_t *q; previous_excess=cumulative_excess; p=histogram; q=histogram+number_bins; while ((cumulative_excess != 0) && (p < q)) { step=number_bins/cumulative_excess; if (step < 1) step=1; for (p=histogram; (p < q) && (cumulative_excess != 0); p+=step) if ((double) *p < clip_limit) { (*p)++; cumulative_excess--; } p++; } } while ((cumulative_excess != 0) && (cumulative_excess < previous_excess)); } static void GenerateCLAHEHistogram(const RectangleInfo *clahe_info, const RectangleInfo *tile_info,const size_t number_bins, const unsigned short *lut,const unsigned short *pixels,size_t *histogram) { register const unsigned short *p; register ssize_t i; /* Classify the pixels into a gray histogram. 
*/ for (i=0; i < (ssize_t) number_bins; i++) histogram[i]=0L; p=pixels; for (i=0; i < (ssize_t) tile_info->height; i++) { const unsigned short *q; q=p+tile_info->width; while (p < q) histogram[lut[*p++]]++; q+=clahe_info->width; p=q-tile_info->width; } } static void InterpolateCLAHE(const RectangleInfo *clahe_info,const size_t *Q12, const size_t *Q22,const size_t *Q11,const size_t *Q21, const RectangleInfo *tile,const unsigned short *lut,unsigned short *pixels) { ssize_t y; unsigned short intensity; /* Bilinear interpolate four tiles to eliminate boundary artifacts. */ for (y=(ssize_t) tile->height; y > 0; y--) { register ssize_t x; for (x=(ssize_t) tile->width; x > 0; x--) { intensity=lut[*pixels]; *pixels++=(unsigned short ) (PerceptibleReciprocal((double) tile->width* tile->height)*(y*(x*Q12[intensity]+(tile->width-x)*Q22[intensity])+ (tile->height-y)*(x*Q11[intensity]+(tile->width-x)*Q21[intensity]))); } pixels+=(clahe_info->width-tile->width); } } static void GenerateCLAHELut(const RangeInfo *range_info, const size_t number_bins,unsigned short *lut) { ssize_t i; unsigned short delta; /* Scale input image [intensity min,max] to [0,number_bins-1]. */ delta=(unsigned short) ((range_info->max-range_info->min)/number_bins+1); for (i=(ssize_t) range_info->min; i <= (ssize_t) range_info->max; i++) lut[i]=(unsigned short) ((i-range_info->min)/delta); } static void MapCLAHEHistogram(const RangeInfo *range_info, const size_t number_bins,const size_t number_pixels,size_t *histogram) { double scale, sum; register ssize_t i; /* Rescale histogram to range [min-intensity .. max-intensity]. */ scale=(double) (range_info->max-range_info->min)/number_pixels; sum=0.0; for (i=0; i < (ssize_t) number_bins; i++) { sum+=histogram[i]; histogram[i]=(size_t) (range_info->min+scale*sum); if (histogram[i] > range_info->max) histogram[i]=range_info->max; } } static MagickBooleanType CLAHE(const RectangleInfo *clahe_info, const RectangleInfo *tile_info,const RangeInfo *range_info, const size_t number_bins,const double clip_limit,unsigned short *pixels) { MemoryInfo *tile_cache; register unsigned short *p; size_t limit, *tiles; ssize_t y; unsigned short *lut; /* Constrast limited adapted histogram equalization. */ if (clip_limit == 1.0) return(MagickTrue); tile_cache=AcquireVirtualMemory((size_t) clahe_info->x*clahe_info->y, number_bins*sizeof(*tiles)); if (tile_cache == (MemoryInfo *) NULL) return(MagickFalse); lut=(unsigned short *) AcquireQuantumMemory(NumberCLAHEGrays,sizeof(*lut)); if (lut == (unsigned short *) NULL) { tile_cache=RelinquishVirtualMemory(tile_cache); return(MagickFalse); } tiles=(size_t *) GetVirtualMemoryBlob(tile_cache); limit=(size_t) (clip_limit*(tile_info->width*tile_info->height)/number_bins); if (limit < 1UL) limit=1UL; /* Generate greylevel mappings for each tile. */ GenerateCLAHELut(range_info,number_bins,lut); p=pixels; for (y=0; y < (ssize_t) clahe_info->y; y++) { register ssize_t x; for (x=0; x < (ssize_t) clahe_info->x; x++) { size_t *histogram; histogram=tiles+(number_bins*(y*clahe_info->x+x)); GenerateCLAHEHistogram(clahe_info,tile_info,number_bins,lut,p,histogram); ClipCLAHEHistogram((double) limit,number_bins,histogram); MapCLAHEHistogram(range_info,number_bins,tile_info->width* tile_info->height,histogram); p+=tile_info->width; } p+=clahe_info->width*(tile_info->height-1); } /* Interpolate greylevel mappings to get CLAHE image. 
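    Each output pixel is a bilinear blend of the four surrounding tile
    mappings: value = (y*(x*Q12+(w-x)*Q22)+(h-y)*(x*Q11+(w-x)*Q21))/(w*h),
    where w and h are the tile dimensions; half-size tiles are used along
    the image borders.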
*/ p=pixels; for (y=0; y <= (ssize_t) clahe_info->y; y++) { OffsetInfo offset; RectangleInfo tile; register ssize_t x; tile.height=tile_info->height; tile.y=y-1; offset.y=tile.y+1; if (y == 0) { /* Top row. */ tile.height=tile_info->height >> 1; tile.y=0; offset.y=0; } else if (y == (ssize_t) clahe_info->y) { /* Bottom row. */ tile.height=(tile_info->height+1) >> 1; tile.y=clahe_info->y-1; offset.y=tile.y; } for (x=0; x <= (ssize_t) clahe_info->x; x++) { tile.width=tile_info->width; tile.x=x-1; offset.x=tile.x+1; if (x == 0) { /* Left column. */ tile.width=tile_info->width >> 1; tile.x=0; offset.x=0; } else if (x == (ssize_t) clahe_info->x) { /* Right column. */ tile.width=(tile_info->width+1) >> 1; tile.x=clahe_info->x-1; offset.x=tile.x; } InterpolateCLAHE(clahe_info, tiles+(number_bins*(tile.y*clahe_info->x+tile.x)), /* Q12 */ tiles+(number_bins*(tile.y*clahe_info->x+offset.x)), /* Q22 */ tiles+(number_bins*(offset.y*clahe_info->x+tile.x)), /* Q11 */ tiles+(number_bins*(offset.y*clahe_info->x+offset.x)), /* Q21 */ &tile,lut,p); p+=tile.width; } p+=clahe_info->width*(tile.height-1); } lut=(unsigned short *) RelinquishMagickMemory(lut); tile_cache=RelinquishVirtualMemory(tile_cache); return(MagickTrue); } MagickExport MagickBooleanType CLAHEImage(Image *image,const size_t width, const size_t height,const size_t number_bins,const double clip_limit, ExceptionInfo *exception) { #define CLAHEImageTag "CLAHE/Image" CacheView *image_view; ColorspaceType colorspace; MagickBooleanType status; MagickOffsetType progress; MemoryInfo *pixel_cache; RangeInfo range_info; RectangleInfo clahe_info, tile_info; size_t n; ssize_t y; unsigned short *pixels; /* Configure CLAHE parameters. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); range_info.min=0; range_info.max=NumberCLAHEGrays-1; tile_info.width=width; if (tile_info.width == 0) tile_info.width=image->columns >> 3; tile_info.height=height; if (tile_info.height == 0) tile_info.height=image->rows >> 3; tile_info.x=0; if ((image->columns % tile_info.width) != 0) tile_info.x=(ssize_t) tile_info.width-(image->columns % tile_info.width); tile_info.y=0; if ((image->rows % tile_info.height) != 0) tile_info.y=(ssize_t) tile_info.height-(image->rows % tile_info.height); clahe_info.width=image->columns+tile_info.x; clahe_info.height=image->rows+tile_info.y; clahe_info.x=(ssize_t) clahe_info.width/tile_info.width; clahe_info.y=(ssize_t) clahe_info.height/tile_info.height; pixel_cache=AcquireVirtualMemory(clahe_info.width,clahe_info.height* sizeof(*pixels)); if (pixel_cache == (MemoryInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); pixels=(unsigned short *) GetVirtualMemoryBlob(pixel_cache); colorspace=image->colorspace; if (TransformImageColorspace(image,LabColorspace,exception) == MagickFalse) { pixel_cache=RelinquishVirtualMemory(pixel_cache); return(MagickFalse); } /* Initialize CLAHE pixels. 
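    The image was padded above to a whole number of tiles (tile_info.x and
    tile_info.y hold the padding) and converted to the Lab colorspace;
    channel 0 (lightness) is scaled to the 16-bit gray range
    (NumberCLAHEGrays) for equalization.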
*/ image_view=AcquireVirtualCacheView(image,exception); progress=0; status=MagickTrue; n=0; for (y=0; y < (ssize_t) clahe_info.height; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-(tile_info.x >> 1),y- (tile_info.y >> 1),clahe_info.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) clahe_info.width; x++) { pixels[n++]=ScaleQuantumToShort(p[0]); p+=GetPixelChannels(image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,CLAHEImageTag,progress,2* GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); status=CLAHE(&clahe_info,&tile_info,&range_info,number_bins == 0 ? (size_t) 128 : MagickMin(number_bins,256),clip_limit,pixels); if (status == MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); /* Push CLAHE pixels to CLAHE image. */ image_view=AcquireAuthenticCacheView(image,exception); n=clahe_info.width*(tile_info.y >> 1); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } n+=tile_info.x >> 1; for (x=0; x < (ssize_t) image->columns; x++) { q[0]=ScaleShortToQuantum(pixels[n++]); q+=GetPixelChannels(image); } n+=(clahe_info.width-image->columns-(tile_info.x >> 1)); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,CLAHEImageTag,progress,2* GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); pixel_cache=RelinquishVirtualMemory(pixel_cache); if (TransformImageColorspace(image,colorspace,exception) == MagickFalse) status=MagickFalse; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClutImage() replaces each color value in the given image, by using it as an % index to lookup a replacement color value in a Color Look UP Table in the % form of an image. The values are extracted along a diagonal of the CLUT % image so either a horizontal or vertial gradient image can be used. % % Typically this is used to either re-color a gray-scale image according to a % color gradient in the CLUT image, or to perform a freeform histogram % (level) adjustment according to the (typically gray-scale) gradient in the % CLUT image. % % When the 'channel' mask includes the matte/alpha transparency channel but % one image has no such channel it is assumed that that image is a simple % gray-scale image that will effect the alpha channel values, either for % gray-scale coloring (with transparent or semi-transparent colors), or % a histogram adjustment of existing alpha channel values. If both images % have matte channels, direct and normal indexing is applied, which is rarely % used. 
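% A hypothetical example that re-colors a gray-scale image from a gradient
% CLUT image (variable names are illustrative):
%
%   (void) ClutImage(image,gradient,BilinearInterpolatePixel,exception);
%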
% % The format of the ClutImage method is: % % MagickBooleanType ClutImage(Image *image,Image *clut_image, % const PixelInterpolateMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o clut_image: the color lookup table image for replacement color values. % % o method: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image, const PixelInterpolateMethod method,ExceptionInfo *exception) { #define ClutImageTag "Clut/Image" CacheView *clut_view, *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo *clut_map; register ssize_t i; ssize_t adjust, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clut_image != (Image *) NULL); assert(clut_image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsGrayColorspace(clut_image->colorspace) == MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace,exception); clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map)); if (clut_map == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Clut image. */ status=MagickTrue; progress=0; adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1); clut_view=AcquireVirtualCacheView(clut_image,exception); for (i=0; i <= (ssize_t) MaxMap; i++) { GetPixelInfo(clut_image,clut_map+i); status=InterpolatePixelInfo(clut_image,clut_view,method, (double) i*(clut_image->columns-adjust)/MaxMap,(double) i* (clut_image->rows-adjust)/MaxMap,clut_map+i,exception); if (status == MagickFalse) break; } clut_view=DestroyCacheView(clut_view); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { PixelTrait traits; GetPixelInfoPixel(image,q,&pixel); traits=GetPixelChannelTraits(image,RedPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.red))].red; traits=GetPixelChannelTraits(image,GreenPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.green))].green; traits=GetPixelChannelTraits(image,BluePixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.blue))].blue; traits=GetPixelChannelTraits(image,BlackPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.black))].black; traits=GetPixelChannelTraits(image,AlphaPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.alpha))].alpha; 
SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ClutImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map); if ((clut_image->alpha_trait != UndefinedPixelTrait) && ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)) (void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r D e c i s i o n L i s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorDecisionListImage() accepts a lightweight Color Correction Collection % (CCC) file which solely contains one or more color corrections and applies % the correction to the image. Here is a sample CCC file: % % <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2"> % <ColorCorrection id="cc03345"> % <SOPNode> % <Slope> 0.9 1.2 0.5 </Slope> % <Offset> 0.4 -0.5 0.6 </Offset> % <Power> 1.0 0.8 1.5 </Power> % </SOPNode> % <SATNode> % <Saturation> 0.85 </Saturation> % </SATNode> % </ColorCorrection> % </ColorCorrectionCollection> % % which includes the slop, offset, and power for each of the RGB channels % as well as the saturation. % % The format of the ColorDecisionListImage method is: % % MagickBooleanType ColorDecisionListImage(Image *image, % const char *color_correction_collection,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o color_correction_collection: the color correction collection in XML. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection,ExceptionInfo *exception) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MagickPathExtent]; ColorCorrection color_correction; const char *content, *p; MagickBooleanType status; MagickOffsetType progress; PixelInfo *cdl_map; register ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps. 
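    Each normalized channel value u is corrected as pow(u*slope+offset,
    power), and the result is blended against the pixel's Rec. 709 luma:
    final = luma + saturation*(corrected - luma).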
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); ccc=NewXMLTree((const char *) color_correction_collection,exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.slope=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.slope=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=StringToDouble(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.offset=StringToDouble(token, (char **) NULL); break; } case 1: { color_correction.green.offset=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=StringToDouble(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.power=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.power=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.power=StringToDouble(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(saturation); p=(const char *) content; (void) GetNextToken(p,&p,MagickPathExtent,token); color_correction.saturation=StringToDouble(token,(char **) NULL); } } ccc=DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Color Correction Collection:"); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.slope: %g",color_correction.red.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.offset: %g",color_correction.red.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.power: %g",color_correction.red.power); 
(void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.slope: %g",color_correction.green.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.offset: %g",color_correction.green.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.power: %g",color_correction.green.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.slope: %g",color_correction.blue.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.offset: %g",color_correction.blue.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.power: %g",color_correction.blue.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.saturation: %g",color_correction.saturation); } cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map)); if (cdl_map == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); for (i=0; i <= (ssize_t) MaxMap; i++) { cdl_map[i].red=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+ color_correction.red.offset,color_correction.red.power)))); cdl_map[i].green=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+ color_correction.green.offset,color_correction.green.power)))); cdl_map[i].blue=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+ color_correction.blue.offset,color_correction.blue.power)))); } if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Apply transfer function to colormap. */ double luma; luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+ 0.07217f*image->colormap[i].blue; image->colormap[i].red=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma; image->colormap[i].green=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-luma; image->colormap[i].blue=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma; } /* Apply transfer function to image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double luma; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+ 0.07217f*GetPixelBlue(image,q); SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q); SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q); SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag, progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastImage() enhances the intensity differences between the lighter and % darker elements of the image. Set sharpen to a MagickTrue to increase the % image contrast otherwise the contrast is reduced. % % The format of the ContrastImage method is: % % MagickBooleanType ContrastImage(Image *image, % const MagickBooleanType sharpen,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o sharpen: Increase or decrease image contrast. % % o exception: return any errors or warnings in this structure. % */ static void Contrast(const int sign,double *red,double *green,double *blue) { double brightness, hue, saturation; /* Enhance contrast: dark color become darker, light color become lighter. 
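    The brightness adjustment follows a sine curve:
    b += 0.5*sign*(0.5*(sin(MagickPI*(b-0.5))+1.0)-b); the increment
    vanishes at b = 0.0, 0.5, and 1.0, so black, mid-gray, and white are
    fixed points.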
*/ assert(red != (double *) NULL); assert(green != (double *) NULL); assert(blue != (double *) NULL); hue=0.0; saturation=0.0; brightness=0.0; ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)- brightness); if (brightness > 1.0) brightness=1.0; else if (brightness < 0.0) brightness=0.0; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } MagickExport MagickBooleanType ContrastImage(Image *image, const MagickBooleanType sharpen,ExceptionInfo *exception) { #define ContrastImageTag "Contrast/Image" CacheView *image_view; int sign; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse) return(MagickTrue); #endif if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); sign=sharpen != MagickFalse ? 1 : -1; if (image->storage_class == PseudoClass) { /* Contrast enhance colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { double blue, green, red; red=(double) image->colormap[i].red; green=(double) image->colormap[i].green; blue=(double) image->colormap[i].blue; Contrast(sign,&red,&green,&blue); image->colormap[i].red=(MagickRealType) red; image->colormap[i].green=(MagickRealType) green; image->colormap[i].blue=(MagickRealType) blue; } } /* Contrast enhance image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double blue, green, red; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); Contrast(sign,&red,&green,&blue); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastStretchImage() is a simple image enhancement technique that attempts % to improve the contrast in an image by 'stretching' the range of intensity % values it contains to span a desired range of values. It differs from the % more sophisticated histogram equalization in that it can only apply a % linear scaling function to the image pixel values. As a result the % 'enhancement' is less harsh. 
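% Note that black_point and white_point are pixel counts rather than
% intensities, both measured from the dark end of the histogram: for a
% 100x100 image, black_point=100.0 clips the darkest 1% of pixels to black,
% and white_point=9900.0 clips the brightest 1% to white.
%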
% % The format of the ContrastStretchImage method is: % % MagickBooleanType ContrastStretchImage(Image *image, % const char *levels,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_point: the black point. % % o white_point: the white point. % % o levels: Specify the levels where the black and white points have the % range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.). % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ContrastStretchImage(Image *image, const double black_point,const double white_point,ExceptionInfo *exception) { #define MaxRange(color) ((double) ScaleQuantumToMap((Quantum) (color))) #define ContrastStretchImageTag "ContrastStretch/Image" CacheView *image_view; double *black, *histogram, *stretch_map, *white; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* Allocate histogram and stretch map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageGray(image,exception) != MagickFalse) (void) SetImageColorspace(image,GRAYColorspace,exception); black=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*black)); white=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*white)); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels* sizeof(*histogram)); stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels* sizeof(*stretch_map)); if ((black == (double *) NULL) || (white == (double *) NULL) || (histogram == (double *) NULL) || (stretch_map == (double *) NULL)) { if (stretch_map != (double *) NULL) stretch_map=(double *) RelinquishMagickMemory(stretch_map); if (histogram != (double *) NULL) histogram=(double *) RelinquishMagickMemory(histogram); if (white != (double *) NULL) white=(double *) RelinquishMagickMemory(white); if (black != (double *) NULL) black=(double *) RelinquishMagickMemory(black); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. */ status=MagickTrue; (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; pixel=GetPixelIntensity(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { if (image->channel_mask != DefaultChannels) pixel=(double) p[i]; histogram[GetPixelChannels(image)*ScaleQuantumToMap( ClampToQuantum(pixel))+i]++; } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Find the histogram boundaries by locating the black/white levels. 
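    black[i] is the first bin at which the running pixel count exceeds
    black_point; white[i] is found by the mirrored scan downward from the
    brightest bin.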
*/ for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; register ssize_t j; black[i]=0.0; white[i]=MaxRange(QuantumRange); intensity=0.0; for (j=0; j <= (ssize_t) MaxMap; j++) { intensity+=histogram[GetPixelChannels(image)*j+i]; if (intensity > black_point) break; } black[i]=(double) j; intensity=0.0; for (j=(ssize_t) MaxMap; j != 0; j--) { intensity+=histogram[GetPixelChannels(image)*j+i]; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white[i]=(double) j; } histogram=(double *) RelinquishMagickMemory(histogram); /* Stretch the histogram to create the stretched image mapping. */ (void) memset(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*stretch_map)); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { register ssize_t j; for (j=0; j <= (ssize_t) MaxMap; j++) { double gamma; gamma=PerceptibleReciprocal(white[i]-black[i]); if (j < (ssize_t) black[i]) stretch_map[GetPixelChannels(image)*j+i]=0.0; else if (j > (ssize_t) white[i]) stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange; else if (black[i] != white[i]) stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum( (double) (MaxMap*gamma*(j-black[i]))); } } if (image->storage_class == PseudoClass) { register ssize_t j; /* Stretch-contrast colormap. */ for (j=0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,RedPixelChannel); image->colormap[j].red=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,GreenPixelChannel); image->colormap[j].green=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,BluePixelChannel); image->colormap[j].blue=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,AlphaPixelChannel); image->colormap[j].alpha=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i]; } } } /* Stretch-contrast image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (black[j] == white[j]) continue; q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(q[j])+j]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ContrastStretchImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); stretch_map=(double *) RelinquishMagickMemory(stretch_map); white=(double *) RelinquishMagickMemory(white); black=(double *) RelinquishMagickMemory(black); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E n h a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EnhanceImage() applies a digital filter that improves the quality of a % noisy image. % % The format of the EnhanceImage method is: % % Image *EnhanceImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
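% A hypothetical invocation (variable names are illustrative):
%
%   Image *denoised = EnhanceImage(image,exception);
%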
% */ MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception) { #define EnhanceImageTag "Enhance/Image" #define EnhancePixel(weight) \ mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \ distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \ distance_squared=(4.0+mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \ distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \ distance_squared+=(7.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \ distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \ distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \ distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \ distance_squared+=(5.0-mean)*distance*distance; \ if (distance_squared < 0.069) \ { \ aggregate.red+=(weight)*GetPixelRed(image,r); \ aggregate.green+=(weight)*GetPixelGreen(image,r); \ aggregate.blue+=(weight)*GetPixelBlue(image,r); \ aggregate.black+=(weight)*GetPixelBlack(image,r); \ aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \ total_weight+=(weight); \ } \ r+=GetPixelChannels(image); CacheView *enhance_view, *image_view; Image *enhance_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Initialize enhanced image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); enhance_image=CloneImage(image,0,0,MagickTrue, exception); if (enhance_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse) { enhance_image=DestroyImage(enhance_image); return((Image *) NULL); } /* Enhance image. 
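    Each pixel is replaced by a weighted 5x5 average of those neighbors
    whose color distance is small enough (distance_squared < 0.069); the
    center-heavy weights (5 at the corners up to 80 at the center) smooth
    noise while preserving edges.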
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); enhance_view=AcquireAuthenticCacheView(enhance_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,enhance_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception); q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2); GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { double distance, distance_squared, mean, total_weight; PixelInfo aggregate; register const Quantum *magick_restrict r; GetPixelInfo(image,&aggregate); total_weight=0.0; GetPixelInfoPixel(image,p+center,&pixel); r=p; EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); r=p+GetPixelChannels(image)*(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+2*GetPixelChannels(image)*(image->columns+4); EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0); EnhancePixel(40.0); EnhancePixel(10.0); r=p+3*GetPixelChannels(image)*(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+4*GetPixelChannels(image)*(image->columns+4); EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); if (total_weight > MagickEpsilon) { pixel.red=((aggregate.red+total_weight/2.0)/total_weight); pixel.green=((aggregate.green+total_weight/2.0)/total_weight); pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight); pixel.black=((aggregate.black+total_weight/2.0)/total_weight); pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight); } SetPixelViaPixelInfo(enhance_image,&pixel,q); p+=GetPixelChannels(image); q+=GetPixelChannels(enhance_image); } if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } enhance_view=DestroyCacheView(enhance_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) enhance_image=DestroyImage(enhance_image); return(enhance_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E q u a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EqualizeImage() applies a histogram equalization to the image. % % The format of the EqualizeImage method is: % % MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
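% Sketch of the mapping built below: with H a channel's histogram and
% C(j) = H(0)+...+H(j) its running sum, level j is remapped to roughly
% QuantumRange*(C(j)-C(0))/(C(MaxMap)-C(0)), i.e. the normalized cumulative
% histogram.
%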
% */ MagickExport MagickBooleanType EqualizeImage(Image *image, ExceptionInfo *exception) { #define EqualizeImageTag "Equalize/Image" CacheView *image_view; double black[CompositePixelChannel+1], *equalize_map, *histogram, *map, white[CompositePixelChannel+1]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* Allocate and initialize histogram arrays. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateEqualizeImage(image,exception) != MagickFalse) return(MagickTrue); #endif if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels* sizeof(*equalize_map)); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels* sizeof(*histogram)); map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*sizeof(*map)); if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) || (map == (double *) NULL)) { if (map != (double *) NULL) map=(double *) RelinquishMagickMemory(map); if (histogram != (double *) NULL) histogram=(double *) RelinquishMagickMemory(histogram); if (equalize_map != (double *) NULL) equalize_map=(double *) RelinquishMagickMemory(equalize_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. */ status=MagickTrue; (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; intensity=(double) p[i]; if ((image->channel_mask & SyncChannels) != 0) intensity=GetPixelIntensity(image,p); histogram[GetPixelChannels(image)*ScaleQuantumToMap( ClampToQuantum(intensity))+i]++; } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Integrate the histogram to get the equalization map. */ for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; register ssize_t j; intensity=0.0; for (j=0; j <= (ssize_t) MaxMap; j++) { intensity+=histogram[GetPixelChannels(image)*j+i]; map[GetPixelChannels(image)*j+i]=intensity; } } (void) memset(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*equalize_map)); (void) memset(black,0,sizeof(*black)); (void) memset(white,0,sizeof(*white)); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { register ssize_t j; black[i]=map[i]; white[i]=map[GetPixelChannels(image)*MaxMap+i]; if (black[i] != white[i]) for (j=0; j <= (ssize_t) MaxMap; j++) equalize_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum((double) ((MaxMap*(map[ GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i]))); } histogram=(double *) RelinquishMagickMemory(histogram); map=(double *) RelinquishMagickMemory(map); if (image->storage_class == PseudoClass) { register ssize_t j; /* Equalize colormap. 
*/ for (j=0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, RedPixelChannel); if (black[channel] != white[channel]) image->colormap[j].red=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+ channel]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, GreenPixelChannel); if (black[channel] != white[channel]) image->colormap[j].green=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+ channel]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, BluePixelChannel); if (black[channel] != white[channel]) image->colormap[j].blue=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+ channel]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, AlphaPixelChannel); if (black[channel] != white[channel]) image->colormap[j].alpha=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+ channel]; } } } /* Equalize image. */ progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j])) continue; q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(q[j])+j]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); equalize_map=(double *) RelinquishMagickMemory(equalize_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GammaImage() gamma-corrects a particular image channel. The same % image viewed on different devices will have perceptual differences in the % way the image's intensities are represented on the screen. Specify % individual gamma levels for the red, green, and blue channels, or adjust % all three with the gamma parameter. Values typically range from 0.8 to 2.3. % % You can also reduce the influence of a particular channel with a gamma % value of 0. 
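%
% For example, with a gamma of 2.0 a mid-gray input of 0.5*QuantumRange maps
% to pow(0.5,1.0/2.0), about 0.71 of QuantumRange, brightening the midtones
% while leaving pure black and pure white fixed.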
% % The format of the GammaImage method is: % % MagickBooleanType GammaImage(Image *image,const double gamma, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o level: the image gamma as a string (e.g. 1.6,1.2,1.0). % % o gamma: the image gamma. % */ static inline double gamma_pow(const double value,const double gamma) { return(value < 0.0 ? value : pow(value,gamma)); } MagickExport MagickBooleanType GammaImage(Image *image,const double gamma, ExceptionInfo *exception) { #define GammaImageTag "Gamma/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; Quantum *gamma_map; register ssize_t i; ssize_t y; /* Allocate and initialize gamma maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (gamma == 1.0) return(MagickTrue); gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map)); if (gamma_map == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map)); if (gamma != 0.0) for (i=0; i <= (ssize_t) MaxMap; i++) gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/ MaxMap,PerceptibleReciprocal(gamma)))); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Gamma-correct colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].red))]; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].green))]; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].blue))]; if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].alpha))]; } /* Gamma-correct image. 
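The correction is applied through a lookup table: gamma_map[i] holds
ScaleMapToQuantum(MaxMap*pow(i/MaxMap,1.0/gamma)) for each of the MaxMap+1
quantized input levels, so each channel sample costs one table lookup rather
than a pow() call.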
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=gamma_map[ScaleQuantumToMap(ClampToQuantum((MagickRealType) q[j]))]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,GammaImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map); if (image->gamma != 0.0) image->gamma*=gamma; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GrayscaleImage() converts the image to grayscale. % % The format of the GrayscaleImage method is: % % MagickBooleanType GrayscaleImage(Image *image, % const PixelIntensityMethod method ,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o method: the pixel intensity method. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GrayscaleImage(Image *image, const PixelIntensityMethod method,ExceptionInfo *exception) { #define GrayscaleImageTag "Grayscale/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse) { image->intensity=method; image->type=GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return(SetImageColorspace(image,LinearGRAYColorspace,exception)); return(SetImageColorspace(image,GRAYColorspace,exception)); } #endif /* Grayscale image. 
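Each pixel is reduced to a single intensity according to the requested method;
the Rec601 and Rec709 variants differ only in their weighting coefficients
(0.298839/0.586811/0.114350 versus 0.212656/0.715158/0.072186), with the luma
methods operating on gamma-encoded values and the luminance methods on linear
ones.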
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType blue, green, red, intensity; red=(MagickRealType) GetPixelRed(image,q); green=(MagickRealType) GetPixelGreen(image,q); blue=(MagickRealType) GetPixelBlue(image,q); intensity=0.0; switch (method) { case AveragePixelIntensityMethod: { intensity=(red+green+blue)/3.0; break; } case BrightnessPixelIntensityMethod: { intensity=MagickMax(MagickMax(red,green),blue); break; } case LightnessPixelIntensityMethod: { intensity=(MagickMin(MagickMin(red,green),blue)+ MagickMax(MagickMax(red,green),blue))/2.0; break; } case MSPixelIntensityMethod: { intensity=(MagickRealType) (((double) red*red+green*green+ blue*blue)/3.0); break; } case Rec601LumaPixelIntensityMethod: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec601LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec709LumaPixelIntensityMethod: default: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case Rec709LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case RMSPixelIntensityMethod: { intensity=(MagickRealType) (sqrt((double) red*red+green*green+ blue*blue)/sqrt(3.0)); break; } } SetPixelGray(image,ClampToQuantum(intensity),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); image->intensity=method; image->type=GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return(SetImageColorspace(image,LinearGRAYColorspace,exception)); return(SetImageColorspace(image,GRAYColorspace,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H a l d C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % HaldClutImage() applies a Hald color lookup table to the image. A Hald % color lookup table is a 3-dimensional color cube mapped to 2 dimensions. % Create it with the HALD coder. 
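% (For example, "magick hald:8 hald.png" writes an identity Hald CLUT of
% order 8, assuming the ImageMagick 7 command-line tool.)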
You can apply any color transformation to % the Hald image and then use this method to apply the transform to the % image. % % The format of the HaldClutImage method is: % % MagickBooleanType HaldClutImage(Image *image,Image *hald_image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o hald_image: the color lookup table image for replacement color values. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType HaldClutImage(Image *image, const Image *hald_image,ExceptionInfo *exception) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { double x, y, z; } HaldInfo; CacheView *hald_view, *image_view; double width; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); /* Hald clut image. */ status=MagickTrue; progress=0; length=(size_t) MagickMin((MagickRealType) hald_image->columns, (MagickRealType) hald_image->rows); for (level=2; (level*level*level) < length; level++) ; level*=level; cube_size=level*level; width=(double) hald_image->columns; GetPixelInfo(hald_image,&zero); hald_view=AcquireVirtualCacheView(hald_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double area, offset; HaldInfo point; PixelInfo pixel, pixel1, pixel2, pixel3, pixel4; point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q); point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q); point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q); offset=point.x+level*floor(point.y)+cube_size*floor(point.z); point.x-=floor(point.x); point.y-=floor(point.y); point.z-=floor(point.z); pixel1=zero; status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset,width),floor(offset/width),&pixel1,exception); if (status == MagickFalse) break; pixel2=zero; status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception); if (status == MagickFalse) break; pixel3=zero; area=point.y; if (hald_image->interpolate == NearestInterpolatePixel) area=(point.y < 0.5) ? 
0.0 : 1.0; CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha, area,&pixel3); offset+=cube_size; status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset,width),floor(offset/width),&pixel1,exception); if (status == MagickFalse) break; status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception); if (status == MagickFalse) break; pixel4=zero; CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha, area,&pixel4); pixel=zero; area=point.z; if (hald_image->interpolate == NearestInterpolatePixel) area=(point.z < 0.5)? 0.0 : 1.0; CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha, area,&pixel); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) SetPixelRed(image,ClampToQuantum(pixel.red),q); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) SetPixelGreen(image,ClampToQuantum(pixel.green),q); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) SetPixelBlue(image,ClampToQuantum(pixel.blue),q); if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) SetPixelBlack(image,ClampToQuantum(pixel.black),q); if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } hald_view=DestroyCacheView(hald_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImage() adjusts the levels of a particular image channel by % scaling the colors falling between specified white and black points to % the full available quantum range. % % The parameters provided represent the black, and white points. The black % point specifies the darkest color in the image. Colors darker than the % black point are set to zero. White point specifies the lightest color in % the image. Colors brighter than the white point are set to the maximum % quantum value. % % If a '!' flag is given, map black and white colors to the given levels % rather than mapping those levels to black and white. See % LevelizeImage() below. % % Gamma specifies a gamma correction to apply to the image. % % The format of the LevelImage method is: % % MagickBooleanType LevelImage(Image *image,const double black_point, % const double white_point,const double gamma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_point: The level to map zero (black) to. % % o white_point: The level to map QuantumRange (white) to. % % o exception: return any errors or warnings in this structure. 
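%
%  A minimal usage sketch (hypothetical values: stretch everything between
%  10% and 90% of QuantumRange to the full range, leaving gamma at 1.0):
%
%    (void) LevelImage(image,0.10*QuantumRange,0.90*QuantumRange,1.0,
%      exception);
%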
% */ static inline double LevelPixel(const double black_point, const double white_point,const double gamma,const double pixel) { double level_pixel, scale; scale=PerceptibleReciprocal(white_point-black_point); level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point), PerceptibleReciprocal(gamma)); return(level_pixel); } MagickExport MagickBooleanType LevelImage(Image *image,const double black_point, const double white_point,const double gamma,ExceptionInfo *exception) { #define LevelImageTag "Level/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Level colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].red)); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].green)); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].blue)); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].alpha)); } /* Level image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma, (double) q[j])); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,LevelImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) ClampImage(image,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelizeImage() applies the reversed LevelImage() operation to just % the specific channels specified. It compresses the full range of color % values, so that they lie between the given black and white points. 
Gamma is % applied before the values are mapped. % % LevelizeImage() can be called with by using a +level command line % API option, or using a '!' on a -level or LevelImage() geometry string. % % It can be used to de-contrast a greyscale image to the exact levels % specified. Or by using specific levels for each channel of an image you % can convert a gray-scale image to any linear color gradient, according to % those levels. % % The format of the LevelizeImage method is: % % MagickBooleanType LevelizeImage(Image *image,const double black_point, % const double white_point,const double gamma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_point: The level to map zero (black) to. % % o white_point: The level to map QuantumRange (white) to. % % o gamma: adjust gamma by this factor before mapping values. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType LevelizeImage(Image *image, const double black_point,const double white_point,const double gamma, ExceptionInfo *exception) { #define LevelizeImageTag "Levelize/Image" #define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \ (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Level colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) LevelizeValue( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) LevelizeValue( image->colormap[i].alpha); } /* Level image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=LevelizeValue(q[j]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImageColors() maps the given colors to "black" and "white" values, % linearly spreading out the colors, and levels values on a channel-by-channel % basis, as per LevelImage(). The given colors allow you to specify % different level ranges for each of the color channels separately. % % If the boolean 'invert' is set true, the image values are modified in the % reverse direction. That is, any existing "black" and "white" colors in the % image will become the color values given, with all other values compressed % appropriately. This effectively maps a grayscale gradient into the given % color gradient. % % The format of the LevelImageColors method is: % % MagickBooleanType LevelImageColors(Image *image, % const PixelInfo *black_color,const PixelInfo *white_color, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_color: The color to map black to/from % % o white_color: The color to map white to/from % % o invert: if true, map the colors (levelize), rather than from (level) % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType LevelImageColors(Image *image, const PixelInfo *black_color,const PixelInfo *white_color, const MagickBooleanType invert,ExceptionInfo *exception) { ChannelType channel_mask; MagickStatusType status; /* Allocate and initialize levels map. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsGrayColorspace(black_color->colorspace) == MagickFalse) || (IsGrayColorspace(white_color->colorspace) == MagickFalse))) (void) SetImageColorspace(image,sRGBColorspace,exception); status=MagickTrue; if (invert == MagickFalse) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,RedChannel); status&=LevelImage(image,black_color->red,white_color->red,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,GreenChannel); status&=LevelImage(image,black_color->green,white_color->green,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,BlueChannel); status&=LevelImage(image,black_color->blue,white_color->blue,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { channel_mask=SetImageChannelMask(image,BlackChannel); status&=LevelImage(image,black_color->black,white_color->black,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { channel_mask=SetImageChannelMask(image,AlphaChannel); status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } } else { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,RedChannel); status&=LevelizeImage(image,black_color->red,white_color->red,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,GreenChannel); status&=LevelizeImage(image,black_color->green,white_color->green,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,BlueChannel); status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { channel_mask=SetImageChannelMask(image,BlackChannel); status&=LevelizeImage(image,black_color->black,white_color->black,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { channel_mask=SetImageChannelMask(image,AlphaChannel); status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } } return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i n e a r S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LinearStretchImage() discards any pixels below the black point and above % the white point and levels the remaining pixels. 
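%
% Note that black_point and white_point are given as pixel counts, not
% intensities: the histogram is summed from each end until the count is
% reached, and the intensities found there become the endpoints passed to
% LevelImage(). For example, a black_point of 0.01*columns*rows discards
% the darkest one percent of pixels.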
% % The format of the LinearStretchImage method is: % % MagickBooleanType LinearStretchImage(Image *image, % const double black_point,const double white_point, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_point: the black point. % % o white_point: the white point. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType LinearStretchImage(Image *image, const double black_point,const double white_point,ExceptionInfo *exception) { #define LinearStretchImageTag "LinearStretch/Image" CacheView *image_view; double *histogram, intensity; MagickBooleanType status; ssize_t black, white, y; /* Allocate histogram and linear map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram)); if (histogram == (double *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. */ (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { intensity=GetPixelIntensity(image,p); histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Find the histogram boundaries by locating the black and white point levels. */ intensity=0.0; for (black=0; black < (ssize_t) MaxMap; black++) { intensity+=histogram[black]; if (intensity >= black_point) break; } intensity=0.0; for (white=(ssize_t) MaxMap; white != 0; white--) { intensity+=histogram[white]; if (intensity >= white_point) break; } histogram=(double *) RelinquishMagickMemory(histogram); status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black), (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d u l a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModulateImage() lets you control the brightness, saturation, and hue % of an image. Modulate represents the brightness, saturation, and hue % as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the % modulation is lightness, saturation, and hue. For HWB, use blackness, % whiteness, and hue. And for HCL, use chrome, luma, and hue. % % The format of the ModulateImage method is: % % MagickBooleanType ModulateImage(Image *image,const char *modulate, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o modulate: Define the percent change in brightness, saturation, and hue. % % o exception: return any errors or warnings in this structure. % */ static inline void ModulateHCL(const double percent_hue, const double percent_chroma,const double percent_luma,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
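A percentage of 100 leaves a component unchanged. Chroma and luma scale
linearly (50 halves them, 200 doubles them), while hue is shifted by
fmod(percent_hue-100.0,200.0)/200.0 of a full rotation, so a percent_hue
of 200 rotates the hue by 180 degrees.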
*/ ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHCLp(const double percent_hue, const double percent_chroma,const double percent_luma,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLpToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHSB(const double percent_hue, const double percent_saturation,const double percent_brightness,double *red, double *green,double *blue) { double brightness, hue, saturation; /* Increase or decrease color brightness, saturation, or hue. */ ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; brightness*=0.01*percent_brightness; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } static inline void ModulateHSI(const double percent_hue, const double percent_saturation,const double percent_intensity,double *red, double *green,double *blue) { double intensity, hue, saturation; /* Increase or decrease color intensity, saturation, or hue. */ ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; intensity*=0.01*percent_intensity; ConvertHSIToRGB(hue,saturation,intensity,red,green,blue); } static inline void ModulateHSL(const double percent_hue, const double percent_saturation,const double percent_lightness,double *red, double *green,double *blue) { double hue, lightness, saturation; /* Increase or decrease color lightness, saturation, or hue. */ ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; lightness*=0.01*percent_lightness; ConvertHSLToRGB(hue,saturation,lightness,red,green,blue); } static inline void ModulateHSV(const double percent_hue, const double percent_saturation,const double percent_value,double *red, double *green,double *blue) { double hue, saturation, value; /* Increase or decrease color value, saturation, or hue. */ ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; value*=0.01*percent_value; ConvertHSVToRGB(hue,saturation,value,red,green,blue); } static inline void ModulateHWB(const double percent_hue, const double percent_whiteness,const double percent_blackness,double *red, double *green,double *blue) { double blackness, hue, whiteness; /* Increase or decrease color blackness, whiteness, or hue. */ ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness); hue+=fmod((percent_hue-100.0),200.0)/200.0; blackness*=0.01*percent_blackness; whiteness*=0.01*percent_whiteness; ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue); } static inline void ModulateLCHab(const double percent_luma, const double percent_chroma,const double percent_hue,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
*/ ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHabToRGB(luma,chroma,hue,red,green,blue); } static inline void ModulateLCHuv(const double percent_luma, const double percent_chroma,const double percent_hue,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue); } MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate, ExceptionInfo *exception) { #define ModulateImageTag "Modulate/Image" CacheView *image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; register ssize_t i; ssize_t y; /* Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); flags=ParseGeometry(modulate,&geometry_info); percent_brightness=geometry_info.rho; percent_saturation=geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation=100.0; percent_hue=geometry_info.xi; if ((flags & XiValue) == 0) percent_hue=100.0; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { double blue, green, red; /* Modulate image colormap. */ red=(double) image->colormap[i].red; green=(double) image->colormap[i].green; blue=(double) image->colormap[i].blue; switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSIColorspace: { ModulateHSI(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } image->colormap[i].red=red; image->colormap[i].green=green; image->colormap[i].blue=blue; } /* Modulate image. 
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateModulateImage(image,percent_brightness,percent_hue, percent_saturation,colorspace,exception) != MagickFalse) return(MagickTrue); #endif status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red; red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHColorspace: case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImage method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o grayscale: If MagickTrue, only negate grayscale pixels within the image. % % o exception: return any errors or warnings in this structure. 
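%
%  A minimal usage sketch (hypothetical variable names; negates every
%  channel of every pixel):
%
%    if (NegateImage(image,MagickFalse,exception) == MagickFalse)
%      CatchException(exception);
%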
% */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale,ExceptionInfo *exception) { #define NegateImageTag "Negate/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Negate colormap. */ if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } /* Negate image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); if( grayscale != MagickFalse ) { for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; if (IsPixelGray(image,q) == MagickFalse) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=QuantumRange-q[j]; } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(MagickTrue); } /* Negate image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=QuantumRange-q[j]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r m a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The NormalizeImage() method enhances the contrast of a color image by % mapping the darkest 2 percent of all pixel to black and the brightest % 1 percent to white. % % The format of the NormalizeImage method is: % % MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType NormalizeImage(Image *image, ExceptionInfo *exception) { double black_point, white_point; black_point=(double) image->columns*image->rows*0.0015; white_point=(double) image->columns*image->rows*0.9995; return(ContrastStretchImage(image,black_point,white_point,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i g m o i d a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SigmoidalContrastImage() adjusts the contrast of an image with a non-linear % sigmoidal contrast algorithm. Increase the contrast of the image using a % sigmoidal transfer function without saturating highlights or shadows. % Contrast indicates how much to increase the contrast (0 is none; 3 is % typical; 20 is pushing it); mid-point indicates where midtones fall in the % resultant image (0 is white; 50% is middle-gray; 100% is black). Set % sharpen to MagickTrue to increase the image contrast otherwise the contrast % is reduced. % % The format of the SigmoidalContrastImage method is: % % MagickBooleanType SigmoidalContrastImage(Image *image, % const MagickBooleanType sharpen,const char *levels, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o sharpen: Increase or decrease image contrast. % % o contrast: strength of the contrast, the larger the number the more % 'threshold-like' it becomes. % % o midpoint: midpoint of the function as a color value 0 to QuantumRange. % % o exception: return any errors or warnings in this structure. 
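%
%  A minimal usage sketch (hypothetical values, matching the definition
%  below: a typical contrast boost of strength 3.0 centered on middle gray):
%
%    (void) SigmoidalContrastImage(image,MagickTrue,3.0,0.50*QuantumRange,
%      exception);
%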
% */ /* ImageMagick 6 has a version of this function which uses LUTs. */ /* Sigmoidal function Sigmoidal with inflexion point moved to b and "slope constant" set to a. The first version, based on the hyperbolic tangent tanh, when combined with the scaling step, is an exact arithmetic clone of the sigmoid function based on the logistic curve. The equivalence is based on the identity 1/(1+exp(-t)) = (1+tanh(t/2))/2 (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled sigmoidal derivation is invariant under affine transformations of the ordinate. The tanh version is almost certainly more accurate and cheaper. The 0.5 factor in the argument is to clone the legacy ImageMagick behavior. The reason for making the define depend on atanh even though it only uses tanh has to do with the construction of the inverse of the scaled sigmoidal. */ #if defined(MAGICKCORE_HAVE_ATANH) #define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) ) #else #define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) ) #endif /* Scaled sigmoidal function: ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) / ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) ) See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by zero. This is fixed below by exiting immediately when contrast is small, leaving the image (or colormap) unmodified. This appears to be safe because the series expansion of the logistic sigmoidal function around x=b is 1/2-a*(b-x)/4+... so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh). */ #define ScaledSigmoidal(a,b,x) ( \ (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \ (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) ) /* Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even when creating a LUT from in gamut values, hence the branching. In addition, HDRI may have out of gamut values. InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal: It is only a right inverse. This is unavoidable. */ static inline double InverseScaledSigmoidal(const double a,const double b, const double x) { const double sig0=Sigmoidal(a,b,0.0); const double sig1=Sigmoidal(a,b,1.0); const double argument=(sig1-sig0)*x+sig0; const double clamped= ( #if defined(MAGICKCORE_HAVE_ATANH) argument < -1+MagickEpsilon ? -1+MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b+(2.0/a)*atanh(clamped)); #else argument < MagickEpsilon ? MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b-log(1.0/clamped-1.0)/a); #endif } MagickExport MagickBooleanType SigmoidalContrastImage(Image *image, const MagickBooleanType sharpen,const double contrast,const double midpoint, ExceptionInfo *exception) { #define SigmoidalContrastImageTag "SigmoidalContrast/Image" #define ScaledSig(x) ( ClampToQuantum(QuantumRange* \ ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) ) #define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \ InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) ) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Convenience macros. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Side effect: may clamp values unless contrast<MagickEpsilon, in which case nothing is done. */ if (contrast < MagickEpsilon) return(MagickTrue); /* Sigmoidal-contrast enhance colormap. */ if (image->storage_class == PseudoClass) { register ssize_t i; if( sharpen != MagickFalse ) for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(MagickRealType) ScaledSig( image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(MagickRealType) ScaledSig( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(MagickRealType) ScaledSig( image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(MagickRealType) ScaledSig( image->colormap[i].alpha); } else for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(MagickRealType) InverseScaledSig( image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(MagickRealType) InverseScaledSig( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(MagickRealType) InverseScaledSig( image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(MagickRealType) InverseScaledSig( image->colormap[i].alpha); } } /* Sigmoidal-contrast enhance image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if( sharpen != MagickFalse ) q[i]=ScaledSig(q[i]); else q[i]=InverseScaledSig(q[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e B a l a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteBalanceImage() applies white balancing to an image according to a % grayworld assumption in the LAB colorspace. 
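%
% Under the gray-world assumption the a and b chroma channels of a balanced
% image average to their neutral value (0.5 when normalized). The method
% measures the mean a and b offsets from neutral and subtracts them from
% each pixel, scaled by 1.1 times the pixel's luminance, so brighter pixels
% receive a larger correction.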
% % The format of the WhiteBalanceImage method is: % % MagickBooleanType WhiteBalanceImage(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-level % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType WhiteBalanceImage(Image *image, ExceptionInfo *exception) { #define WhiteBalanceImageTag "WhiteBalance/Image" CacheView *image_view; const char *artifact; double a_mean, b_mean; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* White balance image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=TransformImageColorspace(image,LabColorspace,exception); a_mean=0.0; b_mean=0.0; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { a_mean+=QuantumScale*GetPixela(image,p)-0.5; b_mean+=QuantumScale*GetPixelb(image,p)-0.5; p+=GetPixelChannels(image); } } a_mean/=((double) image->columns*image->rows); b_mean/=((double) image->columns*image->rows); progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double a, b; /* Scale the chroma distance shifted according to amount of luminance. */ a=(double) GetPixela(image,q)-1.1*GetPixelL(image,q)*a_mean; b=(double) GetPixelb(image,q)-1.1*GetPixelL(image,q)*b_mean; SetPixela(image,ClampToQuantum(a),q); SetPixelb(image,ClampToQuantum(b),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,WhiteBalanceImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); artifact=GetImageArtifact(image,"white-balance:vibrance"); if (artifact != (const char *) NULL) { ChannelType channel_mask; double black_point; GeometryInfo geometry_info; MagickStatusType flags; /* Level the a & b channels. */ flags=ParseGeometry(artifact,&geometry_info); black_point=geometry_info.rho; if ((flags & PercentValue) != 0) black_point*=(double) (QuantumRange/100.0); channel_mask=SetImageChannelMask(image,aChannel | bChannel); status&=LevelImage(image,black_point,(double) QuantumRange-black_point, 1.0,exception); (void) SetImageChannelMask(image,channel_mask); } status&=TransformImageColorspace(image,sRGBColorspace,exception); return(status); }
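/* A minimal driver sketch, not part of the original source: the file name "input.png" and the parameter values are illustrative assumptions. It shows how the two routines above might be called together. */ static MagickBooleanType EnhanceExample(ExceptionInfo * exception) { Image * image; ImageInfo * image_info; MagickBooleanType status; image_info = AcquireImageInfo(); (void)CopyMagickString(image_info->filename, "input.png", MagickPathExtent); image = ReadImage(image_info, exception); /* load the source pixels */ if (image == (Image *) NULL) { image_info = DestroyImageInfo(image_info); return (MagickFalse); } /* sharpen == MagickTrue applies the forward scaled sigmoidal */ status = SigmoidalContrastImage(image, MagickTrue, 3.0, (double)QuantumRange / 2.0, exception); if (status != MagickFalse) status = WhiteBalanceImage(image, exception); image = DestroyImage(image); image_info = DestroyImageInfo(image_info); return (status); }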
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % A u t o G a m m a I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % AutoGammaImage() extracts the 'mean' from the image and adjusts * the image % to try to set its gamma appropriately. % % The format of * the AutoGammaImage method is: % % MagickBooleanType * AutoGammaImage(Image *image,ExceptionInfo *exception) % % A description * of each parameter follows: % % o image: The image to auto-level % % * o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType AutoGammaImage(Image * image, ExceptionInfo * exception) { double gamma, log_mean, mean, sans; MagickStatusType status; register ssize_t i; log_mean = log(0.5); if (image->channel_mask == DefaultChannels) { /* * Apply gamma correction equally across all given channels. */ (void)GetImageMean(image, &mean, &sans, exception); gamma = log(mean * QuantumScale) / log_mean; return (LevelImage(image, 0.0, (double)QuantumRange, gamma, exception)); } /* * Auto-gamma each channel separately. */ status = MagickTrue; for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { ChannelType channel_mask; PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; channel_mask = SetImageChannelMask(image, (ChannelType) (1UL << i)); status = GetImageMean(image, &mean, &sans, exception); gamma = log(mean * QuantumScale) / log_mean; status &= LevelImage(image, 0.0, (double)QuantumRange, gamma, exception); (void)SetImageChannelMask(image, channel_mask); if (status == MagickFalse) break; } return (status != 0 ? 
MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % A u t o L e v e l I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % AutoLevelImage() adjusts the levels of a particular image * channel by % scaling the minimum and maximum values to the full quantum * range. % % The format of the AutoLevelImage method is: % % * MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception) % * % A description of each parameter follows: % % o image: The image to * auto-level % % o exception: return any errors or warnings in this * structure. % */ MagickExport MagickBooleanType AutoLevelImage(Image * image, ExceptionInfo * exception) { return (MinMaxStretchImage(image, 0.0, 0.0, 1.0, exception)); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % B r i g h t n e s s C o n t r a s t I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % BrightnessContrastImage() changes the brightness and/or contrast * of an % image. It converts the brightness and contrast parameters into * slope and % intercept and calls a polynomial function to apply to the * image. % % The format of the BrightnessContrastImage method is: % % * MagickBooleanType BrightnessContrastImage(Image *image, % const * double brightness,const double contrast,ExceptionInfo *exception) % % A * description of each parameter follows: % % o image: the image. % % o * brightness: the brightness percent (-100 .. 100). % % o contrast: the * contrast percent (-100 .. 100). % % o exception: return any errors or * warnings in this structure. % */ MagickExport MagickBooleanType BrightnessContrastImage(Image * image, const double brightness, const double contrast, ExceptionInfo * exception) { #define BrightnessContrastImageTag "BrightnessContrast/Image" double alpha, coefficients[2], intercept, slope; MagickBooleanType status; /* * Compute slope and intercept. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); alpha = contrast; slope = tan((double)(MagickPI * (alpha / 100.0 + 1.0) / 4.0)); if (slope < 0.0) slope = 0.0; intercept = brightness / 100.0 + ((100 - brightness) / 200.0) * (1.0 - slope); coefficients[0] = slope; coefficients[1] = intercept; status = FunctionImage(image, PolynomialFunction, 2, coefficients, exception); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C L A H E I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CLAHEImage() is a variant of adaptive histogram equalization in * which the % contrast amplification is limited, so as to reduce the * problem of noise % amplification. % % Adapted from implementation by * Karel Zuiderveld, karel@cv.ruu.nl in % "Graphics Gems IV", Academic * Press, 1994. % % The format of the CLAHEImage method is: % % * MagickBooleanType CLAHEImage(Image *image,const size_t width, % * const size_t height,const size_t number_bins,const double clip_limit, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o width: the width of the tile divisions * to use in horizontal direction. 
% % o height: the height of the tile * divisions to use in vertical direction. % % o number_bins: number of * bins for histogram ("dynamic range"). % % o clip_limit: contrast limit * for localised changes in contrast. A limit % less than 1 results in * standard non-contrast limited AHE. % % o exception: return any errors * or warnings in this structure. % */ typedef struct _RangeInfo { unsigned short min, max; } RangeInfo; static void ClipCLAHEHistogram(const double clip_limit, const size_t number_bins, size_t * histogram) { #define NumberCLAHEGrays (65536) register ssize_t i; size_t cumulative_excess, previous_excess, step; ssize_t excess; /* * Compute total number of excess pixels. */ cumulative_excess = 0; for (i = 0; i < (ssize_t) number_bins; i++) { excess = (ssize_t) histogram[i] - (ssize_t) clip_limit; if (excess > 0) cumulative_excess += excess; } /* * Clip histogram and redistribute excess pixels across all bins. */ step = cumulative_excess / number_bins; excess = (ssize_t) (clip_limit - step); for (i = 0; i < (ssize_t) number_bins; i++) { if ((double)histogram[i] > clip_limit) histogram[i] = (size_t) clip_limit; else if ((ssize_t) histogram[i] > excess) { cumulative_excess -= histogram[i] - excess; histogram[i] = (size_t) clip_limit; } else { cumulative_excess -= step; histogram[i] += step; } } /* * Redistribute remaining excess. */ do { register size_t * p; size_t * q; previous_excess = cumulative_excess; p = histogram; q = histogram + number_bins; while ((cumulative_excess != 0) && (p < q)) { step = number_bins / cumulative_excess; if (step < 1) step = 1; for (p = histogram; (p < q) && (cumulative_excess != 0); p += step) if ((double)*p < clip_limit) { (*p)++; cumulative_excess--; } p++; } } while ((cumulative_excess != 0) && (cumulative_excess < previous_excess)); } static void GenerateCLAHEHistogram(const RectangleInfo * clahe_info, const RectangleInfo * tile_info, const size_t number_bins, const unsigned short *lut, const unsigned short *pixels, size_t * histogram) { register const unsigned short *p; register ssize_t i; /* * Classify the pixels into a gray histogram. */ for (i = 0; i < (ssize_t) number_bins; i++) histogram[i] = 0L; p = pixels; for (i = 0; i < (ssize_t) tile_info->height; i++) { const unsigned short *q; q = p + tile_info->width; while (p < q) histogram[lut[*p++]]++; q += clahe_info->width; p = q - tile_info->width; } } static void InterpolateCLAHE(const RectangleInfo * clahe_info, const size_t * Q12, const size_t * Q22, const size_t * Q11, const size_t * Q21, const RectangleInfo * tile, const unsigned short *lut, unsigned short *pixels) { ssize_t y; unsigned short intensity; /* * Bilinear interpolate four tiles to eliminate boundary artifacts. */ for (y = (ssize_t) tile->height; y > 0; y--) { register ssize_t x; for (x = (ssize_t) tile->width; x > 0; x--) { intensity = lut[*pixels]; *pixels++ = (unsigned short)(PerceptibleReciprocal((double)tile->width * tile->height) * (y * (x * Q12[intensity] + (tile->width - x) * Q22[intensity]) + (tile->height - y) * (x * Q11[intensity] + (tile->width - x) * Q21[intensity]))); } pixels += (clahe_info->width - tile->width); } } static void GenerateCLAHELut(const RangeInfo * range_info, const size_t number_bins, unsigned short *lut) { ssize_t i; unsigned short delta; /* * Scale input image [intensity min,max] to [0,number_bins-1]. 
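For example, with min=0, max=65535, and number_bins=128: delta = 65535/128 + 1 = 512, so lut[i] = i/512 maps the full 16-bit range onto bin indices 0..127.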
*/ delta = (unsigned short)((range_info->max - range_info->min) / number_bins + 1); for (i = (ssize_t) range_info->min; i <= (ssize_t) range_info->max; i++) lut[i] = (unsigned short)((i - range_info->min) / delta); } static void MapCLAHEHistogram(const RangeInfo * range_info, const size_t number_bins, const size_t number_pixels, size_t * histogram) { double scale, sum; register ssize_t i; /* * Rescale histogram to range [min-intensity .. max-intensity]. */ scale = (double)(range_info->max - range_info->min) / number_pixels; sum = 0.0; for (i = 0; i < (ssize_t) number_bins; i++) { sum += histogram[i]; histogram[i] = (size_t) (range_info->min + scale * sum); if (histogram[i] > range_info->max) histogram[i] = range_info->max; } } static MagickBooleanType CLAHE(const RectangleInfo * clahe_info, const RectangleInfo * tile_info, const RangeInfo * range_info, const size_t number_bins, const double clip_limit, unsigned short *pixels) { MemoryInfo * tile_cache; register unsigned short *p; size_t limit, *tiles; ssize_t y; unsigned short *lut; /* * Contrast limited adaptive histogram equalization. */ if (clip_limit == 1.0) return (MagickTrue); tile_cache = AcquireVirtualMemory((size_t) clahe_info->x * clahe_info->y, number_bins * sizeof(*tiles)); if (tile_cache == (MemoryInfo *) NULL) return (MagickFalse); lut = (unsigned short *)AcquireQuantumMemory(NumberCLAHEGrays, sizeof(*lut)); if (lut == (unsigned short *)NULL) { tile_cache = RelinquishVirtualMemory(tile_cache); return (MagickFalse); } tiles = (size_t *) GetVirtualMemoryBlob(tile_cache); limit = (size_t) (clip_limit * (tile_info->width * tile_info->height) / number_bins); if (limit < 1UL) limit = 1UL; /* * Generate greylevel mappings for each tile. */ GenerateCLAHELut(range_info, number_bins, lut); p = pixels; for (y = 0; y < (ssize_t) clahe_info->y; y++) { register ssize_t x; for (x = 0; x < (ssize_t) clahe_info->x; x++) { size_t * histogram; histogram = tiles + (number_bins * (y * clahe_info->x + x)); GenerateCLAHEHistogram(clahe_info, tile_info, number_bins, lut, p, histogram); ClipCLAHEHistogram((double)limit, number_bins, histogram); MapCLAHEHistogram(range_info, number_bins, tile_info->width * tile_info->height, histogram); p += tile_info->width; } p += clahe_info->width * (tile_info->height - 1); } /* * Interpolate greylevel mappings to get CLAHE image. */ p = pixels; for (y = 0; y <= (ssize_t) clahe_info->y; y++) { OffsetInfo offset; RectangleInfo tile; register ssize_t x; tile.height = tile_info->height; tile.y = y - 1; offset.y = tile.y + 1; if (y == 0) { /* * Top row. */ tile.height = tile_info->height >> 1; tile.y = 0; offset.y = 0; } else if (y == (ssize_t) clahe_info->y) { /* * Bottom row. */ tile.height = (tile_info->height + 1) >> 1; tile.y = clahe_info->y - 1; offset.y = tile.y; } for (x = 0; x <= (ssize_t) clahe_info->x; x++) { tile.width = tile_info->width; tile.x = x - 1; offset.x = tile.x + 1; if (x == 0) { /* * Left column. */ tile.width = tile_info->width >> 1; tile.x = 0; offset.x = 0; } else if (x == (ssize_t) clahe_info->x) { /* * Right column. 
*/ tile.width = (tile_info->width + 1) >> 1; tile.x = clahe_info->x - 1; offset.x = tile.x; } InterpolateCLAHE(clahe_info, tiles + (number_bins * (tile.y * clahe_info->x + tile.x)), /* Q12 */ tiles + (number_bins * (tile.y * clahe_info->x + offset.x)), /* Q22 */ tiles + (number_bins * (offset.y * clahe_info->x + tile.x)), /* Q11 */ tiles + (number_bins * (offset.y * clahe_info->x + offset.x)), /* Q21 */ &tile, lut, p); p += tile.width; } p += clahe_info->width * (tile.height - 1); } lut = (unsigned short *)RelinquishMagickMemory(lut); tile_cache = RelinquishVirtualMemory(tile_cache); return (MagickTrue); } MagickExport MagickBooleanType CLAHEImage(Image * image, const size_t width, const size_t height, const size_t number_bins, const double clip_limit, ExceptionInfo * exception) { #define CLAHEImageTag "CLAHE/Image" CacheView * image_view; ColorspaceType colorspace; MagickBooleanType status; MagickOffsetType progress; MemoryInfo * pixel_cache; RangeInfo range_info; RectangleInfo clahe_info, tile_info; size_t n; ssize_t y; unsigned short *pixels; /* * Configure CLAHE parameters. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); range_info.min = 0; range_info.max = NumberCLAHEGrays - 1; tile_info.width = width; if (tile_info.width == 0) tile_info.width = image->columns >> 3; tile_info.height = height; if (tile_info.height == 0) tile_info.height = image->rows >> 3; tile_info.x = 0; if ((image->columns % tile_info.width) != 0) tile_info.x = (ssize_t) tile_info.width - (image->columns % tile_info.width); tile_info.y = 0; if ((image->rows % tile_info.height) != 0) tile_info.y = (ssize_t) tile_info.height - (image->rows % tile_info.height); clahe_info.width = image->columns + tile_info.x; clahe_info.height = image->rows + tile_info.y; clahe_info.x = (ssize_t) clahe_info.width / tile_info.width; clahe_info.y = (ssize_t) clahe_info.height / tile_info.height; pixel_cache = AcquireVirtualMemory(clahe_info.width, clahe_info.height * sizeof(*pixels)); if (pixel_cache == (MemoryInfo *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); pixels = (unsigned short *)GetVirtualMemoryBlob(pixel_cache); colorspace = image->colorspace; if (TransformImageColorspace(image, LabColorspace, exception) == MagickFalse) { pixel_cache = RelinquishVirtualMemory(pixel_cache); return (MagickFalse); } /* * Initialize CLAHE pixels. */ image_view = AcquireVirtualCacheView(image, exception); progress = 0; status = MagickTrue; n = 0; for (y = 0; y < (ssize_t) clahe_info.height; y++) { register const Quantum * magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, -(tile_info.x >> 1), y - (tile_info.y >> 1), clahe_info.width, 1, exception); if (p == (const Quantum *)NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) clahe_info.width; x++) { pixels[n++] = ScaleQuantumToShort(p[0]); p += GetPixelChannels(image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, CLAHEImageTag, progress, 2 * GetPixelChannels(image)); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); status = CLAHE(&clahe_info, &tile_info, &range_info, number_bins == 0 ? 
(size_t) 128 : MagickMin(number_bins, 256), clip_limit, pixels); if (status == MagickFalse) (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); /* * Push CLAHE pixels to CLAHE image. */ image_view = AcquireAuthenticCacheView(image, exception); n = clahe_info.width * (tile_info.y >> 1); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } n += tile_info.x >> 1; for (x = 0; x < (ssize_t) image->columns; x++) { q[0] = ScaleShortToQuantum(pixels[n++]); q += GetPixelChannels(image); } n += (clahe_info.width - image->columns - (tile_info.x >> 1)); if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, CLAHEImageTag, progress, 2 * GetPixelChannels(image)); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); pixel_cache = RelinquishVirtualMemory(pixel_cache); if (TransformImageColorspace(image, colorspace, exception) == MagickFalse) status = MagickFalse; return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C l u t I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ClutImage() replaces each color value in the given image by * using it as an % index to look up a replacement color value in a Color * Look UP Table in the % form of an image. The values are extracted along * a diagonal of the CLUT % image so either a horizontal or vertical gradient * image can be used. % % Typically this is used to either re-color a * gray-scale image according to a % color gradient in the CLUT image, or to * perform a freeform histogram % (level) adjustment according to the * (typically gray-scale) gradient in the % CLUT image. % % When the * 'channel' mask includes the matte/alpha transparency channel but % one * image has no such channel, it is assumed that that image is a simple % * gray-scale image that will affect the alpha channel values, either for % * gray-scale coloring (with transparent or semi-transparent colors), or % a * histogram adjustment of existing alpha channel values. If both images % * have matte channels, direct and normal indexing is applied, which is * rarely % used. % % The format of the ClutImage method is: % % * MagickBooleanType ClutImage(Image *image,Image *clut_image, % const * PixelInterpolateMethod method,ExceptionInfo *exception) % % A description * of each parameter follows: % % o image: the image, which is replaced by * indexed CLUT values % % o clut_image: the color lookup table image for * replacement color values. % % o method: the pixel interpolation method. * % % o exception: return any errors or warnings in this structure. 
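% % For example, a hypothetical call (the variable clut, a gradient image read elsewhere, is an assumption for illustration): % % (void) ClutImage(image,clut,BilinearInterpolatePixel,exception); %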
% */ MagickExport MagickBooleanType ClutImage(Image * image, const Image * clut_image, const PixelInterpolateMethod method, ExceptionInfo * exception) { #define ClutImageTag "Clut/Image" CacheView * clut_view, *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo * clut_map; register ssize_t i; ssize_t adjust, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(clut_image != (Image *) NULL); assert(clut_image->signature == MagickCoreSignature); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsGrayColorspace(clut_image->colorspace) == MagickFalse)) (void)SetImageColorspace(image, sRGBColorspace, exception); clut_map = (PixelInfo *) AcquireQuantumMemory(MaxMap + 1UL, sizeof(*clut_map)); if (clut_map == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); /* * Clut image. */ status = MagickTrue; progress = 0; adjust = (ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1); clut_view = AcquireVirtualCacheView(clut_image, exception); for (i = 0; i <= (ssize_t) MaxMap; i++) { GetPixelInfo(clut_image, clut_map + i); status = InterpolatePixelInfo(clut_image, clut_view, method, (double)i * (clut_image->columns - adjust) / MaxMap, (double)i * (clut_image->rows - adjust) / MaxMap, clut_map + i, exception); if (status == MagickFalse) break; } clut_view = DestroyCacheView(clut_view); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } GetPixelInfo(image, &pixel); for (x = 0; x < (ssize_t) image->columns; x++) { PixelTrait traits; GetPixelInfoPixel(image, q, &pixel); traits = GetPixelChannelTraits(image, RedPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.red = clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.red))].red; traits = GetPixelChannelTraits(image, GreenPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.green = clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.green))].green; traits = GetPixelChannelTraits(image, BluePixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.blue = clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.blue))].blue; traits = GetPixelChannelTraits(image, BlackPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.black = clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.black))].black; traits = GetPixelChannelTraits(image, AlphaPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.alpha = clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.alpha))].alpha; SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, ClutImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); clut_map = (PixelInfo *) RelinquishMagickMemory(clut_map); if ((clut_image->alpha_trait != UndefinedPixelTrait) && 
((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)) (void)SetImageAlphaChannel(image, ActivateAlphaChannel, exception); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o l o r D e c i s i o n L i s t I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ColorDecisionListImage() accepts a lightweight Color Correction * Collection % (CCC) file which solely contains one or more color * corrections and applies % the correction to the image. Here is a sample * CCC file: % % <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2"> % * <ColorCorrection id="cc03345"> % <SOPNode> % * <Slope> 0.9 1.2 0.5 </Slope> % <Offset> 0.4 -0.5 0.6 * </Offset> % <Power> 1.0 0.8 1.5 </Power> % * </SOPNode> % <SATNode> % <Saturation> * 0.85 </Saturation> % </SATNode> % * </ColorCorrection> % </ColorCorrectionCollection> % % which includes * the slope, offset, and power for each of the RGB channels % as well as the * saturation. % % The format of the ColorDecisionListImage method is: % % * MagickBooleanType ColorDecisionListImage(Image *image, % const char * *color_correction_collection,ExceptionInfo *exception) % % A description * of each parameter follows: % % o image: the image. % % o * color_correction_collection: the color correction collection in XML. % % * o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ColorDecisionListImage(Image * image, const char *color_correction_collection, ExceptionInfo * exception) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView * image_view; char token[MagickPathExtent]; ColorCorrection color_correction; const char *content, *p; MagickBooleanType status; MagickOffsetType progress; PixelInfo * cdl_map; register ssize_t i; ssize_t y; XMLTreeInfo * cc, *ccc, *sat, *sop; /* * Allocate and initialize cdl maps. 
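Each map entry i applies the ASC CDL transfer function out = pow(slope*(i/MaxMap) + offset, power) per channel, and the saturation step later blends the mapped value with a Rec.709-style luma, 0.21267*R + 0.71526*G + 0.07217*B.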
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (color_correction_collection == (const char *)NULL) return (MagickFalse); ccc = NewXMLTree((const char *)color_correction_collection, exception); if (ccc == (XMLTreeInfo *) NULL) return (MagickFalse); cc = GetXMLTreeChild(ccc, "ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc = DestroyXMLTree(ccc); return (MagickFalse); } color_correction.red.slope = 1.0; color_correction.red.offset = 0.0; color_correction.red.power = 1.0; color_correction.green.slope = 1.0; color_correction.green.offset = 0.0; color_correction.green.power = 1.0; color_correction.blue.slope = 1.0; color_correction.blue.offset = 0.0; color_correction.blue.power = 1.0; color_correction.saturation = 0.0; sop = GetXMLTreeChild(cc, "SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo * offset, *power, *slope; slope = GetXMLTreeChild(sop, "Slope"); if (slope != (XMLTreeInfo *) NULL) { content = GetXMLTreeContent(slope); p = (const char *)content; for (i = 0; (*p != '\0') && (i < 3); i++) { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); switch (i) { case 0: { color_correction.red.slope = StringToDouble(token, (char **)NULL); break; } case 1: { color_correction.green.slope = StringToDouble(token, (char **)NULL); break; } case 2: { color_correction.blue.slope = StringToDouble(token, (char **)NULL); break; } } } } offset = GetXMLTreeChild(sop, "Offset"); if (offset != (XMLTreeInfo *) NULL) { content = GetXMLTreeContent(offset); p = (const char *)content; for (i = 0; (*p != '\0') && (i < 3); i++) { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); switch (i) { case 0: { color_correction.red.offset = StringToDouble(token, (char **)NULL); break; } case 1: { color_correction.green.offset = StringToDouble(token, (char **)NULL); break; } case 2: { color_correction.blue.offset = StringToDouble(token, (char **)NULL); break; } } } } power = GetXMLTreeChild(sop, "Power"); if (power != (XMLTreeInfo *) NULL) { content = GetXMLTreeContent(power); p = (const char *)content; for (i = 0; (*p != '\0') && (i < 3); i++) { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); switch (i) { case 0: { color_correction.red.power = StringToDouble(token, (char **)NULL); break; } case 1: { color_correction.green.power = StringToDouble(token, (char **)NULL); break; } case 2: { color_correction.blue.power = StringToDouble(token, (char **)NULL); break; } } } } } sat = GetXMLTreeChild(cc, "SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo * saturation; saturation = GetXMLTreeChild(sat, "Saturation"); if (saturation != (XMLTreeInfo *) NULL) { content = GetXMLTreeContent(saturation); p = (const char *)content; (void)GetNextToken(p, &p, MagickPathExtent, token); color_correction.saturation = StringToDouble(token, (char **)NULL); } } ccc = DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void)LogMagickEvent(TransformEvent, GetMagickModule(), " Color Correction Collection:"); (void)LogMagickEvent(TransformEvent, GetMagickModule(), " color_correction.red.slope: %g", color_correction.red.slope); (void)LogMagickEvent(TransformEvent, GetMagickModule(), " color_correction.red.offset: %g", color_correction.red.offset); 
(void)LogMagickEvent(TransformEvent, GetMagickModule(), " color_correction.red.power: %g", color_correction.red.power); (void)LogMagickEvent(TransformEvent, GetMagickModule(), " color_correction.green.slope: %g", color_correction.green.slope); (void)LogMagickEvent(TransformEvent, GetMagickModule(), " color_correction.green.offset: %g", color_correction.green.offset); (void)LogMagickEvent(TransformEvent, GetMagickModule(), " color_correction.green.power: %g", color_correction.green.power); (void)LogMagickEvent(TransformEvent, GetMagickModule(), " color_correction.blue.slope: %g", color_correction.blue.slope); (void)LogMagickEvent(TransformEvent, GetMagickModule(), " color_correction.blue.offset: %g", color_correction.blue.offset); (void)LogMagickEvent(TransformEvent, GetMagickModule(), " color_correction.blue.power: %g", color_correction.blue.power); (void)LogMagickEvent(TransformEvent, GetMagickModule(), " color_correction.saturation: %g", color_correction.saturation); } cdl_map = (PixelInfo *) AcquireQuantumMemory(MaxMap + 1UL, sizeof(*cdl_map)); if (cdl_map == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); for (i = 0; i <= (ssize_t) MaxMap; i++) { cdl_map[i].red = (double)ScaleMapToQuantum((double) (MaxMap * (pow(color_correction.red.slope * i / MaxMap + color_correction.red.offset, color_correction.red.power)))); cdl_map[i].green = (double)ScaleMapToQuantum((double) (MaxMap * (pow(color_correction.green.slope * i / MaxMap + color_correction.green.offset, color_correction.green.power)))); cdl_map[i].blue = (double)ScaleMapToQuantum((double) (MaxMap * (pow(color_correction.blue.slope * i / MaxMap + color_correction.blue.offset, color_correction.blue.power)))); } if (image->storage_class == PseudoClass) for (i = 0; i < (ssize_t) image->colors; i++) { /* * Apply transfer function to colormap. */ double luma; luma = 0.21267f * image->colormap[i].red + 0.71526 * image->colormap[i].green + 0.07217f * image->colormap[i].blue; image->colormap[i].red = luma + color_correction.saturation * (cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red - luma); image->colormap[i].green = luma + color_correction.saturation * (cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green - luma); image->colormap[i].blue = luma + color_correction.saturation * (cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue - luma); } /* * Apply transfer function to image. 
*/ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { double luma; register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { luma = 0.21267f * GetPixelRed(image, q) + 0.71526 * GetPixelGreen(image, q) + 0.07217f * GetPixelBlue(image, q); SetPixelRed(image, ClampToQuantum(luma + color_correction.saturation * (cdl_map[ScaleQuantumToMap(GetPixelRed(image, q))].red - luma)), q); SetPixelGreen(image, ClampToQuantum(luma + color_correction.saturation * (cdl_map[ScaleQuantumToMap(GetPixelGreen(image, q))].green - luma)), q); SetPixelBlue(image, ClampToQuantum(luma + color_correction.saturation * (cdl_map[ScaleQuantumToMap(GetPixelBlue(image, q))].blue - luma)), q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, ColorDecisionListCorrectImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); cdl_map = (PixelInfo *) RelinquishMagickMemory(cdl_map); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o n t r a s t I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ContrastImage() enhances the intensity differences between the * lighter and % darker elements of the image. Set sharpen to MagickTrue * to increase the % image contrast, otherwise the contrast is reduced. % % * The format of the ContrastImage method is: % % MagickBooleanType * ContrastImage(Image *image, % const MagickBooleanType * sharpen,ExceptionInfo *exception) % % A description of each parameter * follows: % % o image: the image. % % o sharpen: Increase or decrease * image contrast. % % o exception: return any errors or warnings in this * structure. % */ static void Contrast(const int sign, double *red, double *green, double *blue) { double brightness, hue, saturation; /* * Enhance contrast: dark colors become darker, light colors become * lighter. 
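The update below moves brightness halfway toward the sine-eased S-curve 0.5*(sin(MagickPI*(brightness-0.5))+1.0); for example brightness=0.25 has a curve value of about 0.146, so with sign=+1 the result is 0.25+0.5*(0.146-0.25), roughly 0.198, i.e. mid-dark tones darken.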
*/ assert(red != (double *)NULL); assert(green != (double *)NULL); assert(blue != (double *)NULL); hue = 0.0; saturation = 0.0; brightness = 0.0; ConvertRGBToHSB(*red, *green, *blue, &hue, &saturation, &brightness); brightness += 0.5 * sign * (0.5 * (sin((double)(MagickPI * (brightness - 0.5))) + 1.0) - brightness); if (brightness > 1.0) brightness = 1.0; else if (brightness < 0.0) brightness = 0.0; ConvertHSBToRGB(hue, saturation, brightness, red, green, blue); } MagickExport MagickBooleanType ContrastImage(Image * image, const MagickBooleanType sharpen, ExceptionInfo * exception) { #define ContrastImageTag "Contrast/Image" CacheView * image_view; int sign; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateContrastImage(image, sharpen, exception) != MagickFalse) return (MagickTrue); #endif if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); sign = sharpen != MagickFalse ? 1 : -1; if (image->storage_class == PseudoClass) { /* * Contrast enhance colormap. */ for (i = 0; i < (ssize_t) image->colors; i++) { double blue, green, red; red = (double)image->colormap[i].red; green = (double)image->colormap[i].green; blue = (double)image->colormap[i].blue; Contrast(sign, &red, &green, &blue); image->colormap[i].red = (MagickRealType) red; image->colormap[i].green = (MagickRealType) green; image->colormap[i].blue = (MagickRealType) blue; } } /* * Contrast enhance image. */ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { double blue, green, red; register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { red = (double)GetPixelRed(image, q); green = (double)GetPixelGreen(image, q); blue = (double)GetPixelBlue(image, q); Contrast(sign, &red, &green, &blue); SetPixelRed(image, ClampToQuantum(red), q); SetPixelGreen(image, ClampToQuantum(green), q); SetPixelBlue(image, ClampToQuantum(blue), q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, ContrastImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o n t r a s t S t r e t c h I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ContrastStretchImage() is a simple image enhancement technique * that attempts % to improve the contrast in an image by 'stretching' the * range of intensity % values it contains to span a desired range of * values. It differs from the % more sophisticated histogram equalization * in that it can only apply a % linear scaling function to the image pixel * values. As a result the % 'enhancement' is less harsh. 
% % The format of the ContrastStretchImage method is: % % MagickBooleanType ContrastStretchImage(Image *image, % const double black_point,const double white_point, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_point: the black point, expressed as a number of pixels % (0 to columns*rows) to clip from the dark end of the histogram. % % o white_point: the white point, expressed as columns*rows minus the % number of pixels to clip from the light end of the histogram. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ContrastStretchImage(Image * image, const double black_point, const double white_point, ExceptionInfo * exception) { #define MaxRange(color) ((double) ScaleQuantumToMap((Quantum) (color))) #define ContrastStretchImageTag "ContrastStretch/Image" CacheView * image_view; double *black, *histogram, *stretch_map, *white; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* * Allocate histogram and stretch map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (SetImageGray(image, exception) != MagickFalse) (void)SetImageColorspace(image, GRAYColorspace, exception); black = (double *)AcquireQuantumMemory(MaxPixelChannels, sizeof(*black)); white = (double *)AcquireQuantumMemory(MaxPixelChannels, sizeof(*white)); histogram = (double *)AcquireQuantumMemory(MaxMap + 1UL, MaxPixelChannels * sizeof(*histogram)); stretch_map = (double *)AcquireQuantumMemory(MaxMap + 1UL, MaxPixelChannels * sizeof(*stretch_map)); if ((black == (double *)NULL) || (white == (double *)NULL) || (histogram == (double *)NULL) || (stretch_map == (double *)NULL)) { if (stretch_map != (double *)NULL) stretch_map = (double *)RelinquishMagickMemory(stretch_map); if (histogram != (double *)NULL) histogram = (double *)RelinquishMagickMemory(histogram); if (white != (double *)NULL) white = (double *)RelinquishMagickMemory(white); if (black != (double *)NULL) black = (double *)RelinquishMagickMemory(black); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } /* * Form histogram. */ status = MagickTrue; (void)memset(histogram, 0, (MaxMap + 1) * GetPixelChannels(image) * sizeof(*histogram)); image_view = AcquireVirtualCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register const Quantum * magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); if (p == (const Quantum *)NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double pixel; pixel = GetPixelIntensity(image, p); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { if (image->channel_mask != DefaultChannels) pixel = (double)p[i]; histogram[GetPixelChannels(image) * ScaleQuantumToMap( ClampToQuantum(pixel)) + i]++; } p += GetPixelChannels(image); } } image_view = DestroyCacheView(image_view); /* * Find the histogram boundaries by locating the black/white levels. 
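Note the convention used below: black_point is a raw count of pixels to clip from the dark end, while the white search walks down from MaxMap and stops once the count seen from the top exceeds columns*rows - white_point, so a caller clipping N pixels from the light end passes white_point = columns*rows - N.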
*/ for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; register ssize_t j; black[i] = 0.0; white[i] = MaxRange(QuantumRange); intensity = 0.0; for (j = 0; j <= (ssize_t) MaxMap; j++) { intensity += histogram[GetPixelChannels(image) * j + i]; if (intensity > black_point) break; } black[i] = (double)j; intensity = 0.0; for (j = (ssize_t) MaxMap; j != 0; j--) { intensity += histogram[GetPixelChannels(image) * j + i]; if (intensity > ((double)image->columns * image->rows - white_point)) break; } white[i] = (double)j; } histogram = (double *)RelinquishMagickMemory(histogram); /* * Stretch the histogram to create the stretched image mapping. */ (void)memset(stretch_map, 0, (MaxMap + 1) * GetPixelChannels(image) * sizeof(*stretch_map)); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { register ssize_t j; for (j = 0; j <= (ssize_t) MaxMap; j++) { double gamma; gamma = PerceptibleReciprocal(white[i] - black[i]); if (j < (ssize_t) black[i]) stretch_map[GetPixelChannels(image) * j + i] = 0.0; else if (j > (ssize_t) white[i]) stretch_map[GetPixelChannels(image) * j + i] = (double)QuantumRange; else if (black[i] != white[i]) stretch_map[GetPixelChannels(image) * j + i] = (double)ScaleMapToQuantum( (double)(MaxMap * gamma * (j - black[i]))); } } if (image->storage_class == PseudoClass) { register ssize_t j; /* * Stretch-contrast colormap. */ for (j = 0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { i = GetPixelChannelOffset(image, RedPixelChannel); image->colormap[j].red = stretch_map[GetPixelChannels(image) * ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red)) + i]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { i = GetPixelChannelOffset(image, GreenPixelChannel); image->colormap[j].green = stretch_map[GetPixelChannels(image) * ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green)) + i]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { i = GetPixelChannelOffset(image, BluePixelChannel); image->colormap[j].blue = stretch_map[GetPixelChannels(image) * ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue)) + i]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { i = GetPixelChannelOffset(image, AlphaPixelChannel); image->colormap[j].alpha = stretch_map[GetPixelChannels(image) * ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha)) + i]; } } } /* * Stretch-contrast image. 
*/ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j = 0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image, j); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; if (black[j] == white[j]) continue; q[j] = ClampToQuantum(stretch_map[GetPixelChannels(image) * ScaleQuantumToMap(q[j]) + j]); } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, ContrastStretchImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); stretch_map = (double *)RelinquishMagickMemory(stretch_map); white = (double *)RelinquishMagickMemory(white); black = (double *)RelinquishMagickMemory(black); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % E n h a n c e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % EnhanceImage() applies a digital filter that improves the * quality of a % noisy image. % % The format of the EnhanceImage method * is: % % Image *EnhanceImage(const Image *image,ExceptionInfo * *exception) % % A description of each parameter follows: % % o image: * the image. % % o exception: return any errors or warnings in this * structure. 
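% % For example, a hypothetical denoising pass (the variable name is illustrative): % % Image *denoised = EnhanceImage(image,exception); % if (denoised != (Image *) NULL) { image = DestroyImage(image); image = denoised; } %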
% */ MagickExport Image * EnhanceImage(const Image * image, ExceptionInfo * exception) { #define EnhanceImageTag "Enhance/Image" #define EnhancePixel(weight) \ mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \ distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \ distance_squared=(4.0+mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \ distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \ distance_squared+=(7.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \ distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \ distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \ distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \ distance_squared+=(5.0-mean)*distance*distance; \ if (distance_squared < 0.069) \ { \ aggregate.red+=(weight)*GetPixelRed(image,r); \ aggregate.green+=(weight)*GetPixelGreen(image,r); \ aggregate.blue+=(weight)*GetPixelBlue(image,r); \ aggregate.black+=(weight)*GetPixelBlack(image,r); \ aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \ total_weight+=(weight); \ } \ r+=GetPixelChannels(image); CacheView * enhance_view, *image_view; Image * enhance_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* * Initialize enhanced image attributes. */ assert(image != (const Image *)NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); enhance_image = CloneImage(image, 0, 0, MagickTrue, exception); if (enhance_image == (Image *) NULL) return ((Image *) NULL); if (SetImageStorageClass(enhance_image, DirectClass, exception) == MagickFalse) { enhance_image = DestroyImage(enhance_image); return ((Image *) NULL); } /* * Enhance image. 
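Each output pixel is a weighted mean of its 5x5 neighborhood (weights 5 8 10 8 5 / 8 20 40 20 8 / 10 40 80 40 10 / 8 20 40 20 8 / 5 8 10 8 5, per the EnhancePixel calls below); a neighbor contributes only when its mean-weighted squared color distance from the center stays under the 0.069 threshold in the macro above, which is what preserves edges.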
*/ status = MagickTrue; progress = 0; image_view = AcquireVirtualCacheView(image, exception); enhance_view = AcquireAuthenticCacheView(enhance_image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register const Quantum * magick_restrict p; register Quantum * magick_restrict q; register ssize_t x; ssize_t center; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, -2, y - 2, image->columns + 4, 5, exception); q = QueueCacheViewAuthenticPixels(enhance_view, 0, y, enhance_image->columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (Quantum *) NULL)) { status = MagickFalse; continue; } center = (ssize_t) GetPixelChannels(image) * (2 * (image->columns + 4) + 2); GetPixelInfo(image, &pixel); for (x = 0; x < (ssize_t) image->columns; x++) { double distance, distance_squared, mean, total_weight; PixelInfo aggregate; register const Quantum * magick_restrict r; GetPixelInfo(image, &aggregate); total_weight = 0.0; GetPixelInfoPixel(image, p + center, &pixel); r = p; EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); r = p + GetPixelChannels(image) * (image->columns + 4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r = p + 2 * GetPixelChannels(image) * (image->columns + 4); EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0); EnhancePixel(40.0); EnhancePixel(10.0); r = p + 3 * GetPixelChannels(image) * (image->columns + 4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r = p + 4 * GetPixelChannels(image) * (image->columns + 4); EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); if (total_weight > MagickEpsilon) { pixel.red = ((aggregate.red + total_weight / 2.0) / total_weight); pixel.green = ((aggregate.green + total_weight / 2.0) / total_weight); pixel.blue = ((aggregate.blue + total_weight / 2.0) / total_weight); pixel.black = ((aggregate.black + total_weight / 2.0) / total_weight); pixel.alpha = ((aggregate.alpha + total_weight / 2.0) / total_weight); } SetPixelViaPixelInfo(enhance_image, &pixel, q); p += GetPixelChannels(image); q += GetPixelChannels(enhance_image); } if (SyncCacheViewAuthenticPixels(enhance_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, EnhanceImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } enhance_view = DestroyCacheView(enhance_view); image_view = DestroyCacheView(image_view); if (status == MagickFalse) enhance_image = DestroyImage(enhance_image); return (enhance_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % E q u a l i z e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % EqualizeImage() applies a histogram equalization to the image. % * % The format of the EqualizeImage method is: % % MagickBooleanType * EqualizeImage(Image *image,ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. % % o exception: * return any errors or warnings in this structure. 
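% % Equalization integrates each channel's histogram into a cumulative distribution (the map[] loop below) and linearly rescales it between its first and last values, so each intensity is in effect replaced by its normalized CDF value scaled to the quantum range. %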
% */ MagickExport MagickBooleanType EqualizeImage(Image * image, ExceptionInfo * exception) { #define EqualizeImageTag "Equalize/Image" CacheView * image_view; double black[CompositePixelChannel + 1], *equalize_map, *histogram, *map, white[CompositePixelChannel + 1]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* * Allocate and initialize histogram arrays. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateEqualizeImage(image, exception) != MagickFalse) return (MagickTrue); #endif if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); equalize_map = (double *)AcquireQuantumMemory(MaxMap + 1UL, MaxPixelChannels * sizeof(*equalize_map)); histogram = (double *)AcquireQuantumMemory(MaxMap + 1UL, MaxPixelChannels * sizeof(*histogram)); map = (double *)AcquireQuantumMemory(MaxMap + 1UL, MaxPixelChannels * sizeof(*map)); if ((equalize_map == (double *)NULL) || (histogram == (double *)NULL) || (map == (double *)NULL)) { if (map != (double *)NULL) map = (double *)RelinquishMagickMemory(map); if (histogram != (double *)NULL) histogram = (double *)RelinquishMagickMemory(histogram); if (equalize_map != (double *)NULL) equalize_map = (double *)RelinquishMagickMemory(equalize_map); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } /* * Form histogram. */ status = MagickTrue; (void)memset(histogram, 0, (MaxMap + 1) * GetPixelChannels(image) * sizeof(*histogram)); image_view = AcquireVirtualCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register const Quantum * magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); if (p == (const Quantum *)NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; intensity = (double)p[i]; if ((image->channel_mask & SyncChannels) != 0) intensity = GetPixelIntensity(image, p); histogram[GetPixelChannels(image) * ScaleQuantumToMap( ClampToQuantum(intensity)) + i]++; } p += GetPixelChannels(image); } } image_view = DestroyCacheView(image_view); /* * Integrate the histogram to get the equalization map. */ for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; register ssize_t j; intensity = 0.0; for (j = 0; j <= (ssize_t) MaxMap; j++) { intensity += histogram[GetPixelChannels(image) * j + i]; map[GetPixelChannels(image) * j + i] = intensity; } } (void)memset(equalize_map, 0, (MaxMap + 1) * GetPixelChannels(image) * sizeof(*equalize_map)); (void)memset(black, 0, sizeof(*black)); (void)memset(white, 0, sizeof(*white)); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { register ssize_t j; black[i] = map[i]; white[i] = map[GetPixelChannels(image) * MaxMap + i]; if (black[i] != white[i]) for (j = 0; j <= (ssize_t) MaxMap; j++) equalize_map[GetPixelChannels(image) * j + i] = (double) ScaleMapToQuantum((double)((MaxMap * (map[ GetPixelChannels(image) * j + i] - black[i])) / (white[i] - black[i]))); } histogram = (double *)RelinquishMagickMemory(histogram); map = (double *)RelinquishMagickMemory(map); if (image->storage_class == PseudoClass) { register ssize_t j; /* * Equalize colormap. 
*/ for (j = 0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, RedPixelChannel); if (black[channel] != white[channel]) image->colormap[j].red = equalize_map[GetPixelChannels(image) * ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red)) + channel]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, GreenPixelChannel); if (black[channel] != white[channel]) image->colormap[j].green = equalize_map[GetPixelChannels(image) * ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green)) + channel]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, BluePixelChannel); if (black[channel] != white[channel]) image->colormap[j].blue = equalize_map[GetPixelChannels(image) * ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue)) + channel]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, AlphaPixelChannel); if (black[channel] != white[channel]) image->colormap[j].alpha = equalize_map[GetPixelChannels(image) * ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha)) + channel]; } } } /* * Equalize image. */ progress = 0; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j = 0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image, j); PixelTrait traits = GetPixelChannelTraits(image, channel); if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j])) continue; q[j] = ClampToQuantum(equalize_map[GetPixelChannels(image) * ScaleQuantumToMap(q[j]) + j]); } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, EqualizeImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); equalize_map = (double *)RelinquishMagickMemory(equalize_map); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G a m m a I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GammaImage() gamma-corrects a particular image channel. The * same % image viewed on different devices will have perceptual differences * in the % way the image's intensities are represented on the screen. * Specify % individual gamma levels for the red, green, and blue channels, * or adjust % all three with the gamma parameter. Values typically range * from 0.8 to 2.3. % % You can also reduce the influence of a particular * channel with a gamma % value of 0. % % The format of the GammaImage * method is: % % MagickBooleanType GammaImage(Image *image,const double * gamma, % ExceptionInfo *exception) % % A description of each * parameter follows: % % o image: the image. % % o level: the image * gamma as a string (e.g. 1.6,1.2,1.0). 
% % o gamma: the image gamma. % */ static inline double gamma_pow(const double value, const double gamma) { return (value < 0.0 ? value : pow(value, gamma)); } MagickExport MagickBooleanType GammaImage(Image * image, const double gamma, ExceptionInfo * exception) { #define GammaImageTag "Gamma/Image" CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; Quantum * gamma_map; register ssize_t i; ssize_t y; /* * Allocate and initialize gamma maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (gamma == 1.0) return (MagickTrue); gamma_map = (Quantum *) AcquireQuantumMemory(MaxMap + 1UL, sizeof(*gamma_map)); if (gamma_map == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); (void)memset(gamma_map, 0, (MaxMap + 1) * sizeof(*gamma_map)); if (gamma != 0.0) for (i = 0; i <= (ssize_t) MaxMap; i++) gamma_map[i] = ScaleMapToQuantum((double)(MaxMap * pow((double)i / MaxMap, PerceptibleReciprocal(gamma)))); if (image->storage_class == PseudoClass) for (i = 0; i < (ssize_t) image->colors; i++) { /* * Gamma-correct colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red = (double)gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].red))]; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green = (double)gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].green))]; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue = (double)gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].blue))]; if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha = (double)gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].alpha))]; } /* * Gamma-correct image. */ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j = 0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image, j); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j] = gamma_map[ScaleQuantumToMap(ClampToQuantum((MagickRealType) q[j]))]; } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, GammaImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); gamma_map = (Quantum *) RelinquishMagickMemory(gamma_map); if (image->gamma != 0.0) image->gamma *= gamma; return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G r a y s c a l e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GrayscaleImage() converts the image to grayscale. 
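%
%  For example, the default Rec709Luma method computes gray as
%  0.212656*red+0.715158*green+0.072186*blue on gamma-encoded values, while
%  the Rec601/Rec709 luminance methods apply their weights to linear values.
%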
% % The * format of the GrayscaleImage method is: % % MagickBooleanType * GrayscaleImage(Image *image, % const PixelIntensityMethod method * ,ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o method: the pixel intensity method. % % * o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GrayscaleImage(Image * image, const PixelIntensityMethod method, ExceptionInfo * exception) { #define GrayscaleImageTag "Grayscale/Image" CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateGrayscaleImage(image, method, exception) != MagickFalse) { image->intensity = method; image->type = GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return (SetImageColorspace(image, LinearGRAYColorspace, exception)); return (SetImageColorspace(image, GRAYColorspace, exception)); } #endif /* * Grayscale image. */ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { MagickRealType blue, green, red, intensity; red = (MagickRealType) GetPixelRed(image, q); green = (MagickRealType) GetPixelGreen(image, q); blue = (MagickRealType) GetPixelBlue(image, q); intensity = 0.0; switch (method) { case AveragePixelIntensityMethod: { intensity = (red + green + blue) / 3.0; break; } case BrightnessPixelIntensityMethod: { intensity = MagickMax(MagickMax(red, green), blue); break; } case LightnessPixelIntensityMethod: { intensity = (MagickMin(MagickMin(red, green), blue) + MagickMax(MagickMax(red, green), blue)) / 2.0; break; } case MSPixelIntensityMethod: { intensity = (MagickRealType) (((double)red * red + green * green + blue * blue) / 3.0); break; } case Rec601LumaPixelIntensityMethod: { if (image->colorspace == RGBColorspace) { red = EncodePixelGamma(red); green = EncodePixelGamma(green); blue = EncodePixelGamma(blue); } intensity = 0.298839 * red + 0.586811 * green + 0.114350 * blue; break; } case Rec601LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red = DecodePixelGamma(red); green = DecodePixelGamma(green); blue = DecodePixelGamma(blue); } intensity = 0.298839 * red + 0.586811 * green + 0.114350 * blue; break; } case Rec709LumaPixelIntensityMethod: default: { if (image->colorspace == RGBColorspace) { red = EncodePixelGamma(red); green = EncodePixelGamma(green); blue = EncodePixelGamma(blue); } intensity = 0.212656 * red + 0.715158 * green + 0.072186 * blue; break; } case Rec709LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red = DecodePixelGamma(red); green = DecodePixelGamma(green); blue = DecodePixelGamma(blue); } intensity = 0.212656 * red + 0.715158 * green + 
0.072186 * blue; break; } case RMSPixelIntensityMethod: { intensity = (MagickRealType) (sqrt((double)red * red + green * green + blue * blue) / sqrt(3.0)); break; } } SetPixelGray(image, ClampToQuantum(intensity), q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, GrayscaleImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); image->intensity = method; image->type = GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return (SetImageColorspace(image, LinearGRAYColorspace, exception)); return (SetImageColorspace(image, GRAYColorspace, exception)); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % H a l d C l u t I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % HaldClutImage() applies a Hald color lookup table to the image. * A Hald % color lookup table is a 3-dimensional color cube mapped to 2 * dimensions. % Create it with the HALD coder. You can apply any color * transformation to % the Hald image and then use this method to apply the * transform to the % image. % % The format of the HaldClutImage method is: * % % MagickBooleanType HaldClutImage(Image *image,Image *hald_image, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image, which is replaced by indexed CLUT values % % o * hald_image: the color lookup table image for replacement color values. % % * o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType HaldClutImage(Image * image, const Image * hald_image, ExceptionInfo * exception) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { double x, y, z; } HaldInfo; CacheView * hald_view, *image_view; double width; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickCoreSignature); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) (void)SetImageAlphaChannel(image, OpaqueAlphaChannel, exception); /* * Hald clut image. 
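* A pixel's red, green, and blue components index a level x level x level
* color cube stored as a 2-D Hald image; the replacement color is blended
* between adjacent cube rows and slices (the point.y and point.z fractions).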
*/ status = MagickTrue; progress = 0; length = (size_t) MagickMin((MagickRealType) hald_image->columns, (MagickRealType) hald_image->rows); for (level = 2; (level * level * level) < length; level++); level *= level; cube_size = level * level; width = (double)hald_image->columns; GetPixelInfo(hald_image, &zero); hald_view = AcquireVirtualCacheView(hald_image, exception); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double area, offset; HaldInfo point; PixelInfo pixel, pixel1, pixel2, pixel3, pixel4; point.x = QuantumScale * (level - 1.0) * GetPixelRed(image, q); point.y = QuantumScale * (level - 1.0) * GetPixelGreen(image, q); point.z = QuantumScale * (level - 1.0) * GetPixelBlue(image, q); offset = point.x + level * floor(point.y) + cube_size * floor(point.z); point.x -= floor(point.x); point.y -= floor(point.y); point.z -= floor(point.z); pixel1 = zero; status = InterpolatePixelInfo(hald_image, hald_view, hald_image->interpolate, fmod(offset, width), floor(offset / width), &pixel1, exception); if (status == MagickFalse) break; pixel2 = zero; status = InterpolatePixelInfo(hald_image, hald_view, hald_image->interpolate, fmod(offset + level, width), floor((offset + level) / width), &pixel2, exception); if (status == MagickFalse) break; pixel3 = zero; area = point.y; if (hald_image->interpolate == NearestInterpolatePixel) area = (point.y < 0.5) ? 0.0 : 1.0; CompositePixelInfoAreaBlend(&pixel1, pixel1.alpha, &pixel2, pixel2.alpha, area, &pixel3); offset += cube_size; status = InterpolatePixelInfo(hald_image, hald_view, hald_image->interpolate, fmod(offset, width), floor(offset / width), &pixel1, exception); if (status == MagickFalse) break; status = InterpolatePixelInfo(hald_image, hald_view, hald_image->interpolate, fmod(offset + level, width), floor((offset + level) / width), &pixel2, exception); if (status == MagickFalse) break; pixel4 = zero; CompositePixelInfoAreaBlend(&pixel1, pixel1.alpha, &pixel2, pixel2.alpha, area, &pixel4); pixel = zero; area = point.z; if (hald_image->interpolate == NearestInterpolatePixel) area = (point.z < 0.5) ? 
0.0 : 1.0; CompositePixelInfoAreaBlend(&pixel3, pixel3.alpha, &pixel4, pixel4.alpha, area, &pixel); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) SetPixelRed(image, ClampToQuantum(pixel.red), q); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) SetPixelGreen(image, ClampToQuantum(pixel.green), q); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) SetPixelBlue(image, ClampToQuantum(pixel.blue), q); if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) SetPixelBlack(image, ClampToQuantum(pixel.black), q); if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) SetPixelAlpha(image, ClampToQuantum(pixel.alpha), q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, HaldClutImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } hald_view = DestroyCacheView(hald_view); image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % L e v e l I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % LevelImage() adjusts the levels of a particular image channel by * % scaling the colors falling between specified white and black points to * % the full available quantum range. % % The parameters provided * represent the black, and white points. The black % point specifies the * darkest color in the image. Colors darker than the % black point are set * to zero. White point specifies the lightest color in % the image. * Colors brighter than the white point are set to the maximum % quantum * value. % % If a '!' flag is given, map black and white colors to the * given levels % rather than mapping those levels to black and white. See * % LevelizeImage() below. % % Gamma specifies a gamma correction to apply * to the image. % % The format of the LevelImage method is: % % * MagickBooleanType LevelImage(Image *image,const double black_point, % * const double white_point,const double gamma,ExceptionInfo *exception) % % * A description of each parameter follows: % % o image: the image. % % * o black_point: The level to map zero (black) to. % % o white_point: The * level to map QuantumRange (white) to. % % o exception: return any * errors or warnings in this structure. % */ static inline double LevelPixel(const double black_point, const double white_point, const double gamma, const double pixel) { double level_pixel, scale; scale = PerceptibleReciprocal(white_point - black_point); level_pixel = QuantumRange * gamma_pow(scale * ((double)pixel - black_point), PerceptibleReciprocal(gamma)); return (level_pixel); } MagickExport MagickBooleanType LevelImage(Image * image, const double black_point, const double white_point, const double gamma, ExceptionInfo * exception) { #define LevelImageTag "Level/Image" CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* * Allocate and initialize levels map. 
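* LevelPixel() maps a channel value v to
* QuantumRange*pow(scale*(v-black_point),1/gamma), where
* scale=1/(white_point-black_point); out-of-range results are clamped by
* ClampImage() once the loops below finish.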
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (image->storage_class == PseudoClass) for (i = 0; i < (ssize_t) image->colors; i++) { /* * Level colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red = (double)ClampToQuantum(LevelPixel(black_point, white_point, gamma, image->colormap[i].red)); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green = (double)ClampToQuantum(LevelPixel(black_point, white_point, gamma, image->colormap[i].green)); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue = (double)ClampToQuantum(LevelPixel(black_point, white_point, gamma, image->colormap[i].blue)); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha = (double)ClampToQuantum(LevelPixel(black_point, white_point, gamma, image->colormap[i].alpha)); } /* * Level image. */ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j = 0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image, j); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j] = ClampToQuantum(LevelPixel(black_point, white_point, gamma, (double)q[j])); } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, LevelImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); (void)ClampImage(image, exception); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % L e v e l i z e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % LevelizeImage() applies the reversed LevelImage() operation to * just % the specific channels specified. It compresses the full range of * color % values, so that they lie between the given black and white * points. Gamma is % applied before the values are mapped. % % * LevelizeImage() can be called with by using a +level command line % API * option, or using a '!' on a -level or LevelImage() geometry string. % % * It can be used to de-contrast a greyscale image to the exact levels % * specified. Or by using specific levels for each channel of an image you % * can convert a gray-scale image to any linear color gradient, according to * % those levels. % % The format of the LevelizeImage method is: % % * MagickBooleanType LevelizeImage(Image *image,const double black_point, % * const double white_point,const double gamma,ExceptionInfo *exception) % % * A description of each parameter follows: % % o image: the image. % % * o black_point: The level to map zero (black) to. % % o white_point: The * level to map QuantumRange (white) to. 
% % o gamma: adjust gamma by this * factor before mapping values. % % o exception: return any errors or * warnings in this structure. % */ MagickExport MagickBooleanType LevelizeImage(Image * image, const double black_point, const double white_point, const double gamma, ExceptionInfo * exception) { #define LevelizeImageTag "Levelize/Image" #define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \ (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point) CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* * Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (image->storage_class == PseudoClass) for (i = 0; i < (ssize_t) image->colors; i++) { /* * Level colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red = (double)LevelizeValue(image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green = (double)LevelizeValue( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue = (double)LevelizeValue(image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha = (double)LevelizeValue( image->colormap[i].alpha); } /* * Level image. */ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j = 0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image, j); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j] = LevelizeValue(q[j]); } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, LevelizeImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % L e v e l I m a g e C o l o r s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % LevelImageColors() maps the given color to "black" and "white" * values, % linearly spreading out the colors, and level values on a * channel by channel % bases, as per LevelImage(). The given colors allows * you to specify % different level ranges for each of the color channels * separately. % % If the boolean 'invert' is set true the image values will * modifyed in the % reverse direction. That is any existing "black" and * "white" colors in the % image will become the color values given, with * all other values compressed % appropriately. This effectivally maps a * greyscale gradient into the given % color gradient. 
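%
%  A hypothetical example: leveling with a navy black_color and a gold
%  white_color stretches navy toward black and gold toward white on each
%  channel, which is what the -level-colors command-line option exposes.
%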
% % The format of * the LevelImageColors method is: % % MagickBooleanType * LevelImageColors(Image *image, % const PixelInfo *black_color,const * PixelInfo *white_color, % const MagickBooleanType * invert,ExceptionInfo *exception) % % A description of each parameter * follows: % % o image: the image. % % o black_color: The color to map * black to/from % % o white_point: The color to map white to/from % % * o invert: if true map the colors (levelize), rather than from (level) % % * o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType LevelImageColors(Image * image, const PixelInfo * black_color, const PixelInfo * white_color, const MagickBooleanType invert, ExceptionInfo * exception) { ChannelType channel_mask; MagickStatusType status; /* * Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsGrayColorspace(black_color->colorspace) == MagickFalse) || (IsGrayColorspace(white_color->colorspace) == MagickFalse))) (void)SetImageColorspace(image, sRGBColorspace, exception); status = MagickTrue; if (invert == MagickFalse) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { channel_mask = SetImageChannelMask(image, RedChannel); status &= LevelImage(image, black_color->red, white_color->red, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { channel_mask = SetImageChannelMask(image, GreenChannel); status &= LevelImage(image, black_color->green, white_color->green, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { channel_mask = SetImageChannelMask(image, BlueChannel); status &= LevelImage(image, black_color->blue, white_color->blue, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { channel_mask = SetImageChannelMask(image, BlackChannel); status &= LevelImage(image, black_color->black, white_color->black, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { channel_mask = SetImageChannelMask(image, AlphaChannel); status &= LevelImage(image, black_color->alpha, white_color->alpha, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } } else { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { channel_mask = SetImageChannelMask(image, RedChannel); status &= LevelizeImage(image, black_color->red, white_color->red, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { channel_mask = SetImageChannelMask(image, GreenChannel); status &= LevelizeImage(image, black_color->green, white_color->green, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { channel_mask = SetImageChannelMask(image, BlueChannel); status &= LevelizeImage(image, black_color->blue, white_color->blue, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { channel_mask = 
SetImageChannelMask(image, BlackChannel); status &= LevelizeImage(image, black_color->black, white_color->black, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { channel_mask = SetImageChannelMask(image, AlphaChannel); status &= LevelizeImage(image, black_color->alpha, white_color->alpha, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } } return (status != 0 ? MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % L i n e a r S t r e t c h I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % LinearStretchImage() discards any pixels below the black point * and above % the white point and levels the remaining pixels. % % The * format of the LinearStretchImage method is: % % MagickBooleanType * LinearStretchImage(Image *image, % const double black_point,const * double white_point, % ExceptionInfo *exception) % % A description * of each parameter follows: % % o image: the image. % % o * black_point: the black point. % % o white_point: the white point. % % * o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType LinearStretchImage(Image * image, const double black_point, const double white_point, ExceptionInfo * exception) { #define LinearStretchImageTag "LinearStretch/Image" CacheView * image_view; double *histogram, intensity; MagickBooleanType status; ssize_t black, white, y; /* * Allocate histogram and linear map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); histogram = (double *)AcquireQuantumMemory(MaxMap + 1UL, sizeof(*histogram)); if (histogram == (double *)NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); /* * Form histogram. */ (void)memset(histogram, 0, (MaxMap + 1) * sizeof(*histogram)); image_view = AcquireVirtualCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register const Quantum * magick_restrict p; register ssize_t x; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); if (p == (const Quantum *)NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { intensity = GetPixelIntensity(image, p); histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++; p += GetPixelChannels(image); } } image_view = DestroyCacheView(image_view); /* * Find the histogram boundaries by locating the black and white point * levels. */ intensity = 0.0; for (black = 0; black < (ssize_t) MaxMap; black++) { intensity += histogram[black]; if (intensity >= black_point) break; } intensity = 0.0; for (white = (ssize_t) MaxMap; white != 0; white--) { intensity += histogram[white]; if (intensity >= white_point) break; } histogram = (double *)RelinquishMagickMemory(histogram); status = LevelImage(image, (double)ScaleMapToQuantum((MagickRealType) black), (double)ScaleMapToQuantum((MagickRealType) white), 1.0, exception); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % M o d u l a t e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ModulateImage() lets you control the brightness, saturation, and * hue % of an image. 
Modulate represents the brightness,
%  saturation, and hue as one parameter (e.g. 90,150,100).  If the image
%  colorspace is HSL, the modulation is lightness, saturation, and hue.  For
%  HWB, use blackness, whiteness, and hue.  And for HCL, use chroma, luma,
%  and hue.
%
%  The format of the ModulateImage method is:
%
%      MagickBooleanType ModulateImage(Image *image,const char *modulate,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o modulate: Define the percent change in brightness, saturation, and
%      hue.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma, const double percent_luma,
  double *red, double *green, double *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
   * Increase or decrease color luma, chroma, or hue.
   */
  ConvertRGBToHCL(*red, *green, *blue, &hue, &chroma, &luma);
  hue += fmod((percent_hue - 100.0), 200.0) / 200.0;
  chroma *= 0.01 * percent_chroma;
  luma *= 0.01 * percent_luma;
  ConvertHCLToRGB(hue, chroma, luma, red, green, blue);
}

static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma, const double percent_luma,
  double *red, double *green, double *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
   * Increase or decrease color luma, chroma, or hue.
   */
  ConvertRGBToHCLp(*red, *green, *blue, &hue, &chroma, &luma);
  hue += fmod((percent_hue - 100.0), 200.0) / 200.0;
  chroma *= 0.01 * percent_chroma;
  luma *= 0.01 * percent_luma;
  ConvertHCLpToRGB(hue, chroma, luma, red, green, blue);
}

static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation, const double percent_brightness,
  double *red, double *green, double *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
   * Increase or decrease color brightness, saturation, or hue.
   */
  ConvertRGBToHSB(*red, *green, *blue, &hue, &saturation, &brightness);
  hue += fmod((percent_hue - 100.0), 200.0) / 200.0;
  saturation *= 0.01 * percent_saturation;
  brightness *= 0.01 * percent_brightness;
  ConvertHSBToRGB(hue, saturation, brightness, red, green, blue);
}

static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation, const double percent_intensity,
  double *red, double *green, double *blue)
{
  double
    intensity,
    hue,
    saturation;

  /*
   * Increase or decrease color intensity, saturation, or hue.
   */
  ConvertRGBToHSI(*red, *green, *blue, &hue, &saturation, &intensity);
  hue += fmod((percent_hue - 100.0), 200.0) / 200.0;
  saturation *= 0.01 * percent_saturation;
  intensity *= 0.01 * percent_intensity;
  ConvertHSIToRGB(hue, saturation, intensity, red, green, blue);
}

static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation, const double percent_lightness,
  double *red, double *green, double *blue)
{
  double
    hue,
    lightness,
    saturation;

  /*
   * Increase or decrease color lightness, saturation, or hue.
   */
  ConvertRGBToHSL(*red, *green, *blue, &hue, &saturation, &lightness);
  hue += fmod((percent_hue - 100.0), 200.0) / 200.0;
  saturation *= 0.01 * percent_saturation;
  lightness *= 0.01 * percent_lightness;
  ConvertHSLToRGB(hue, saturation, lightness, red, green, blue);
}

static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation, const double percent_value,
  double *red, double *green, double *blue)
{
  double
    hue,
    saturation,
    value;

  /*
   * Increase or decrease color value, saturation, or hue.
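   * A percent_hue of 100 leaves hue unchanged: the
   * fmod((percent_hue - 100.0), 200.0) / 200.0 term rotates hue by at most
   * half the hue circle in either direction.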
*/ ConvertRGBToHSV(*red, *green, *blue, &hue, &saturation, &value); hue += fmod((percent_hue - 100.0), 200.0) / 200.0; saturation *= 0.01 * percent_saturation; value *= 0.01 * percent_value; ConvertHSVToRGB(hue, saturation, value, red, green, blue); } static inline void ModulateHWB(const double percent_hue, const double percent_whiteness, const double percent_blackness, double *red, double *green, double *blue) { double blackness, hue, whiteness; /* * Increase or decrease color blackness, whiteness, or hue. */ ConvertRGBToHWB(*red, *green, *blue, &hue, &whiteness, &blackness); hue += fmod((percent_hue - 100.0), 200.0) / 200.0; blackness *= 0.01 * percent_blackness; whiteness *= 0.01 * percent_whiteness; ConvertHWBToRGB(hue, whiteness, blackness, red, green, blue); } static inline void ModulateLCHab(const double percent_luma, const double percent_chroma, const double percent_hue, double *red, double *green, double *blue) { double hue, luma, chroma; /* * Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHab(*red, *green, *blue, &luma, &chroma, &hue); luma *= 0.01 * percent_luma; chroma *= 0.01 * percent_chroma; hue += fmod((percent_hue - 100.0), 200.0) / 200.0; ConvertLCHabToRGB(luma, chroma, hue, red, green, blue); } static inline void ModulateLCHuv(const double percent_luma, const double percent_chroma, const double percent_hue, double *red, double *green, double *blue) { double hue, luma, chroma; /* * Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHuv(*red, *green, *blue, &luma, &chroma, &hue); luma *= 0.01 * percent_luma; chroma *= 0.01 * percent_chroma; hue += fmod((percent_hue - 100.0), 200.0) / 200.0; ConvertLCHuvToRGB(luma, chroma, hue, red, green, blue); } MagickExport MagickBooleanType ModulateImage(Image * image, const char *modulate, ExceptionInfo * exception) { #define ModulateImageTag "Modulate/Image" CacheView * image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; register ssize_t i; ssize_t y; /* * Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (modulate == (char *)NULL) return (MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void)SetImageColorspace(image, sRGBColorspace, exception); flags = ParseGeometry(modulate, &geometry_info); percent_brightness = geometry_info.rho; percent_saturation = geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation = 100.0; percent_hue = geometry_info.xi; if ((flags & XiValue) == 0) percent_hue = 100.0; colorspace = UndefinedColorspace; artifact = GetImageArtifact(image, "modulate:colorspace"); if (artifact != (const char *)NULL) colorspace = (ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse, artifact); if (image->storage_class == PseudoClass) for (i = 0; i < (ssize_t) image->colors; i++) { double blue, green, red; /* * Modulate image colormap. 
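* The per-colorspace helpers above are applied to each colormap entry; for
* instance, modulate "110,150,100" raises brightness by 10% and saturation
* by 50% while leaving hue unchanged.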
*/ red = (double)image->colormap[i].red; green = (double)image->colormap[i].green; blue = (double)image->colormap[i].blue; switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue, percent_saturation, percent_brightness, &red, &green, &blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue, percent_saturation, percent_brightness, &red, &green, &blue); break; } case HSBColorspace: { ModulateHSB(percent_hue, percent_saturation, percent_brightness, &red, &green, &blue); break; } case HSIColorspace: { ModulateHSI(percent_hue, percent_saturation, percent_brightness, &red, &green, &blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue, percent_saturation, percent_brightness, &red, &green, &blue); break; } case HSVColorspace: { ModulateHSV(percent_hue, percent_saturation, percent_brightness, &red, &green, &blue); break; } case HWBColorspace: { ModulateHWB(percent_hue, percent_saturation, percent_brightness, &red, &green, &blue); break; } case LCHColorspace: case LCHabColorspace: { ModulateLCHab(percent_brightness, percent_saturation, percent_hue, &red, &green, &blue); break; } case LCHuvColorspace: { ModulateLCHuv(percent_brightness, percent_saturation, percent_hue, &red, &green, &blue); break; } } image->colormap[i].red = red; image->colormap[i].green = green; image->colormap[i].blue = blue; } /* * Modulate image. */ #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateModulateImage(image, percent_brightness, percent_hue, percent_saturation, colorspace, exception) != MagickFalse) return (MagickTrue); #endif status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double blue, green, red; red = (double)GetPixelRed(image, q); green = (double)GetPixelGreen(image, q); blue = (double)GetPixelBlue(image, q); switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue, percent_saturation, percent_brightness, &red, &green, &blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue, percent_saturation, percent_brightness, &red, &green, &blue); break; } case HSBColorspace: { ModulateHSB(percent_hue, percent_saturation, percent_brightness, &red, &green, &blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue, percent_saturation, percent_brightness, &red, &green, &blue); break; } case HSVColorspace: { ModulateHSV(percent_hue, percent_saturation, percent_brightness, &red, &green, &blue); break; } case HWBColorspace: { ModulateHWB(percent_hue, percent_saturation, percent_brightness, &red, &green, &blue); break; } case LCHabColorspace: { ModulateLCHab(percent_brightness, percent_saturation, percent_hue, &red, &green, &blue); break; } case LCHColorspace: case LCHuvColorspace: { ModulateLCHuv(percent_brightness, percent_saturation, percent_hue, &red, &green, &blue); break; } } SetPixelRed(image, ClampToQuantum(red), q); SetPixelGreen(image, ClampToQuantum(green), q); SetPixelBlue(image, ClampToQuantum(blue), q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, 
ModulateImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % N e g a t e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % NegateImage() negates the colors in the reference image. The * grayscale % option means that only grayscale values within the image are * negated. % % The format of the NegateImage method is: % % * MagickBooleanType NegateImage(Image *image, % const * MagickBooleanType grayscale,ExceptionInfo *exception) % % A description * of each parameter follows: % % o image: the image. % % o grayscale: * If MagickTrue, only negate grayscale pixels within the image. % % o * exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType NegateImage(Image * image, const MagickBooleanType grayscale, ExceptionInfo * exception) { #define NegateImageTag "Negate/Image" CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (image->storage_class == PseudoClass) for (i = 0; i < (ssize_t) image->colors; i++) { /* * Negate colormap. */ if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red = QuantumRange - image->colormap[i].red; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green = QuantumRange - image->colormap[i].green; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue = QuantumRange - image->colormap[i].blue; } /* * Negate image. */ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); if (grayscale != MagickFalse) { for (y = 0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { register ssize_t j; if (IsPixelGray(image, q) == MagickFalse) { q += GetPixelChannels(image); continue; } for (j = 0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image, j); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j] = QuantumRange - q[j]; } q += GetPixelChannels(image); } sync = SyncCacheViewAuthenticPixels(image_view, exception); if (sync == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, NegateImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (MagickTrue); } /* * Negate image. 
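* DirectClass path: each channel carrying the update trait is replaced by
* its complement, QuantumRange-q[j].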
*/ for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j = 0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image, j); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j] = QuantumRange - q[j]; } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, NegateImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % N o r m a l i z e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % The NormalizeImage() method enhances the contrast of a color * image by % mapping the darkest 2 percent of all pixel to black and the * brightest % 1 percent to white. % % The format of the NormalizeImage * method is: % % MagickBooleanType NormalizeImage(Image * *image,ExceptionInfo *exception) % % A description of each parameter * follows: % % o image: the image. % % o exception: return any errors * or warnings in this structure. % */ MagickExport MagickBooleanType NormalizeImage(Image * image, ExceptionInfo * exception) { double black_point, white_point; black_point = (double)image->columns * image->rows * 0.0015; white_point = (double)image->columns * image->rows * 0.9995; return (ContrastStretchImage(image, black_point, white_point, exception)); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S i g m o i d a l C o n t r a s t I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SigmoidalContrastImage() adjusts the contrast of an image with a * non-linear % sigmoidal contrast algorithm. Increase the contrast of the * image using a % sigmoidal transfer function without saturating highlights * or shadows. % Contrast indicates how much to increase the contrast (0 is * none; 3 is % typical; 20 is pushing it); mid-point indicates where * midtones fall in the % resultant image (0 is white; 50% is middle-gray; * 100% is black). Set % sharpen to MagickTrue to increase the image * contrast otherwise the contrast % is reduced. % % The format of the * SigmoidalContrastImage method is: % % MagickBooleanType * SigmoidalContrastImage(Image *image, % const MagickBooleanType * sharpen,const char *levels, % ExceptionInfo *exception) % % A * description of each parameter follows: % % o image: the image. % % o * sharpen: Increase or decrease image contrast. % % o contrast: strength * of the contrast, the larger the number the more % 'threshold-like' it * becomes. % % o midpoint: midpoint of the function as a color value 0 to * QuantumRange. % % o exception: return any errors or warnings in this * structure. % */ /* * ImageMagick 6 has a version of this function which uses LUTs. 
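* Here the transfer function is instead evaluated per pixel channel through
* the ScaledSig() and InverseScaledSig() macros defined below.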
*/ /* * Sigmoidal function Sigmoidal with inflexion point moved to b and "slope * constant" set to a. * * The first version, based on the hyperbolic tangent tanh, when combined with * the scaling step, is an exact arithmetic clone of the sigmoid function * based on the logistic curve. The equivalence is based on the identity * * 1/(1+exp(-t)) = (1+tanh(t/2))/2 * * (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled * sigmoidal derivation is invariant under affine transformations of the * ordinate. * * The tanh version is almost certainly more accurate and cheaper. The 0.5 * factor in the argument is to clone the legacy ImageMagick behavior. The * reason for making the define depend on atanh even though it only uses tanh * has to do with the construction of the inverse of the scaled sigmoidal. */ #if defined(MAGICKCORE_HAVE_ATANH) #define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) ) #else #define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) ) #endif /* * Scaled sigmoidal function: * * ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) / ( Sigmoidal(a,b,1) - * Sigmoidal(a,b,0) ) * * See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and * http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit * of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by * zero. This is fixed below by exiting immediately when contrast is small, * leaving the image (or colormap) unmodified. This appears to be safe * because the series expansion of the logistic sigmoidal function around x=b * is * * 1/2-a*(b-x)/4+... * * so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh). */ #define ScaledSigmoidal(a,b,x) ( \ (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \ (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) ) /* * Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b may * be 0 or 1, the argument of the hyperbolic tangent (resp. logistic * sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even when * creating a LUT from in gamut values, hence the branching. In addition, * HDRI may have out of gamut values. InverseScaledSigmoidal is not a * two-sided inverse of ScaledSigmoidal: It is only a right inverse. This is * unavoidable. */ static inline double InverseScaledSigmoidal(const double a, const double b, const double x) { const double sig0 = Sigmoidal(a, b, 0.0); const double sig1 = Sigmoidal(a, b, 1.0); const double argument = (sig1 - sig0) * x + sig0; const double clamped = ( #if defined(MAGICKCORE_HAVE_ATANH) argument < -1 + MagickEpsilon ? -1 + MagickEpsilon : (argument > 1 - MagickEpsilon ? 1 - MagickEpsilon : argument) ); return (b + (2.0 / a) * atanh(clamped)); #else argument < MagickEpsilon ? MagickEpsilon : (argument > 1 - MagickEpsilon ? 1 - MagickEpsilon : argument) ); return (b - log(1.0 / clamped - 1.0) / a); #endif } MagickExport MagickBooleanType SigmoidalContrastImage(Image * image, const MagickBooleanType sharpen, const double contrast, const double midpoint, ExceptionInfo * exception) { #define SigmoidalContrastImageTag "SigmoidalContrast/Image" #define ScaledSig(x) ( ClampToQuantum(QuantumRange* \ ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) ) #define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \ InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) ) CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* * Convenience macros. 
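* With contrast=3 and midpoint=QuantumRange/2, ScaledSig() fixes 0 and
* QuantumRange in place and maps the midpoint exactly to QuantumRange/2,
* steepening the response around the midpoint.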
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); /* * Side effect: may clamp values unless contrast<MagickEpsilon, in which * case nothing is done. */ if (contrast < MagickEpsilon) return (MagickTrue); /* * Sigmoidal-contrast enhance colormap. */ if (image->storage_class == PseudoClass) { register ssize_t i; if (sharpen != MagickFalse) for (i = 0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red = (MagickRealType) ScaledSig( image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green = (MagickRealType) ScaledSig( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue = (MagickRealType) ScaledSig( image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha = (MagickRealType) ScaledSig( image->colormap[i].alpha); } else for (i = 0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red = (MagickRealType) InverseScaledSig( image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green = (MagickRealType) InverseScaledSig( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue = (MagickRealType) InverseScaledSig( image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha = (MagickRealType) InverseScaledSig( image->colormap[i].alpha); } } /* * Sigmoidal-contrast enhance image. */ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; if (sharpen != MagickFalse) q[i] = ScaledSig(q[i]); else q[i] = InverseScaledSig(q[i]); } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, SigmoidalContrastImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % W h i t e B a l a n c e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % WhiteBalanceImage() applies white balancing to an image * according to a % grayworld assumption in the LAB colorspace. 
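%
%  The mean of the a and b (chroma) channels is measured over the whole
%  image and each pixel's chroma is then shifted toward neutral in
%  proportion to its luminance.
%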
%  The format of the WhiteBalanceImage method is:
%
%      MagickBooleanType WhiteBalanceImage(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to white-balance.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteBalanceImage(Image *image,
  ExceptionInfo *exception)
{
#define WhiteBalanceImageTag  "WhiteBalance/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  double
    a_mean,
    b_mean;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    White balance image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=TransformImageColorspace(image,LabColorspace,exception);
  a_mean=0.0;
  b_mean=0.0;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      a_mean+=QuantumScale*GetPixela(image,p)-0.5;
      b_mean+=QuantumScale*GetPixelb(image,p)-0.5;
      p+=GetPixelChannels(image);
    }
  }
  a_mean/=((double) image->columns*image->rows);
  b_mean/=((double) image->columns*image->rows);
  progress=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        a,
        b;

      /*
        Scale the chroma shift by the pixel's luminance.
      */
      a=(double) GetPixela(image,q)-1.1*GetPixelL(image,q)*a_mean;
      b=(double) GetPixelb(image,q)-1.1*GetPixelL(image,q)*b_mean;
      SetPixela(image,ClampToQuantum(a),q);
      SetPixelb(image,ClampToQuantum(b),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        progress++;
        proceed=SetImageProgress(image,WhiteBalanceImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  artifact=GetImageArtifact(image,"white-balance:vibrance");
  if (artifact != (const char *) NULL)
    {
      ChannelType
        channel_mask;

      double
        black_point;

      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      /*
        Level the a & b channels.
      */
      flags=ParseGeometry(artifact,&geometry_info);
      black_point=geometry_info.rho;
      if ((flags & PercentValue) != 0)
        black_point*=(double) QuantumRange/100.0;
      channel_mask=SetImageChannelMask(image,aChannel | bChannel);
      status&=LevelImage(image,black_point,(double) QuantumRange-black_point,
        1.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  status&=TransformImageColorspace(image,sRGBColorspace,exception);
  return(status);
}
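
/*
  A minimal usage sketch (hypothetical driver code, not part of MagickCore):

    ExceptionInfo *exception = AcquireExceptionInfo();
    Image *image = ReadImage(image_info, exception);  // image_info assumed initialized
    if (image != (Image *) NULL)
      (void) WhiteBalanceImage(image, exception);

  Setting the "white-balance:vibrance" artifact beforehand, e.g. with
  SetImageArtifact(image, "white-balance:vibrance", "10%"), additionally
  levels the a & b channels as handled above.
*/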
/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o G a m m a I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoGammaImage() extracts the 'mean' from the image and adjusts the image
%  gamma so that the mean maps to mid-intensity.
%
%  The format of the AutoGammaImage method is:
%
%      MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image,
  ExceptionInfo *exception)
{
  double
    gamma,
    log_mean,
    mean,
    sans;

  MagickStatusType
    status;

  register ssize_t
    i;

  log_mean=log(0.5);
  if (image->channel_mask == DefaultChannels)
    {
      /*
        Apply gamma correction equally across all given channels.
      */
      (void) GetImageMean(image,&mean,&sans,exception);
      gamma=log(mean*QuantumScale)/log_mean;
      return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception));
    }
  /*
    Auto-gamma each channel separately.
  */
  status=MagickTrue;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ChannelType
      channel_mask;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits & UpdatePixelTrait) == 0)
      continue;
    channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i));
    status=GetImageMean(image,&mean,&sans,exception);
    gamma=log(mean*QuantumScale)/log_mean;
    status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception);
    (void) SetImageChannelMask(image,channel_mask);
    if (status == MagickFalse)
      break;
  }
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o L e v e l I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoLevelImage() adjusts the levels of a particular image channel by
%  scaling the minimum and maximum values to the full quantum range.
%
%  The format of the AutoLevelImage method is:
%
%      MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to auto-level.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image,
  ExceptionInfo *exception)
{
  return(MinMaxStretchImage(image,0.0,0.0,1.0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B r i g h t n e s s C o n t r a s t I m a g e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BrightnessContrastImage() changes the brightness and/or contrast of an
%  image.  It converts the brightness and contrast parameters into slope and
%  intercept and calls a polynomial function to apply them to the image.
%
%  The format of the BrightnessContrastImage method is:
%
%      MagickBooleanType BrightnessContrastImage(Image *image,
%        const double brightness,const double contrast,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o brightness: the brightness percent (-100 .. 100).
%
%    o contrast: the contrast percent (-100 .. 100).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast,ExceptionInfo *exception)
{
#define BrightnessContrastImageTag  "BrightnessContrast/Image"

  double
    alpha,
    coefficients[2],
    intercept,
    slope;

  MagickBooleanType
    status;

  /*
    Compute slope and intercept.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  alpha=contrast;
  slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  status=FunctionImage(image,PolynomialFunction,2,coefficients,exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C L A H E I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CLAHEImage() is a variant of adaptive histogram equalization in which the
%  contrast amplification is limited, so as to reduce the problem of noise
%  amplification.
%
%  Adapted from implementation by Karel Zuiderveld, karel@cv.ruu.nl in
%  "Graphics Gems IV", Academic Press, 1994.
%
%  The format of the CLAHEImage method is:
%
%      MagickBooleanType CLAHEImage(Image *image,const size_t width,
%        const size_t height,const size_t number_bins,const double clip_limit,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the width of the tile divisions to use in horizontal direction.
% % o height: the height of the tile * divisions to use in vertical direction. % % o number_bins: number of * bins for histogram ("dynamic range"). % % o clip_limit: contrast limit * for localised changes in contrast. A limit % less than 1 results in * standard non-contrast limited AHE. % % o exception: return any errors * or warnings in this structure. % */ typedef struct _RangeInfo { unsigned short min, max; } RangeInfo; static void ClipCLAHEHistogram(const double clip_limit, const size_t number_bins, size_t * histogram) { #define NumberCLAHEGrays (65536) register ssize_t i; size_t cumulative_excess, previous_excess, step; ssize_t excess; /* * Compute total number of excess pixels. */ cumulative_excess = 0; for (i = 0; i < (ssize_t) number_bins; i++) { excess = (ssize_t) histogram[i] - (ssize_t) clip_limit; if (excess > 0) cumulative_excess += excess; } /* * Clip histogram and redistribute excess pixels across all bins. */ step = cumulative_excess / number_bins; excess = (ssize_t) (clip_limit - step); for (i = 0; i < (ssize_t) number_bins; i++) { if ((double)histogram[i] > clip_limit) histogram[i] = (size_t) clip_limit; else if ((ssize_t) histogram[i] > excess) { cumulative_excess -= histogram[i] - excess; histogram[i] = (size_t) clip_limit; } else { cumulative_excess -= step; histogram[i] += step; } } /* * Redistribute remaining excess. */ do { register size_t * p; size_t * q; previous_excess = cumulative_excess; p = histogram; q = histogram + number_bins; while ((cumulative_excess != 0) && (p < q)) { step = number_bins / cumulative_excess; if (step < 1) step = 1; for (p = histogram; (p < q) && (cumulative_excess != 0); p += step) if ((double)*p < clip_limit) { (*p)++; cumulative_excess--; } p++; } } while ((cumulative_excess != 0) && (cumulative_excess < previous_excess)); } static void GenerateCLAHEHistogram(const RectangleInfo * clahe_info, const RectangleInfo * tile_info, const size_t number_bins, const unsigned short *lut, const unsigned short *pixels, size_t * histogram) { register const unsigned short *p; register ssize_t i; /* * Classify the pixels into a gray histogram. */ for (i = 0; i < (ssize_t) number_bins; i++) histogram[i] = 0L; p = pixels; for (i = 0; i < (ssize_t) tile_info->height; i++) { const unsigned short *q; q = p + tile_info->width; while (p < q) histogram[lut[*p++]]++; q += clahe_info->width; p = q - tile_info->width; } } static void InterpolateCLAHE(const RectangleInfo * clahe_info, const size_t * Q12, const size_t * Q22, const size_t * Q11, const size_t * Q21, const RectangleInfo * tile, const unsigned short *lut, unsigned short *pixels) { ssize_t y; unsigned short intensity; /* * Bilinear interpolate four tiles to eliminate boundary artifacts. */ for (y = (ssize_t) tile->height; y > 0; y--) { register ssize_t x; for (x = (ssize_t) tile->width; x > 0; x--) { intensity = lut[*pixels]; *pixels++ = (unsigned short)(PerceptibleReciprocal((double)tile->width * tile->height) * (y * (x * Q12[intensity] + (tile->width - x) * Q22[intensity]) + (tile->height - y) * (x * Q11[intensity] + (tile->width - x) * Q21[intensity]))); } pixels += (clahe_info->width - tile->width); } } static void GenerateCLAHELut(const RangeInfo * range_info, const size_t number_bins, unsigned short *lut) { ssize_t i; unsigned short delta; /* * Scale input image [intensity min,max] to [0,number_bins-1]. 
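  For example, min=0, max=65535 and number_bins=256 give delta=256 and
  lut[i]=i/256, i.e. bin indices 0..255.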
*/ delta = (unsigned short)((range_info->max - range_info->min) / number_bins + 1); for (i = (ssize_t) range_info->min; i <= (ssize_t) range_info->max; i++) lut[i] = (unsigned short)((i - range_info->min) / delta); } static void MapCLAHEHistogram(const RangeInfo * range_info, const size_t number_bins, const size_t number_pixels, size_t * histogram) { double scale, sum; register ssize_t i; /* * Rescale histogram to range [min-intensity .. max-intensity]. */ scale = (double)(range_info->max - range_info->min) / number_pixels; sum = 0.0; for (i = 0; i < (ssize_t) number_bins; i++) { sum += histogram[i]; histogram[i] = (size_t) (range_info->min + scale * sum); if (histogram[i] > range_info->max) histogram[i] = range_info->max; } } static MagickBooleanType CLAHE(const RectangleInfo * clahe_info, const RectangleInfo * tile_info, const RangeInfo * range_info, const size_t number_bins, const double clip_limit, unsigned short *pixels) { MemoryInfo * tile_cache; register unsigned short *p; size_t limit, *tiles; ssize_t y; unsigned short *lut; /* * Constrast limited adapted histogram equalization. */ if (clip_limit == 1.0) return (MagickTrue); tile_cache = AcquireVirtualMemory((size_t) clahe_info->x * clahe_info->y, number_bins * sizeof(*tiles)); if (tile_cache == (MemoryInfo *) NULL) return (MagickFalse); lut = (unsigned short *)AcquireQuantumMemory(NumberCLAHEGrays, sizeof(*lut)); if (lut == (unsigned short *)NULL) { tile_cache = RelinquishVirtualMemory(tile_cache); return (MagickFalse); } tiles = (size_t *) GetVirtualMemoryBlob(tile_cache); limit = (size_t) (clip_limit * (tile_info->width * tile_info->height) / number_bins); if (limit < 1UL) limit = 1UL; /* * Generate greylevel mappings for each tile. */ GenerateCLAHELut(range_info, number_bins, lut); p = pixels; for (y = 0; y < (ssize_t) clahe_info->y; y++) { register ssize_t x; for (x = 0; x < (ssize_t) clahe_info->x; x++) { size_t * histogram; histogram = tiles + (number_bins * (y * clahe_info->x + x)); GenerateCLAHEHistogram(clahe_info, tile_info, number_bins, lut, p, histogram); ClipCLAHEHistogram((double)limit, number_bins, histogram); MapCLAHEHistogram(range_info, number_bins, tile_info->width * tile_info->height, histogram); p += tile_info->width; } p += clahe_info->width * (tile_info->height - 1); } /* * Interpolate greylevel mappings to get CLAHE image. */ p = pixels; for (y = 0; y <= (ssize_t) clahe_info->y; y++) { OffsetInfo offset; RectangleInfo tile; register ssize_t x; tile.height = tile_info->height; tile.y = y - 1; offset.y = tile.y + 1; if (y == 0) { /* * Top row. */ tile.height = tile_info->height >> 1; tile.y = 0; offset.y = 0; } else if (y == (ssize_t) clahe_info->y) { /* * Bottom row. */ tile.height = (tile_info->height + 1) >> 1; tile.y = clahe_info->y - 1; offset.y = tile.y; } for (x = 0; x <= (ssize_t) clahe_info->x; x++) { tile.width = tile_info->width; tile.x = x - 1; offset.x = tile.x + 1; if (x == 0) { /* * Left column. */ tile.width = tile_info->width >> 1; tile.x = 0; offset.x = 0; } else if (x == (ssize_t) clahe_info->x) { /* * Right column. 
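  Like the top and bottom rows, the edge columns use half-width tiles so
  the interpolation grid reaches the image border.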
*/ tile.width = (tile_info->width + 1) >> 1; tile.x = clahe_info->x - 1; offset.x = tile.x; } InterpolateCLAHE(clahe_info, tiles + (number_bins * (tile.y * clahe_info->x + tile.x)), /* Q12 */ tiles + (number_bins * (tile.y * clahe_info->x + offset.x)), /* Q22 */ tiles + (number_bins * (offset.y * clahe_info->x + tile.x)), /* Q11 */ tiles + (number_bins * (offset.y * clahe_info->x + offset.x)), /* Q21 */ &tile, lut, p); p += tile.width; } p += clahe_info->width * (tile.height - 1); } lut = (unsigned short *)RelinquishMagickMemory(lut); tile_cache = RelinquishVirtualMemory(tile_cache); return (MagickTrue); } MagickExport MagickBooleanType CLAHEImage(Image * image, const size_t width, const size_t height, const size_t number_bins, const double clip_limit, ExceptionInfo * exception) { #define CLAHEImageTag "CLAHE/Image" CacheView * image_view; ColorspaceType colorspace; MagickBooleanType status; MagickOffsetType progress; MemoryInfo * pixel_cache; RangeInfo range_info; RectangleInfo clahe_info, tile_info; size_t n; ssize_t y; unsigned short *pixels; /* * Configure CLAHE parameters. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); range_info.min = 0; range_info.max = NumberCLAHEGrays - 1; tile_info.width = width; if (tile_info.width == 0) tile_info.width = image->columns >> 3; tile_info.height = height; if (tile_info.height == 0) tile_info.height = image->rows >> 3; tile_info.x = 0; if ((image->columns % tile_info.width) != 0) tile_info.x = (ssize_t) tile_info.width - (image->columns % tile_info.width); tile_info.y = 0; if ((image->rows % tile_info.height) != 0) tile_info.y = (ssize_t) tile_info.height - (image->rows % tile_info.height); clahe_info.width = image->columns + tile_info.x; clahe_info.height = image->rows + tile_info.y; clahe_info.x = (ssize_t) clahe_info.width / tile_info.width; clahe_info.y = (ssize_t) clahe_info.height / tile_info.height; pixel_cache = AcquireVirtualMemory(clahe_info.width, clahe_info.height * sizeof(*pixels)); if (pixel_cache == (MemoryInfo *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); pixels = (unsigned short *)GetVirtualMemoryBlob(pixel_cache); colorspace = image->colorspace; if (TransformImageColorspace(image, LabColorspace, exception) == MagickFalse) { pixel_cache = RelinquishVirtualMemory(pixel_cache); return (MagickFalse); } /* * Initialize CLAHE pixels. */ image_view = AcquireVirtualCacheView(image, exception); progress = 0; status = MagickTrue; n = 0; for (y = 0; y < (ssize_t) clahe_info.height; y++) { register const Quantum * magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, -(tile_info.x >> 1), y - (tile_info.y >> 1), clahe_info.width, 1, exception); if (p == (const Quantum *)NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) clahe_info.width; x++) { pixels[n++] = ScaleQuantumToShort(p[0]); p += GetPixelChannels(image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, CLAHEImageTag, progress, 2 * GetPixelChannels(image)); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); status = CLAHE(&clahe_info, &tile_info, &range_info, number_bins == 0 ? 
(size_t) 128 : MagickMin(number_bins,256),clip_limit,pixels);
  if (status == MagickFalse)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  /*
    Push CLAHE pixels to CLAHE image.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
  n=clahe_info.width*(tile_info.y >> 1);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    n+=tile_info.x >> 1;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q[0]=ScaleShortToQuantum(pixels[n++]);
      q+=GetPixelChannels(image);
    }
    n+=(clahe_info.width-image->columns-(tile_info.x >> 1));
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        progress++;
        proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  pixel_cache=RelinquishVirtualMemory(pixel_cache);
  if (TransformImageColorspace(image,colorspace,exception) == MagickFalse)
    status=MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l u t I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClutImage() replaces each color value in the given image with a
%  replacement value, using the original value as an index into a Color
%  Look-Up Table (CLUT) in the form of an image.  The values are extracted
%  along a diagonal of the CLUT image, so either a horizontal or vertical
%  gradient image can be used.
%
%  Typically this is used either to re-color a gray-scale image according to
%  a color gradient in the CLUT image, or to perform a freeform histogram
%  (level) adjustment according to the (typically gray-scale) gradient in
%  the CLUT image.
%
%  When the 'channel' mask includes the matte/alpha transparency channel but
%  one image has no such channel, it is assumed that that image is a simple
%  gray-scale image that will affect the alpha channel values, either for
%  gray-scale coloring (with transparent or semi-transparent colors), or a
%  histogram adjustment of existing alpha channel values.  If both images
%  have matte channels, direct and normal indexing is applied, which is
%  rarely used.
%
%  The format of the ClutImage method is:
%
%      MagickBooleanType ClutImage(Image *image,Image *clut_image,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image, which is replaced by indexed CLUT values.
%
%    o clut_image: the color lookup table image for replacement color values.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
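%
%  Note: entry i of the lookup map is interpolated along the CLUT diagonal
%  at position (i*(columns-adjust)/MaxMap, i*(rows-adjust)/MaxMap), where
%  adjust is 0 for integer interpolation and 1 otherwise, so an Nx1 or 1xN
%  gradient works as well as a square one.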
% */ MagickExport MagickBooleanType ClutImage(Image * image, const Image * clut_image, const PixelInterpolateMethod method, ExceptionInfo * exception) { #define ClutImageTag "Clut/Image" CacheView * clut_view, *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo * clut_map; register ssize_t i; ssize_t adjust, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(clut_image != (Image *) NULL); assert(clut_image->signature == MagickCoreSignature); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsGrayColorspace(clut_image->colorspace) == MagickFalse)) (void)SetImageColorspace(image, sRGBColorspace, exception); clut_map = (PixelInfo *) AcquireQuantumMemory(MaxMap + 1UL, sizeof(*clut_map)); if (clut_map == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); /* * Clut image. */ status = MagickTrue; progress = 0; adjust = (ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1); clut_view = AcquireVirtualCacheView(clut_image, exception); for (i = 0; i <= (ssize_t) MaxMap; i++) { GetPixelInfo(clut_image, clut_map + i); status = InterpolatePixelInfo(clut_image, clut_view, method, (double)i * (clut_image->columns - adjust) / MaxMap, (double)i * (clut_image->rows - adjust) / MaxMap, clut_map + i, exception); if (status == MagickFalse) break; } clut_view = DestroyCacheView(clut_view); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } GetPixelInfo(image, &pixel); for (x = 0; x < (ssize_t) image->columns; x++) { PixelTrait traits; GetPixelInfoPixel(image, q, &pixel); traits = GetPixelChannelTraits(image, RedPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.red = clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.red))].red; traits = GetPixelChannelTraits(image, GreenPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.green = clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.green))].green; traits = GetPixelChannelTraits(image, BluePixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.blue = clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.blue))].blue; traits = GetPixelChannelTraits(image, BlackPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.black = clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.black))].black; traits = GetPixelChannelTraits(image, AlphaPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.alpha = clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.alpha))].alpha; SetPixelViaPixelInfo(image, &pixel, q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, ClutImageTag, progress, 
image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); clut_map = (PixelInfo *) RelinquishMagickMemory(clut_map); if ((clut_image->alpha_trait != UndefinedPixelTrait) && ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)) (void)SetImageAlphaChannel(image, ActivateAlphaChannel, exception); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o l o r D e c i s i o n L i s t I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ColorDecisionListImage() accepts a lightweight Color Correction * Collection % (CCC) file which solely contains one or more color * corrections and applies % the correction to the image. Here is a sample * CCC file: % % <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2"> % * <ColorCorrection id="cc03345"> % <SOPNode> % * <Slope> 0.9 1.2 0.5 </Slope> % <Offset> 0.4 -0.5 0.6 * </Offset> % <Power> 1.0 0.8 1.5 </Power> % * </SOPNode> % <SATNode> % <Saturation> * 0.85 </Saturation> % </SATNode> % * </ColorCorrection> % </ColorCorrectionCollection> % % which includes * the slop, offset, and power for each of the RGB channels % as well as the * saturation. % % The format of the ColorDecisionListImage method is: % % * MagickBooleanType ColorDecisionListImage(Image *image, % const char * *color_correction_collection,ExceptionInfo *exception) % % A description * of each parameter follows: % % o image: the image. % % o * color_correction_collection: the color correction collection in XML. % % * o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ColorDecisionListImage(Image * image, const char *color_correction_collection, ExceptionInfo * exception) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView * image_view; char token[MagickPathExtent]; ColorCorrection color_correction; const char *content, *p; MagickBooleanType status; MagickOffsetType progress; PixelInfo * cdl_map; register ssize_t i; ssize_t y; XMLTreeInfo * cc, *ccc, *sat, *sop; /* * Allocate and initialize cdl maps. 
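  Each map entry realizes the ASC CDL slope-offset-power (SOP) transfer on
  the normalized intensity v=i/MaxMap:

    cdl(v) = (slope*v + offset)^power

  The saturation term is applied afterwards, about the pixel luma.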
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (color_correction_collection == (const char *)NULL) return (MagickFalse); ccc = NewXMLTree((const char *)color_correction_collection, exception); if (ccc == (XMLTreeInfo *) NULL) return (MagickFalse); cc = GetXMLTreeChild(ccc, "ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc = DestroyXMLTree(ccc); return (MagickFalse); } color_correction.red.slope = 1.0; color_correction.red.offset = 0.0; color_correction.red.power = 1.0; color_correction.green.slope = 1.0; color_correction.green.offset = 0.0; color_correction.green.power = 1.0; color_correction.blue.slope = 1.0; color_correction.blue.offset = 0.0; color_correction.blue.power = 1.0; color_correction.saturation = 0.0; sop = GetXMLTreeChild(cc, "SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo * offset, *power, *slope; slope = GetXMLTreeChild(sop, "Slope"); if (slope != (XMLTreeInfo *) NULL) { content = GetXMLTreeContent(slope); p = (const char *)content; for (i = 0; (*p != '\0') && (i < 3); i++) { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); switch (i) { case 0: { color_correction.red.slope = StringToDouble(token, (char **)NULL); break; } case 1: { color_correction.green.slope = StringToDouble(token, (char **)NULL); break; } case 2: { color_correction.blue.slope = StringToDouble(token, (char **)NULL); break; } } } } offset = GetXMLTreeChild(sop, "Offset"); if (offset != (XMLTreeInfo *) NULL) { content = GetXMLTreeContent(offset); p = (const char *)content; for (i = 0; (*p != '\0') && (i < 3); i++) { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); switch (i) { case 0: { color_correction.red.offset = StringToDouble(token, (char **)NULL); break; } case 1: { color_correction.green.offset = StringToDouble(token, (char **)NULL); break; } case 2: { color_correction.blue.offset = StringToDouble(token, (char **)NULL); break; } } } } power = GetXMLTreeChild(sop, "Power"); if (power != (XMLTreeInfo *) NULL) { content = GetXMLTreeContent(power); p = (const char *)content; for (i = 0; (*p != '\0') && (i < 3); i++) { (void)GetNextToken(p, &p, MagickPathExtent, token); if (*token == ',') (void)GetNextToken(p, &p, MagickPathExtent, token); switch (i) { case 0: { color_correction.red.power = StringToDouble(token, (char **)NULL); break; } case 1: { color_correction.green.power = StringToDouble(token, (char **)NULL); break; } case 2: { color_correction.blue.power = StringToDouble(token, (char **)NULL); break; } } } } } sat = GetXMLTreeChild(cc, "SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo * saturation; saturation = GetXMLTreeChild(sat, "Saturation"); if (saturation != (XMLTreeInfo *) NULL) { content = GetXMLTreeContent(saturation); p = (const char *)content; (void)GetNextToken(p, &p, MagickPathExtent, token); color_correction.saturation = StringToDouble(token, (char **)NULL); } } ccc = DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void)LogMagickEvent(TransformEvent, GetMagickModule(), " Color Correction Collection:"); (void)LogMagickEvent(TransformEvent, GetMagickModule(), " color_correction.red.slope: %g", color_correction.red.slope); (void)LogMagickEvent(TransformEvent, GetMagickModule(), " color_correction.red.offset: %g", color_correction.red.offset); 
(void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.power: %g",color_correction.red.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.slope: %g",color_correction.green.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.offset: %g",color_correction.green.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.power: %g",color_correction.green.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.slope: %g",color_correction.blue.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.offset: %g",color_correction.blue.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.power: %g",color_correction.blue.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.saturation: %g",color_correction.saturation);
    }
  cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
  if (cdl_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    cdl_map[i].red=(double) ScaleMapToQuantum((double) (MaxMap*(pow(
      color_correction.red.slope*i/MaxMap+color_correction.red.offset,
      color_correction.red.power))));
    cdl_map[i].green=(double) ScaleMapToQuantum((double) (MaxMap*(pow(
      color_correction.green.slope*i/MaxMap+color_correction.green.offset,
      color_correction.green.power))));
    cdl_map[i].blue=(double) ScaleMapToQuantum((double) (MaxMap*(pow(
      color_correction.blue.slope*i/MaxMap+color_correction.blue.offset,
      color_correction.blue.power))));
  }
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Apply transfer function to colormap.
      */
      double
        luma;

      luma=0.21267f*image->colormap[i].red+0.71526f*
        image->colormap[i].green+0.07217f*image->colormap[i].blue;
      image->colormap[i].red=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma);
      image->colormap[i].green=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-
        luma);
      image->colormap[i].blue=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-
        luma);
    }
  /*
    Apply transfer function to image.
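  Per channel: out = luma + saturation*(cdl(in)-luma), where luma is the
  Rec. 709-style weighted sum of the RGB channels; saturation=1.0 applies
  the full CDL result and 0.0 collapses the pixel to its luma.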
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      luma;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      luma=0.21267f*GetPixelRed(image,q)+0.71526f*GetPixelGreen(image,q)+
        0.07217f*GetPixelBlue(image,q);
      SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q);
      SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q);
      SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
          progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n t r a s t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastImage() enhances the intensity differences between the lighter
%  and darker elements of the image.  Set sharpen to MagickTrue to increase
%  the image contrast, otherwise the contrast is reduced.
%
%  The format of the ContrastImage method is:
%
%      MagickBooleanType ContrastImage(Image *image,
%        const MagickBooleanType sharpen,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o sharpen: increase or decrease image contrast.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static void Contrast(const int sign,double *red,double *green,double *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Enhance contrast: dark colors become darker, light colors become
    lighter.
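    The curve s(b)=(sin(MagickPI*(b-1/2))+1)/2 fixes 0, 1/2 and 1; each call
    moves brightness halfway toward s(b) when sharpening (sign=+1) and away
    from it otherwise (sign=-1).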
*/ assert(red != (double *)NULL); assert(green != (double *)NULL); assert(blue != (double *)NULL); hue = 0.0; saturation = 0.0; brightness = 0.0; ConvertRGBToHSB(*red, *green, *blue, &hue, &saturation, &brightness); brightness += 0.5 * sign * (0.5 * (sin((double)(MagickPI * (brightness - 0.5))) + 1.0) - brightness); if (brightness > 1.0) brightness = 1.0; else if (brightness < 0.0) brightness = 0.0; ConvertHSBToRGB(hue, saturation, brightness, red, green, blue); } MagickExport MagickBooleanType ContrastImage(Image * image, const MagickBooleanType sharpen, ExceptionInfo * exception) { #define ContrastImageTag "Contrast/Image" CacheView * image_view; int sign; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateContrastImage(image, sharpen, exception) != MagickFalse) return (MagickTrue); #endif if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); sign = sharpen != MagickFalse ? 1 : -1; if (image->storage_class == PseudoClass) { /* * Contrast enhance colormap. */ for (i = 0; i < (ssize_t) image->colors; i++) { double blue, green, red; red = (double)image->colormap[i].red; green = (double)image->colormap[i].green; blue = (double)image->colormap[i].blue; Contrast(sign, &red, &green, &blue); image->colormap[i].red = (MagickRealType) red; image->colormap[i].green = (MagickRealType) green; image->colormap[i].blue = (MagickRealType) blue; } } /* * Contrast enhance image. */ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { double blue, green, red; register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { red = (double)GetPixelRed(image, q); green = (double)GetPixelGreen(image, q); blue = (double)GetPixelBlue(image, q); Contrast(sign, &red, &green, &blue); SetPixelRed(image, ClampToQuantum(red), q); SetPixelGreen(image, ClampToQuantum(green), q); SetPixelBlue(image, ClampToQuantum(blue), q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, ContrastImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o n t r a s t S t r e t c h I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ContrastStretchImage() is a simple image enhancement technique * that attempts % to improve the contrast in an image by 'stretching' the * range of intensity % values it contains to span a desired range of * values. 
It differs from the % more sophisticated histogram equalization * in that it can only apply a % linear scaling function to the image pixel * values. As a result the % 'enhancement' is less harsh. % % The format * of the ContrastStretchImage method is: % % MagickBooleanType * ContrastStretchImage(Image *image, % const char * *levels,ExceptionInfo *exception) % % A description of each parameter * follows: % % o image: the image. % % o black_point: the black point. * % % o white_point: the white point. % % o levels: Specify the levels * where the black and white points have the % range of 0 to * number-of-pixels (e.g. 1%, 10x90%, etc.). % % o exception: return any * errors or warnings in this structure. % */ MagickExport MagickBooleanType ContrastStretchImage(Image * image, const double black_point, const double white_point, ExceptionInfo * exception) { #define MaxRange(color) ((double) ScaleQuantumToMap((Quantum) (color))) #define ContrastStretchImageTag "ContrastStretch/Image" CacheView * image_view; double *black, *histogram, *stretch_map, *white; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* * Allocate histogram and stretch map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (SetImageGray(image, exception) != MagickFalse) (void)SetImageColorspace(image, GRAYColorspace, exception); black = (double *)AcquireQuantumMemory(MaxPixelChannels, sizeof(*black)); white = (double *)AcquireQuantumMemory(MaxPixelChannels, sizeof(*white)); histogram = (double *)AcquireQuantumMemory(MaxMap + 1UL, MaxPixelChannels * sizeof(*histogram)); stretch_map = (double *)AcquireQuantumMemory(MaxMap + 1UL, MaxPixelChannels * sizeof(*stretch_map)); if ((black == (double *)NULL) || (white == (double *)NULL) || (histogram == (double *)NULL) || (stretch_map == (double *)NULL)) { if (stretch_map != (double *)NULL) stretch_map = (double *)RelinquishMagickMemory(stretch_map); if (histogram != (double *)NULL) histogram = (double *)RelinquishMagickMemory(histogram); if (white != (double *)NULL) white = (double *)RelinquishMagickMemory(white); if (black != (double *)NULL) black = (double *)RelinquishMagickMemory(black); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } /* * Form histogram. */ status = MagickTrue; (void)memset(histogram, 0, (MaxMap + 1) * GetPixelChannels(image) * sizeof(*histogram)); image_view = AcquireVirtualCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register const Quantum * magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); if (p == (const Quantum *)NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double pixel; pixel = GetPixelIntensity(image, p); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { if (image->channel_mask != DefaultChannels) pixel = (double)p[i]; histogram[GetPixelChannels(image) * ScaleQuantumToMap( ClampToQuantum(pixel)) + i]++; } p += GetPixelChannels(image); } } image_view = DestroyCacheView(image_view); /* * Find the histogram boundaries by locating the black/white levels. 
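  The stretch map built below sends bin j to 0 below black[i], to
  QuantumRange above white[i], and linearly to
  QuantumRange*(j-black[i])/(white[i]-black[i]) in between.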
*/ for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; register ssize_t j; black[i] = 0.0; white[i] = MaxRange(QuantumRange); intensity = 0.0; for (j = 0; j <= (ssize_t) MaxMap; j++) { intensity += histogram[GetPixelChannels(image) * j + i]; if (intensity > black_point) break; } black[i] = (double)j; intensity = 0.0; for (j = (ssize_t) MaxMap; j != 0; j--) { intensity += histogram[GetPixelChannels(image) * j + i]; if (intensity > ((double)image->columns * image->rows - white_point)) break; } white[i] = (double)j; } histogram = (double *)RelinquishMagickMemory(histogram); /* * Stretch the histogram to create the stretched image mapping. */ (void)memset(stretch_map, 0, (MaxMap + 1) * GetPixelChannels(image) * sizeof(*stretch_map)); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { register ssize_t j; for (j = 0; j <= (ssize_t) MaxMap; j++) { double gamma; gamma = PerceptibleReciprocal(white[i] - black[i]); if (j < (ssize_t) black[i]) stretch_map[GetPixelChannels(image) * j + i] = 0.0; else if (j > (ssize_t) white[i]) stretch_map[GetPixelChannels(image) * j + i] = (double)QuantumRange; else if (black[i] != white[i]) stretch_map[GetPixelChannels(image) * j + i] = (double)ScaleMapToQuantum( (double)(MaxMap * gamma * (j - black[i]))); } } if (image->storage_class == PseudoClass) { register ssize_t j; /* * Stretch-contrast colormap. */ for (j = 0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { i = GetPixelChannelOffset(image, RedPixelChannel); image->colormap[j].red = stretch_map[GetPixelChannels(image) * ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red)) + i]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { i = GetPixelChannelOffset(image, GreenPixelChannel); image->colormap[j].green = stretch_map[GetPixelChannels(image) * ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green)) + i]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { i = GetPixelChannelOffset(image, BluePixelChannel); image->colormap[j].blue = stretch_map[GetPixelChannels(image) * ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue)) + i]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { i = GetPixelChannelOffset(image, AlphaPixelChannel); image->colormap[j].alpha = stretch_map[GetPixelChannels(image) * ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha)) + i]; } } } /* * Stretch-contrast image. 
*/ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j = 0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image, j); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; if (black[j] == white[j]) continue; q[j] = ClampToQuantum(stretch_map[GetPixelChannels(image) * ScaleQuantumToMap(q[j]) + j]); } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, ContrastStretchImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); stretch_map = (double *)RelinquishMagickMemory(stretch_map); white = (double *)RelinquishMagickMemory(white); black = (double *)RelinquishMagickMemory(black); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % E n h a n c e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % EnhanceImage() applies a digital filter that improves the * quality of a % noisy image. % % The format of the EnhanceImage method * is: % % Image *EnhanceImage(const Image *image,ExceptionInfo * *exception) % % A description of each parameter follows: % % o image: * the image. % % o exception: return any errors or warnings in this * structure. 
% */ MagickExport Image * EnhanceImage(const Image * image, ExceptionInfo * exception) { #define EnhanceImageTag "Enhance/Image" #define EnhancePixel(weight) \ mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \ distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \ distance_squared=(4.0+mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \ distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \ distance_squared+=(7.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \ distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \ distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \ distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \ distance_squared+=(5.0-mean)*distance*distance; \ if (distance_squared < 0.069) \ { \ aggregate.red+=(weight)*GetPixelRed(image,r); \ aggregate.green+=(weight)*GetPixelGreen(image,r); \ aggregate.blue+=(weight)*GetPixelBlue(image,r); \ aggregate.black+=(weight)*GetPixelBlack(image,r); \ aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \ total_weight+=(weight); \ } \ r+=GetPixelChannels(image); CacheView * enhance_view, *image_view; Image * enhance_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* * Initialize enhanced image attributes. */ assert(image != (const Image *)NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); enhance_image = CloneImage(image, 0, 0, MagickTrue, exception); if (enhance_image == (Image *) NULL) return ((Image *) NULL); if (SetImageStorageClass(enhance_image, DirectClass, exception) == MagickFalse) { enhance_image = DestroyImage(enhance_image); return ((Image *) NULL); } /* * Enhance image. 
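  Each output pixel is a weighted mean of those 5x5 neighbors whose color
  distance from the center pixel is small (distance_squared < 0.069),
  weighted by the kernel

     5  8 10  8  5
     8 20 40 20  8
    10 40 80 40 10
     8 20 40 20  8
     5  8 10  8  5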
*/ status = MagickTrue; progress = 0; image_view = AcquireVirtualCacheView(image, exception); enhance_view = AcquireAuthenticCacheView(enhance_image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,enhance_image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register const Quantum * magick_restrict p; register Quantum * magick_restrict q; register ssize_t x; ssize_t center; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, -2, y - 2, image->columns + 4, 5, exception); q = QueueCacheViewAuthenticPixels(enhance_view, 0, y, enhance_image->columns, 1, exception); if ((p == (const Quantum *)NULL) || (q == (Quantum *) NULL)) { status = MagickFalse; continue; } center = (ssize_t) GetPixelChannels(image) * (2 * (image->columns + 4) + 2); GetPixelInfo(image, &pixel); for (x = 0; x < (ssize_t) image->columns; x++) { double distance, distance_squared, mean, total_weight; PixelInfo aggregate; register const Quantum * magick_restrict r; GetPixelInfo(image, &aggregate); total_weight = 0.0; GetPixelInfoPixel(image, p + center, &pixel); r = p; EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); r = p + GetPixelChannels(image) * (image->columns + 4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r = p + 2 * GetPixelChannels(image) * (image->columns + 4); EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0); EnhancePixel(40.0); EnhancePixel(10.0); r = p + 3 * GetPixelChannels(image) * (image->columns + 4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r = p + 4 * GetPixelChannels(image) * (image->columns + 4); EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); if (total_weight > MagickEpsilon) { pixel.red = ((aggregate.red + total_weight / 2.0) / total_weight); pixel.green = ((aggregate.green + total_weight / 2.0) / total_weight); pixel.blue = ((aggregate.blue + total_weight / 2.0) / total_weight); pixel.black = ((aggregate.black + total_weight / 2.0) / total_weight); pixel.alpha = ((aggregate.alpha + total_weight / 2.0) / total_weight); } SetPixelViaPixelInfo(enhance_image, &pixel, q); p += GetPixelChannels(image); q += GetPixelChannels(enhance_image); } if (SyncCacheViewAuthenticPixels(enhance_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, EnhanceImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } enhance_view = DestroyCacheView(enhance_view); image_view = DestroyCacheView(image_view); if (status == MagickFalse) enhance_image = DestroyImage(enhance_image); return (enhance_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % E q u a l i z e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % EqualizeImage() applies a histogram equalization to the image. % * % The format of the EqualizeImage method is: % % MagickBooleanType * EqualizeImage(Image *image,ExceptionInfo *exception) % % A description of * each parameter follows: % % o image: the image. 
% % o exception: * return any errors or warnings in this structure. % */ MagickExport MagickBooleanType EqualizeImage(Image * image, ExceptionInfo * exception) { #define EqualizeImageTag "Equalize/Image" CacheView * image_view; double black[CompositePixelChannel + 1], *equalize_map, *histogram, *map, white[CompositePixelChannel + 1]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* * Allocate and initialize histogram arrays. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateEqualizeImage(image, exception) != MagickFalse) return (MagickTrue); #endif if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); equalize_map = (double *)AcquireQuantumMemory(MaxMap + 1UL, MaxPixelChannels * sizeof(*equalize_map)); histogram = (double *)AcquireQuantumMemory(MaxMap + 1UL, MaxPixelChannels * sizeof(*histogram)); map = (double *)AcquireQuantumMemory(MaxMap + 1UL, MaxPixelChannels * sizeof(*map)); if ((equalize_map == (double *)NULL) || (histogram == (double *)NULL) || (map == (double *)NULL)) { if (map != (double *)NULL) map = (double *)RelinquishMagickMemory(map); if (histogram != (double *)NULL) histogram = (double *)RelinquishMagickMemory(histogram); if (equalize_map != (double *)NULL) equalize_map = (double *)RelinquishMagickMemory(equalize_map); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } /* * Form histogram. */ status = MagickTrue; (void)memset(histogram, 0, (MaxMap + 1) * GetPixelChannels(image) * sizeof(*histogram)); image_view = AcquireVirtualCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register const Quantum * magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); if (p == (const Quantum *)NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; intensity = (double)p[i]; if ((image->channel_mask & SyncChannels) != 0) intensity = GetPixelIntensity(image, p); histogram[GetPixelChannels(image) * ScaleQuantumToMap( ClampToQuantum(intensity)) + i]++; } p += GetPixelChannels(image); } } image_view = DestroyCacheView(image_view); /* * Integrate the histogram to get the equalization map. */ for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; register ssize_t j; intensity = 0.0; for (j = 0; j <= (ssize_t) MaxMap; j++) { intensity += histogram[GetPixelChannels(image) * j + i]; map[GetPixelChannels(image) * j + i] = intensity; } } (void)memset(equalize_map, 0, (MaxMap + 1) * GetPixelChannels(image) * sizeof(*equalize_map)); (void)memset(black, 0, sizeof(*black)); (void)memset(white, 0, sizeof(*white)); for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { register ssize_t j; black[i] = map[i]; white[i] = map[GetPixelChannels(image) * MaxMap + i]; if (black[i] != white[i]) for (j = 0; j <= (ssize_t) MaxMap; j++) equalize_map[GetPixelChannels(image) * j + i] = (double) ScaleMapToQuantum((double)((MaxMap * (map[ GetPixelChannels(image) * j + i] - black[i])) / (white[i] - black[i]))); } histogram = (double *)RelinquishMagickMemory(histogram); map = (double *)RelinquishMagickMemory(map); if (image->storage_class == PseudoClass) { register ssize_t j; /* * Equalize colormap. 
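  equalize_map realizes the classic equalization transfer
  T(j) = QuantumRange*(cdf(j)-cdf(0))/(cdf(MaxMap)-cdf(0)) per channel,
  with cdf the cumulative histogram integrated above (black[i] and white[i]
  hold cdf(0) and cdf(MaxMap)).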
*/ for (j = 0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, RedPixelChannel); if (black[channel] != white[channel]) image->colormap[j].red = equalize_map[GetPixelChannels(image) * ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red)) + channel]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, GreenPixelChannel); if (black[channel] != white[channel]) image->colormap[j].green = equalize_map[GetPixelChannels(image) * ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green)) + channel]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, BluePixelChannel); if (black[channel] != white[channel]) image->colormap[j].blue = equalize_map[GetPixelChannels(image) * ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue)) + channel]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, AlphaPixelChannel); if (black[channel] != white[channel]) image->colormap[j].alpha = equalize_map[GetPixelChannels(image) * ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha)) + channel]; } } } /* * Equalize image. */ progress = 0; image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j = 0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image, j); PixelTrait traits = GetPixelChannelTraits(image, channel); if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j])) continue; q[j] = ClampToQuantum(equalize_map[GetPixelChannels(image) * ScaleQuantumToMap(q[j]) + j]); } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, EqualizeImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); equalize_map = (double *)RelinquishMagickMemory(equalize_map); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G a m m a I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GammaImage() gamma-corrects a particular image channel. The * same % image viewed on different devices will have perceptual differences * in the % way the image's intensities are represented on the screen. * Specify % individual gamma levels for the red, green, and blue channels, * or adjust % all three with the gamma parameter. Values typically range * from 0.8 to 2.3. % % You can also reduce the influence of a particular * channel with a gamma % value of 0. 
% % The format of the GammaImage * method is: % % MagickBooleanType GammaImage(Image *image,const double * gamma, % ExceptionInfo *exception) % % A description of each * parameter follows: % % o image: the image. % % o level: the image * gamma as a string (e.g. 1.6,1.2,1.0). % % o gamma: the image gamma. % */ static inline double gamma_pow(const double value, const double gamma) { return (value < 0.0 ? value : pow(value, gamma)); } MagickExport MagickBooleanType GammaImage(Image * image, const double gamma, ExceptionInfo * exception) { #define GammaImageTag "Gamma/Image" CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; Quantum * gamma_map; register ssize_t i; ssize_t y; /* * Allocate and initialize gamma maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (gamma == 1.0) return (MagickTrue); gamma_map = (Quantum *) AcquireQuantumMemory(MaxMap + 1UL, sizeof(*gamma_map)); if (gamma_map == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); (void)memset(gamma_map, 0, (MaxMap + 1) * sizeof(*gamma_map)); if (gamma != 0.0) for (i = 0; i <= (ssize_t) MaxMap; i++) gamma_map[i] = ScaleMapToQuantum((double)(MaxMap * pow((double)i / MaxMap, PerceptibleReciprocal(gamma)))); if (image->storage_class == PseudoClass) for (i = 0; i < (ssize_t) image->colors; i++) { /* * Gamma-correct colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red = (double)gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].red))]; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green = (double)gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].green))]; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue = (double)gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].blue))]; if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha = (double)gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].alpha))]; } /* * Gamma-correct image. 
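 * Each quantum is looked up in the precomputed gamma_map table rather than
 * calling pow() once per pixel; rows are processed in parallel when OpenMP
 * is available.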
*/ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j = 0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image, j); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j] = gamma_map[ScaleQuantumToMap(ClampToQuantum((MagickRealType) q[j]))]; } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, GammaImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); gamma_map = (Quantum *) RelinquishMagickMemory(gamma_map); if (image->gamma != 0.0) image->gamma *= gamma; return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G r a y s c a l e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GrayscaleImage() converts the image to grayscale. % % The * format of the GrayscaleImage method is: % % MagickBooleanType * GrayscaleImage(Image *image, % const PixelIntensityMethod method * ,ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o method: the pixel intensity method. % % * o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GrayscaleImage(Image * image, const PixelIntensityMethod method, ExceptionInfo * exception) { #define GrayscaleImageTag "Grayscale/Image" CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (image->storage_class == PseudoClass) { if (SyncImage(image, exception) == MagickFalse) return (MagickFalse); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); } #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateGrayscaleImage(image, method, exception) != MagickFalse) { image->intensity = method; image->type = GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return (SetImageColorspace(image, LinearGRAYColorspace, exception)); return (SetImageColorspace(image, GRAYColorspace, exception)); } #endif /* * Grayscale image. 
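 * Each pixel's intensity is computed from its RGB components with the
 * requested PixelIntensityMethod: the luma variants weight gamma-encoded
 * values, while the luminance variants first decode to linear RGB.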
*/ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { MagickRealType blue, green, red, intensity; red = (MagickRealType) GetPixelRed(image, q); green = (MagickRealType) GetPixelGreen(image, q); blue = (MagickRealType) GetPixelBlue(image, q); intensity = 0.0; switch (method) { case AveragePixelIntensityMethod: { intensity = (red + green + blue) / 3.0; break; } case BrightnessPixelIntensityMethod: { intensity = MagickMax(MagickMax(red, green), blue); break; } case LightnessPixelIntensityMethod: { intensity = (MagickMin(MagickMin(red, green), blue) + MagickMax(MagickMax(red, green), blue)) / 2.0; break; } case MSPixelIntensityMethod: { intensity = (MagickRealType) (((double)red * red + green * green + blue * blue) / 3.0); break; } case Rec601LumaPixelIntensityMethod: { if (image->colorspace == RGBColorspace) { red = EncodePixelGamma(red); green = EncodePixelGamma(green); blue = EncodePixelGamma(blue); } intensity = 0.298839 * red + 0.586811 * green + 0.114350 * blue; break; } case Rec601LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red = DecodePixelGamma(red); green = DecodePixelGamma(green); blue = DecodePixelGamma(blue); } intensity = 0.298839 * red + 0.586811 * green + 0.114350 * blue; break; } case Rec709LumaPixelIntensityMethod: default: { if (image->colorspace == RGBColorspace) { red = EncodePixelGamma(red); green = EncodePixelGamma(green); blue = EncodePixelGamma(blue); } intensity = 0.212656 * red + 0.715158 * green + 0.072186 * blue; break; } case Rec709LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red = DecodePixelGamma(red); green = DecodePixelGamma(green); blue = DecodePixelGamma(blue); } intensity = 0.212656 * red + 0.715158 * green + 0.072186 * blue; break; } case RMSPixelIntensityMethod: { intensity = (MagickRealType) (sqrt((double)red * red + green * green + blue * blue) / sqrt(3.0)); break; } } SetPixelGray(image, ClampToQuantum(intensity), q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, GrayscaleImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); image->intensity = method; image->type = GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return (SetImageColorspace(image, LinearGRAYColorspace, exception)); return (SetImageColorspace(image, GRAYColorspace, exception)); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % H a l d C l u t I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % HaldClutImage() applies a Hald color lookup table to the 
image. * A Hald % color lookup table is a 3-dimensional color cube mapped to 2 * dimensions. % Create it with the HALD coder. You can apply any color * transformation to % the Hald image and then use this method to apply the * transform to the % image. % % The format of the HaldClutImage method is: * % % MagickBooleanType HaldClutImage(Image *image,Image *hald_image, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image, which is replaced by indexed CLUT values % % o * hald_image: the color lookup table image for replacement color values. % % * o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType HaldClutImage(Image * image, const Image * hald_image, ExceptionInfo * exception) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { double x, y, z; } HaldInfo; CacheView * hald_view, *image_view; double width; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickCoreSignature); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) (void)SetImageAlphaChannel(image, OpaqueAlphaChannel, exception); /* * Hald clut image. */ status = MagickTrue; progress = 0; length = (size_t) MagickMin((MagickRealType) hald_image->columns, (MagickRealType) hald_image->rows); for (level = 2; (level * level * level) < length; level++); level *= level; cube_size = level * level; width = (double)hald_image->columns; GetPixelInfo(hald_image, &zero); hald_view = AcquireVirtualCacheView(hald_image, exception); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double area, offset; HaldInfo point; PixelInfo pixel, pixel1, pixel2, pixel3, pixel4; point.x = QuantumScale * (level - 1.0) * GetPixelRed(image, q); point.y = QuantumScale * (level - 1.0) * GetPixelGreen(image, q); point.z = QuantumScale * (level - 1.0) * GetPixelBlue(image, q); offset = point.x + level * floor(point.y) + cube_size * floor(point.z); point.x -= floor(point.x); point.y -= floor(point.y); point.z -= floor(point.z); pixel1 = zero; status = InterpolatePixelInfo(hald_image, hald_view, hald_image->interpolate, fmod(offset, width), floor(offset / width), &pixel1, exception); if (status == MagickFalse) break; pixel2 = zero; status = InterpolatePixelInfo(hald_image, hald_view, hald_image->interpolate, fmod(offset + level, width), floor((offset + level) / width), &pixel2, exception); if (status == MagickFalse) break; pixel3 = zero; area = point.y; if (hald_image->interpolate == NearestInterpolatePixel) area = (point.y < 0.5) ? 
0.0 : 1.0; CompositePixelInfoAreaBlend(&pixel1, pixel1.alpha, &pixel2, pixel2.alpha, area, &pixel3); offset += cube_size; status = InterpolatePixelInfo(hald_image, hald_view, hald_image->interpolate, fmod(offset, width), floor(offset / width), &pixel1, exception); if (status == MagickFalse) break; status = InterpolatePixelInfo(hald_image, hald_view, hald_image->interpolate, fmod(offset + level, width), floor((offset + level) / width), &pixel2, exception); if (status == MagickFalse) break; pixel4 = zero; CompositePixelInfoAreaBlend(&pixel1, pixel1.alpha, &pixel2, pixel2.alpha, area, &pixel4); pixel = zero; area = point.z; if (hald_image->interpolate == NearestInterpolatePixel) area = (point.z < 0.5) ? 0.0 : 1.0; CompositePixelInfoAreaBlend(&pixel3, pixel3.alpha, &pixel4, pixel4.alpha, area, &pixel); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) SetPixelRed(image, ClampToQuantum(pixel.red), q); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) SetPixelGreen(image, ClampToQuantum(pixel.green), q); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) SetPixelBlue(image, ClampToQuantum(pixel.blue), q); if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) SetPixelBlack(image, ClampToQuantum(pixel.black), q); if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) SetPixelAlpha(image, ClampToQuantum(pixel.alpha), q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, HaldClutImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } hald_view = DestroyCacheView(hald_view); image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % L e v e l I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % LevelImage() adjusts the levels of a particular image channel by * % scaling the colors falling between specified white and black points to * % the full available quantum range. % % The parameters provided * represent the black, and white points. The black % point specifies the * darkest color in the image. Colors darker than the % black point are set * to zero. White point specifies the lightest color in % the image. * Colors brighter than the white point are set to the maximum % quantum * value. % % If a '!' flag is given, map black and white colors to the * given levels % rather than mapping those levels to black and white. See * % LevelizeImage() below. % % Gamma specifies a gamma correction to apply * to the image. % % The format of the LevelImage method is: % % * MagickBooleanType LevelImage(Image *image,const double black_point, % * const double white_point,const double gamma,ExceptionInfo *exception) % % * A description of each parameter follows: % % o image: the image. % % * o black_point: The level to map zero (black) to. % % o white_point: The * level to map QuantumRange (white) to. % % o exception: return any * errors or warnings in this structure. 
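%
%  The transfer function applied by LevelPixel() below is
%
%    pixel' = QuantumRange*((pixel-black_point)/(white_point-black_point))^(1/gamma)
%
%  with the result subsequently clamped to the quantum range.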
% */ static inline double LevelPixel(const double black_point, const double white_point, const double gamma, const double pixel) { double level_pixel, scale; scale = PerceptibleReciprocal(white_point - black_point); level_pixel = QuantumRange * gamma_pow(scale * ((double)pixel - black_point), PerceptibleReciprocal(gamma)); return (level_pixel); } MagickExport MagickBooleanType LevelImage(Image * image, const double black_point, const double white_point, const double gamma, ExceptionInfo * exception) { #define LevelImageTag "Level/Image" CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* * Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (image->storage_class == PseudoClass) for (i = 0; i < (ssize_t) image->colors; i++) { /* * Level colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red = (double)ClampToQuantum(LevelPixel(black_point, white_point, gamma, image->colormap[i].red)); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green = (double)ClampToQuantum(LevelPixel(black_point, white_point, gamma, image->colormap[i].green)); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue = (double)ClampToQuantum(LevelPixel(black_point, white_point, gamma, image->colormap[i].blue)); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha = (double)ClampToQuantum(LevelPixel(black_point, white_point, gamma, image->colormap[i].alpha)); } /* * Level image. */ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { register ssize_t j; for (j = 0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image, j); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j] = ClampToQuantum(LevelPixel(black_point, white_point, gamma, (double)q[j])); } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, LevelImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); (void)ClampImage(image, exception); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % L e v e l i z e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % LevelizeImage() applies the reversed LevelImage() operation to * just % the specific channels specified. 
It compresses the full range of * color % values, so that they lie between the given black and white * points. Gamma is % applied before the values are mapped. % % * LevelizeImage() can be called with by using a +level command line % API * option, or using a '!' on a -level or LevelImage() geometry string. % % * It can be used to de-contrast a greyscale image to the exact levels % * specified. Or by using specific levels for each channel of an image you % * can convert a gray-scale image to any linear color gradient, according to * % those levels. % % The format of the LevelizeImage method is: % % * MagickBooleanType LevelizeImage(Image *image,const double black_point, % * const double white_point,const double gamma,ExceptionInfo *exception) % % * A description of each parameter follows: % % o image: the image. % % * o black_point: The level to map zero (black) to. % % o white_point: The * level to map QuantumRange (white) to. % % o gamma: adjust gamma by this * factor before mapping values. % % o exception: return any errors or * warnings in this structure. % */ MagickExport MagickBooleanType LevelizeImage(Image * image, const double black_point, const double white_point, const double gamma, ExceptionInfo * exception) { #define LevelizeImageTag "Levelize/Image" #define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \ (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point) CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* * Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (image->storage_class == PseudoClass) for (i = 0; i < (ssize_t) image->colors; i++) { /* * Level colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red = (double)LevelizeValue(image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green = (double)LevelizeValue( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue = (double)LevelizeValue(image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha = (double)LevelizeValue( image->colormap[i].alpha); } /* * Level image. 
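 * LevelizeValue() is the reverse mapping of LevelPixel(): the quantum is
 * first gamma-adjusted, then compressed into [black_point,white_point].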
*/
  status = MagickTrue;
  progress = 0;
  image_view = AcquireAuthenticCacheView(image, exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
  magick_number_threads(image,image,image->rows,1)
#endif
  for (y = 0; y < (ssize_t) image->rows; y++) {
    register Quantum * magick_restrict q;
    register ssize_t x;

    if (status == MagickFalse)
      continue;
    q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1,
      exception);
    if (q == (Quantum *) NULL) {
      status = MagickFalse;
      continue;
    }
    for (x = 0; x < (ssize_t) image->columns; x++) {
      register ssize_t j;

      for (j = 0; j < (ssize_t) GetPixelChannels(image); j++) {
        PixelChannel channel = GetPixelChannelChannel(image, j);
        PixelTrait traits = GetPixelChannelTraits(image, channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j] = LevelizeValue(q[j]);
      }
      q += GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse)
      status = MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL) {
      MagickBooleanType proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
      progress++;
      proceed = SetImageProgress(image, LevelizeImageTag, progress,
        image->rows);
      if (proceed == MagickFalse)
        status = MagickFalse;
    }
  }
  image_view = DestroyCacheView(image_view);
  return (status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImageColors() maps the given colors to "black" and "white" values,
%  linearly spreading out the colors, and level values on a channel by
%  channel basis, as per LevelImage().  The given colors allow you to
%  specify different level ranges for each of the color channels separately.
%
%  If the boolean 'invert' is set true, the image values are modified in the
%  reverse direction.  That is, any existing "black" and "white" colors in
%  the image become the color values given, with all other values compressed
%  appropriately.  This effectively maps a greyscale gradient into the given
%  color gradient.
%
%  The format of the LevelImageColors method is:
%
%      MagickBooleanType LevelImageColors(Image *image,
%        const PixelInfo *black_color,const PixelInfo *white_color,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_color: The color to map black to/from.
%
%    o white_color: The color to map white to/from.
%
%    o invert: if MagickTrue, map the colors to the given levels (levelize)
%      rather than from them (level).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelImageColors(Image * image,
  const PixelInfo * black_color, const PixelInfo * white_color,
  const MagickBooleanType invert, ExceptionInfo * exception)
{
  ChannelType channel_mask;

  MagickStatusType status;

  /*
   * Allocate and initialize levels map.
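   * Each enabled channel is isolated with SetImageChannelMask() and then
   * leveled (or, when invert is set, levelized) against that channel's
   * component of the given black and white colors.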
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsGrayColorspace(black_color->colorspace) == MagickFalse) || (IsGrayColorspace(white_color->colorspace) == MagickFalse))) (void)SetImageColorspace(image, sRGBColorspace, exception); status = MagickTrue; if (invert == MagickFalse) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { channel_mask = SetImageChannelMask(image, RedChannel); status &= LevelImage(image, black_color->red, white_color->red, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { channel_mask = SetImageChannelMask(image, GreenChannel); status &= LevelImage(image, black_color->green, white_color->green, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { channel_mask = SetImageChannelMask(image, BlueChannel); status &= LevelImage(image, black_color->blue, white_color->blue, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { channel_mask = SetImageChannelMask(image, BlackChannel); status &= LevelImage(image, black_color->black, white_color->black, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { channel_mask = SetImageChannelMask(image, AlphaChannel); status &= LevelImage(image, black_color->alpha, white_color->alpha, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } } else { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { channel_mask = SetImageChannelMask(image, RedChannel); status &= LevelizeImage(image, black_color->red, white_color->red, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { channel_mask = SetImageChannelMask(image, GreenChannel); status &= LevelizeImage(image, black_color->green, white_color->green, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { channel_mask = SetImageChannelMask(image, BlueChannel); status &= LevelizeImage(image, black_color->blue, white_color->blue, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { channel_mask = SetImageChannelMask(image, BlackChannel); status &= LevelizeImage(image, black_color->black, white_color->black, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { channel_mask = SetImageChannelMask(image, AlphaChannel); status &= LevelizeImage(image, black_color->alpha, white_color->alpha, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } } return (status != 0 ? 
MagickTrue : MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % L i n e a r S t r e t c h I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % LinearStretchImage() discards any pixels below the black point * and above % the white point and levels the remaining pixels. % % The * format of the LinearStretchImage method is: % % MagickBooleanType * LinearStretchImage(Image *image, % const double black_point,const * double white_point, % ExceptionInfo *exception) % % A description * of each parameter follows: % % o image: the image. % % o * black_point: the black point. % % o white_point: the white point. % % * o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType LinearStretchImage(Image * image, const double black_point, const double white_point, ExceptionInfo * exception) { #define LinearStretchImageTag "LinearStretch/Image" CacheView * image_view; double *histogram, intensity; MagickBooleanType status; ssize_t black, white, y; /* * Allocate histogram and linear map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); histogram = (double *)AcquireQuantumMemory(MaxMap + 1UL, sizeof(*histogram)); if (histogram == (double *)NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); /* * Form histogram. */ (void)memset(histogram, 0, (MaxMap + 1) * sizeof(*histogram)); image_view = AcquireVirtualCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register const Quantum * magick_restrict p; register ssize_t x; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); if (p == (const Quantum *)NULL) break; for (x = 0; x < (ssize_t) image->columns; x++) { intensity = GetPixelIntensity(image, p); histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++; p += GetPixelChannels(image); } } image_view = DestroyCacheView(image_view); /* * Find the histogram boundaries by locating the black and white point * levels. */ intensity = 0.0; for (black = 0; black < (ssize_t) MaxMap; black++) { intensity += histogram[black]; if (intensity >= black_point) break; } intensity = 0.0; for (white = (ssize_t) MaxMap; white != 0; white--) { intensity += histogram[white]; if (intensity >= white_point) break; } histogram = (double *)RelinquishMagickMemory(histogram); status = LevelImage(image, (double)ScaleMapToQuantum((MagickRealType) black), (double)ScaleMapToQuantum((MagickRealType) white), 1.0, exception); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % M o d u l a t e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ModulateImage() lets you control the brightness, saturation, and * hue % of an image. Modulate represents the brightness, saturation, and * hue % as one parameter (e.g. 90,150,100). If the image colorspace is * HSL, the % modulation is lightness, saturation, and hue. For HWB, use * blackness, % whiteness, and hue. And for HCL, use chrome, luma, and hue. * % % The format of the ModulateImage method is: % % MagickBooleanType * ModulateImage(Image *image,const char *modulate, % ExceptionInfo * *exception) % % A description of each parameter follows: % % o image: * the image. % % o modulate: Define the percent change in brightness, * saturation, and hue. 
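%
%      For example, '120,50,100' increases brightness by 20 percent, halves
%      the saturation, and leaves the hue unchanged (100 means no change).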
% % o exception: return any errors or warnings in * this structure. % */ static inline void ModulateHCL(const double percent_hue, const double percent_chroma, const double percent_luma, double *red, double *green, double *blue) { double hue, luma, chroma; /* * Increase or decrease color luma, chroma, or hue. */ ConvertRGBToHCL(*red, *green, *blue, &hue, &chroma, &luma); hue += fmod((percent_hue - 100.0), 200.0) / 200.0; chroma *= 0.01 * percent_chroma; luma *= 0.01 * percent_luma; ConvertHCLToRGB(hue, chroma, luma, red, green, blue); } static inline void ModulateHCLp(const double percent_hue, const double percent_chroma, const double percent_luma, double *red, double *green, double *blue) { double hue, luma, chroma; /* * Increase or decrease color luma, chroma, or hue. */ ConvertRGBToHCLp(*red, *green, *blue, &hue, &chroma, &luma); hue += fmod((percent_hue - 100.0), 200.0) / 200.0; chroma *= 0.01 * percent_chroma; luma *= 0.01 * percent_luma; ConvertHCLpToRGB(hue, chroma, luma, red, green, blue); } static inline void ModulateHSB(const double percent_hue, const double percent_saturation, const double percent_brightness, double *red, double *green, double *blue) { double brightness, hue, saturation; /* * Increase or decrease color brightness, saturation, or hue. */ ConvertRGBToHSB(*red, *green, *blue, &hue, &saturation, &brightness); hue += fmod((percent_hue - 100.0), 200.0) / 200.0; saturation *= 0.01 * percent_saturation; brightness *= 0.01 * percent_brightness; ConvertHSBToRGB(hue, saturation, brightness, red, green, blue); } static inline void ModulateHSI(const double percent_hue, const double percent_saturation, const double percent_intensity, double *red, double *green, double *blue) { double intensity, hue, saturation; /* * Increase or decrease color intensity, saturation, or hue. */ ConvertRGBToHSI(*red, *green, *blue, &hue, &saturation, &intensity); hue += fmod((percent_hue - 100.0), 200.0) / 200.0; saturation *= 0.01 * percent_saturation; intensity *= 0.01 * percent_intensity; ConvertHSIToRGB(hue, saturation, intensity, red, green, blue); } static inline void ModulateHSL(const double percent_hue, const double percent_saturation, const double percent_lightness, double *red, double *green, double *blue) { double hue, lightness, saturation; /* * Increase or decrease color lightness, saturation, or hue. */ ConvertRGBToHSL(*red, *green, *blue, &hue, &saturation, &lightness); hue += fmod((percent_hue - 100.0), 200.0) / 200.0; saturation *= 0.01 * percent_saturation; lightness *= 0.01 * percent_lightness; ConvertHSLToRGB(hue, saturation, lightness, red, green, blue); } static inline void ModulateHSV(const double percent_hue, const double percent_saturation, const double percent_value, double *red, double *green, double *blue) { double hue, saturation, value; /* * Increase or decrease color value, saturation, or hue. */ ConvertRGBToHSV(*red, *green, *blue, &hue, &saturation, &value); hue += fmod((percent_hue - 100.0), 200.0) / 200.0; saturation *= 0.01 * percent_saturation; value *= 0.01 * percent_value; ConvertHSVToRGB(hue, saturation, value, red, green, blue); } static inline void ModulateHWB(const double percent_hue, const double percent_whiteness, const double percent_blackness, double *red, double *green, double *blue) { double blackness, hue, whiteness; /* * Increase or decrease color blackness, whiteness, or hue. 
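 * As in the other Modulate helpers, the hue is shifted by a wrapped offset
 * derived from percent_hue, while whiteness and blackness are scaled.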
*/ ConvertRGBToHWB(*red, *green, *blue, &hue, &whiteness, &blackness); hue += fmod((percent_hue - 100.0), 200.0) / 200.0; blackness *= 0.01 * percent_blackness; whiteness *= 0.01 * percent_whiteness; ConvertHWBToRGB(hue, whiteness, blackness, red, green, blue); } static inline void ModulateLCHab(const double percent_luma, const double percent_chroma, const double percent_hue, double *red, double *green, double *blue) { double hue, luma, chroma; /* * Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHab(*red, *green, *blue, &luma, &chroma, &hue); luma *= 0.01 * percent_luma; chroma *= 0.01 * percent_chroma; hue += fmod((percent_hue - 100.0), 200.0) / 200.0; ConvertLCHabToRGB(luma, chroma, hue, red, green, blue); } static inline void ModulateLCHuv(const double percent_luma, const double percent_chroma, const double percent_hue, double *red, double *green, double *blue) { double hue, luma, chroma; /* * Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHuv(*red, *green, *blue, &luma, &chroma, &hue); luma *= 0.01 * percent_luma; chroma *= 0.01 * percent_chroma; hue += fmod((percent_hue - 100.0), 200.0) / 200.0; ConvertLCHuvToRGB(luma, chroma, hue, red, green, blue); } MagickExport MagickBooleanType ModulateImage(Image * image, const char *modulate, ExceptionInfo * exception) { #define ModulateImageTag "Modulate/Image" CacheView * image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; register ssize_t i; ssize_t y; /* * Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (modulate == (char *)NULL) return (MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void)SetImageColorspace(image, sRGBColorspace, exception); flags = ParseGeometry(modulate, &geometry_info); percent_brightness = geometry_info.rho; percent_saturation = geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation = 100.0; percent_hue = geometry_info.xi; if ((flags & XiValue) == 0) percent_hue = 100.0; colorspace = UndefinedColorspace; artifact = GetImageArtifact(image, "modulate:colorspace"); if (artifact != (const char *)NULL) colorspace = (ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse, artifact); if (image->storage_class == PseudoClass) for (i = 0; i < (ssize_t) image->colors; i++) { double blue, green, red; /* * Modulate image colormap. 
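 * PseudoClass palettes are modulated entry by entry in the selected
 * colorspace so that the colormap stays consistent with the DirectClass
 * pixel loop below.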
*/
      red = (double)image->colormap[i].red;
      green = (double)image->colormap[i].green;
      blue = (double)image->colormap[i].blue;
      switch (colorspace) {
      case HCLColorspace:
        {
          ModulateHCL(percent_hue, percent_saturation, percent_brightness,
            &red, &green, &blue);
          break;
        }
      case HCLpColorspace:
        {
          ModulateHCLp(percent_hue, percent_saturation, percent_brightness,
            &red, &green, &blue);
          break;
        }
      case HSBColorspace:
        {
          ModulateHSB(percent_hue, percent_saturation, percent_brightness,
            &red, &green, &blue);
          break;
        }
      case HSIColorspace:
        {
          ModulateHSI(percent_hue, percent_saturation, percent_brightness,
            &red, &green, &blue);
          break;
        }
      case HSLColorspace:
      default:
        {
          ModulateHSL(percent_hue, percent_saturation, percent_brightness,
            &red, &green, &blue);
          break;
        }
      case HSVColorspace:
        {
          ModulateHSV(percent_hue, percent_saturation, percent_brightness,
            &red, &green, &blue);
          break;
        }
      case HWBColorspace:
        {
          ModulateHWB(percent_hue, percent_saturation, percent_brightness,
            &red, &green, &blue);
          break;
        }
      case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness, percent_saturation, percent_hue,
            &red, &green, &blue);
          break;
        }
      case LCHColorspace:  /* LCH is treated as LCHuv, as in the pixel loop */
      case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness, percent_saturation, percent_hue,
            &red, &green, &blue);
          break;
        }
      }
      image->colormap[i].red = red;
      image->colormap[i].green = green;
      image->colormap[i].blue = blue;
    }
  /*
   * Modulate image.
   */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateModulateImage(image, percent_brightness, percent_hue,
      percent_saturation, colorspace, exception) != MagickFalse)
    return (MagickTrue);
#endif
  status = MagickTrue;
  progress = 0;
  image_view = AcquireAuthenticCacheView(image, exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
  magick_number_threads(image,image,image->rows,1)
#endif
  for (y = 0; y < (ssize_t) image->rows; y++) {
    register Quantum * magick_restrict q;
    register ssize_t x;

    if (status == MagickFalse)
      continue;
    q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1,
      exception);
    if (q == (Quantum *) NULL) {
      status = MagickFalse;
      continue;
    }
    for (x = 0; x < (ssize_t) image->columns; x++) {
      double blue, green, red;

      red = (double)GetPixelRed(image, q);
      green = (double)GetPixelGreen(image, q);
      blue = (double)GetPixelBlue(image, q);
      switch (colorspace) {
      case HCLColorspace:
        {
          ModulateHCL(percent_hue, percent_saturation, percent_brightness,
            &red, &green, &blue);
          break;
        }
      case HCLpColorspace:
        {
          ModulateHCLp(percent_hue, percent_saturation, percent_brightness,
            &red, &green, &blue);
          break;
        }
      case HSBColorspace:
        {
          ModulateHSB(percent_hue, percent_saturation, percent_brightness,
            &red, &green, &blue);
          break;
        }
      case HSIColorspace:  /* mirrors the colormap path above */
        {
          ModulateHSI(percent_hue, percent_saturation, percent_brightness,
            &red, &green, &blue);
          break;
        }
      case HSLColorspace:
      default:
        {
          ModulateHSL(percent_hue, percent_saturation, percent_brightness,
            &red, &green, &blue);
          break;
        }
      case HSVColorspace:
        {
          ModulateHSV(percent_hue, percent_saturation, percent_brightness,
            &red, &green, &blue);
          break;
        }
      case HWBColorspace:
        {
          ModulateHWB(percent_hue, percent_saturation, percent_brightness,
            &red, &green, &blue);
          break;
        }
      case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness, percent_saturation, percent_hue,
            &red, &green, &blue);
          break;
        }
      case LCHColorspace:
      case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness, percent_saturation, percent_hue,
            &red, &green, &blue);
          break;
        }
      }
      SetPixelRed(image, ClampToQuantum(red), q);
      SetPixelGreen(image, ClampToQuantum(green), q);
      SetPixelBlue(image, ClampToQuantum(blue), q);
      q += GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse)
      status =
MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, ModulateImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % N e g a t e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % NegateImage() negates the colors in the reference image. The * grayscale % option means that only grayscale values within the image are * negated. % % The format of the NegateImage method is: % % * MagickBooleanType NegateImage(Image *image, % const * MagickBooleanType grayscale,ExceptionInfo *exception) % % A description * of each parameter follows: % % o image: the image. % % o grayscale: * If MagickTrue, only negate grayscale pixels within the image. % % o * exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType NegateImage(Image * image, const MagickBooleanType grayscale, ExceptionInfo * exception) { #define NegateImageTag "Negate/Image" CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (image->storage_class == PseudoClass) for (i = 0; i < (ssize_t) image->colors; i++) { /* * Negate colormap. */ if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red = QuantumRange - image->colormap[i].red; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green = QuantumRange - image->colormap[i].green; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue = QuantumRange - image->colormap[i].blue; } /* * Negate image. 
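 * The grayscale-only path below scans rows serially and skips pixels that
 * are not gray; the general path negates every updatable channel, with the
 * rows processed in parallel when OpenMP is available.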
*/
  status = MagickTrue;
  progress = 0;
  image_view = AcquireAuthenticCacheView(image, exception);
  if (grayscale != MagickFalse) {
    for (y = 0; y < (ssize_t) image->rows; y++) {
      MagickBooleanType sync;
      register Quantum * magick_restrict q;
      register ssize_t x;

      if (status == MagickFalse)
        continue;
      q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1,
        exception);
      if (q == (Quantum *) NULL) {
        status = MagickFalse;
        continue;
      }
      for (x = 0; x < (ssize_t) image->columns; x++) {
        register ssize_t j;

        if (IsPixelGray(image, q) == MagickFalse) {
          q += GetPixelChannels(image);
          continue;
        }
        for (j = 0; j < (ssize_t) GetPixelChannels(image); j++) {
          PixelChannel channel = GetPixelChannelChannel(image, j);
          PixelTrait traits = GetPixelChannelTraits(image, channel);
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          q[j] = QuantumRange - q[j];
        }
        q += GetPixelChannels(image);
      }
      sync = SyncCacheViewAuthenticPixels(image_view, exception);
      if (sync == MagickFalse)
        status = MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL) {
        MagickBooleanType proceed;

        progress++;
        proceed = SetImageProgress(image, NegateImageTag, progress,
          image->rows);
        if (proceed == MagickFalse)
          status = MagickFalse;
      }
    }
    image_view = DestroyCacheView(image_view);
    return (status);
  }
  /*
   * Negate image.
   */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
  magick_number_threads(image,image,image->rows,1)
#endif
  for (y = 0; y < (ssize_t) image->rows; y++) {
    register Quantum * magick_restrict q;
    register ssize_t x;

    if (status == MagickFalse)
      continue;
    q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1,
      exception);
    if (q == (Quantum *) NULL) {
      status = MagickFalse;
      continue;
    }
    for (x = 0; x < (ssize_t) image->columns; x++) {
      register ssize_t j;

      for (j = 0; j < (ssize_t) GetPixelChannels(image); j++) {
        PixelChannel channel = GetPixelChannelChannel(image, j);
        PixelTrait traits = GetPixelChannelTraits(image, channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j] = QuantumRange - q[j];
      }
      q += GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse)
      status = MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL) {
      MagickBooleanType proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
      progress++;
      proceed = SetImageProgress(image, NegateImageTag, progress, image->rows);
      if (proceed == MagickFalse)
        status = MagickFalse;
    }
  }
  image_view = DestroyCacheView(image_view);
  return (status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     N o r m a l i z e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  The NormalizeImage() method enhances the contrast of a color image by
%  mapping the darkest 0.15 percent of all pixels to black and the brightest
%  0.05 percent to white (the constants below are 0.0015 and 0.9995 of the
%  pixel count).
%
%  The format of the NormalizeImage method is:
%
%      MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType NormalizeImage(Image * image, ExceptionInfo * exception) { double black_point, white_point; black_point = (double)image->columns * image->rows * 0.0015; white_point = (double)image->columns * image->rows * 0.9995; return (ContrastStretchImage(image, black_point, white_point, exception)); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S i g m o i d a l C o n t r a s t I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SigmoidalContrastImage() adjusts the contrast of an image with a * non-linear % sigmoidal contrast algorithm. Increase the contrast of the * image using a % sigmoidal transfer function without saturating highlights * or shadows. % Contrast indicates how much to increase the contrast (0 is * none; 3 is % typical; 20 is pushing it); mid-point indicates where * midtones fall in the % resultant image (0 is white; 50% is middle-gray; * 100% is black). Set % sharpen to MagickTrue to increase the image * contrast otherwise the contrast % is reduced. % % The format of the * SigmoidalContrastImage method is: % % MagickBooleanType * SigmoidalContrastImage(Image *image, % const MagickBooleanType * sharpen,const char *levels, % ExceptionInfo *exception) % % A * description of each parameter follows: % % o image: the image. % % o * sharpen: Increase or decrease image contrast. % % o contrast: strength * of the contrast, the larger the number the more % 'threshold-like' it * becomes. % % o midpoint: midpoint of the function as a color value 0 to * QuantumRange. % % o exception: return any errors or warnings in this * structure. % */ /* * ImageMagick 6 has a version of this function which uses LUTs. */ /* * Sigmoidal function Sigmoidal with inflexion point moved to b and "slope * constant" set to a. * * The first version, based on the hyperbolic tangent tanh, when combined with * the scaling step, is an exact arithmetic clone of the sigmoid function * based on the logistic curve. The equivalence is based on the identity * * 1/(1+exp(-t)) = (1+tanh(t/2))/2 * * (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled * sigmoidal derivation is invariant under affine transformations of the * ordinate. * * The tanh version is almost certainly more accurate and cheaper. The 0.5 * factor in the argument is to clone the legacy ImageMagick behavior. The * reason for making the define depend on atanh even though it only uses tanh * has to do with the construction of the inverse of the scaled sigmoidal. */ #if defined(MAGICKCORE_HAVE_ATANH) #define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) ) #else #define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) ) #endif /* * Scaled sigmoidal function: * * ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) / ( Sigmoidal(a,b,1) - * Sigmoidal(a,b,0) ) * * See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and * http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit * of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by * zero. This is fixed below by exiting immediately when contrast is small, * leaving the image (or colormap) unmodified. This appears to be safe * because the series expansion of the logistic sigmoidal function around x=b * is * * 1/2-a*(b-x)/4+... * * so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh). 
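 *
 * As a concrete check: with a=10 and b=0.5 the scaled sigmoidal fixes the
 * endpoints (0 maps to 0, 1 maps to 1) and, by the symmetry of tanh about
 * the inflexion point, maps 0.5 exactly to 0.5.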
*/ #define ScaledSigmoidal(a,b,x) ( \ (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \ (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) ) /* * Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b may * be 0 or 1, the argument of the hyperbolic tangent (resp. logistic * sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even when * creating a LUT from in gamut values, hence the branching. In addition, * HDRI may have out of gamut values. InverseScaledSigmoidal is not a * two-sided inverse of ScaledSigmoidal: It is only a right inverse. This is * unavoidable. */ static inline double InverseScaledSigmoidal(const double a, const double b, const double x) { const double sig0 = Sigmoidal(a, b, 0.0); const double sig1 = Sigmoidal(a, b, 1.0); const double argument = (sig1 - sig0) * x + sig0; const double clamped = ( #if defined(MAGICKCORE_HAVE_ATANH) argument < -1 + MagickEpsilon ? -1 + MagickEpsilon : (argument > 1 - MagickEpsilon ? 1 - MagickEpsilon : argument) ); return (b + (2.0 / a) * atanh(clamped)); #else argument < MagickEpsilon ? MagickEpsilon : (argument > 1 - MagickEpsilon ? 1 - MagickEpsilon : argument) ); return (b - log(1.0 / clamped - 1.0) / a); #endif } MagickExport MagickBooleanType SigmoidalContrastImage(Image * image, const MagickBooleanType sharpen, const double contrast, const double midpoint, ExceptionInfo * exception) { #define SigmoidalContrastImageTag "SigmoidalContrast/Image" #define ScaledSig(x) ( ClampToQuantum(QuantumRange* \ ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) ) #define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \ InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) ) CacheView * image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* * Convenience macros. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); /* * Side effect: may clamp values unless contrast<MagickEpsilon, in which * case nothing is done. */ if (contrast < MagickEpsilon) return (MagickTrue); /* * Sigmoidal-contrast enhance colormap. 
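 * The sharpen flag selects between ScaledSig (contrast increase) and its
 * right inverse InverseScaledSig (contrast reduction) for every colormap
 * entry and, below, for every pixel.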
*/ if (image->storage_class == PseudoClass) { register ssize_t i; if (sharpen != MagickFalse) for (i = 0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red = (MagickRealType) ScaledSig( image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green = (MagickRealType) ScaledSig( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue = (MagickRealType) ScaledSig( image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha = (MagickRealType) ScaledSig( image->colormap[i].alpha); } else for (i = 0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red = (MagickRealType) InverseScaledSig( image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green = (MagickRealType) InverseScaledSig( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue = (MagickRealType) InverseScaledSig( image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha = (MagickRealType) InverseScaledSig( image->colormap[i].alpha); } } /* * Sigmoidal-contrast enhance image. */ status = MagickTrue; progress = 0; image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); PixelTrait traits = GetPixelChannelTraits(image, channel); if ((traits & UpdatePixelTrait) == 0) continue; if (sharpen != MagickFalse) q[i] = ScaledSig(q[i]); else q[i] = InverseScaledSig(q[i]); } q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, SigmoidalContrastImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % W h i t e B a l a n c e I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % WhiteBalanceImage() applies white balancing to an image * according to a % grayworld assumption in the LAB colorspace. % % The * format of the WhiteBalanceImage method is: % % MagickBooleanType * WhiteBalanceImage(Image *image, % ExceptionInfo *exception) % % A * description of each parameter follows: % % o image: The image to * auto-level % % o exception: return any errors or warnings in this * structure. 
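%
%      Under the grayworld assumption the image-wide means of the a and b
%      channels are neutral; the loop below therefore subtracts the
%      luminance-weighted mean chroma from every pixel.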
% */ MagickExport MagickBooleanType WhiteBalanceImage(Image * image, ExceptionInfo * exception) { #define WhiteBalanceImageTag "WhiteBalance/Image" CacheView * image_view; const char *artifact; double a_mean, b_mean; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* * White balance image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (SetImageStorageClass(image, DirectClass, exception) == MagickFalse) return (MagickFalse); status = TransformImageColorspace(image, LabColorspace, exception); a_mean = 0.0; b_mean = 0.0; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register const Quantum * magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p = GetCacheViewVirtualPixels(image_view, 0, y, image->columns, 1, exception); if (p == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { a_mean += QuantumScale * GetPixela(image, p) - 0.5; b_mean += QuantumScale * GetPixelb(image, p) - 0.5; p += GetPixelChannels(image); } } a_mean /= ((double)image->columns * image->rows); b_mean /= ((double)image->columns * image->rows); progress = 0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double a, b; /* * Scale the chroma distance shifted according to amount of * luminance. */ a = (double)GetPixela(image, q) - 1.1 * GetPixelL(image, q) * a_mean; b = (double)GetPixelb(image, q) - 1.1 * GetPixelL(image, q) * b_mean; SetPixela(image, ClampToQuantum(a), q); SetPixelb(image, ClampToQuantum(b), q); q += GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, WhiteBalanceImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); artifact = GetImageArtifact(image, "white-balance:vibrance"); if (artifact != (const char *)NULL) { ChannelType channel_mask; double black_point; GeometryInfo geometry_info; MagickStatusType flags; /* * Level the a & b channels. */ flags = ParseGeometry(artifact, &geometry_info); black_point = geometry_info.rho; if ((flags & PercentValue) != 0) black_point *= (double)(QuantumRange / 100.0); channel_mask = SetImageChannelMask(image, aChannel | bChannel); status &= LevelImage(image, black_point, (double)QuantumRange - black_point, 1.0, exception); (void)SetImageChannelMask(image, channel_mask); } status &= TransformImageColorspace(image, sRGBColorspace, exception); return (status); }
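/*
  Illustrative sketch, not part of MagickCore: the scaled-sigmoidal mapping
  above restated with the logistic (non-atanh) branch, so the right-inverse
  property can be checked numerically on in-gamut 8-bit values.  All names
  below are local to this example; compile with a C++ compiler and -lm.
*/
#include <cmath>
#include <cstdio>

static double Sig(double a, double b, double x) {
  return 1.0 / (1.0 + std::exp(a * (b - x)));  // logistic sigmoidal
}

static double ScaledSigmoid(double a, double b, double x) {
  // Affinely rescale so that 0 -> 0 and 1 -> 1, as ScaledSigmoidal does.
  return (Sig(a, b, x) - Sig(a, b, 0.0)) / (Sig(a, b, 1.0) - Sig(a, b, 0.0));
}

static double InverseScaledSigmoid(double a, double b, double x) {
  // Undo the affine rescaling, then invert the logistic sigmoidal.
  double argument = (Sig(a, b, 1.0) - Sig(a, b, 0.0)) * x + Sig(a, b, 0.0);
  return b - std::log(1.0 / argument - 1.0) / a;
}

int main() {
  const double contrast = 5.0, midpoint = 0.5;
  for (int i = 0; i <= 255; i += 51) {
    double x = i / 255.0;
    double y = ScaledSigmoid(contrast, midpoint, x);
    // The round-trip column reproduces x, demonstrating the right inverse.
    std::printf("%3d -> %7.2f (round trip %.4f)\n", i, 255.0 * y,
                InverseScaledSigmoid(contrast, midpoint, y));
  }
  return 0;
}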
interpolate_v2_op.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <algorithm>
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/hostdevice.h"

namespace paddle {
namespace operators {

template <typename T, size_t D, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;

inline std::vector<int> get_new_shape(
    const std::vector<const Tensor*>& list_new_shape_tensor) {
  // Read the target shape from a list of 1-element size tensors.
  std::vector<int> vec_new_shape;
  for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) {
    auto tensor = list_new_shape_tensor[i];
    PADDLE_ENFORCE_EQ(tensor->dims(), framework::make_ddim({1}),
                      platform::errors::InvalidArgument(
                          "The shape of dimension tensor should be [1], "
                          "but received %s.",
                          tensor->dims()));
    if (platform::is_gpu_place(tensor->place())) {
      framework::Tensor temp;
      TensorCopySync(*tensor, platform::CPUPlace(), &temp);
      vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>()));
    } else {
      vec_new_shape.push_back(static_cast<int32_t>(*tensor->data<int32_t>()));
    }
  }
  return vec_new_shape;
}

template <typename T>
inline std::vector<T> get_new_data_from_tensor(const Tensor* new_data_tensor) {
  std::vector<T> vec_new_data;
  auto* new_data = new_data_tensor->data<T>();
  framework::Tensor cpu_starts_tensor;
  if (platform::is_gpu_place(new_data_tensor->place())) {
    TensorCopySync(*new_data_tensor, platform::CPUPlace(), &cpu_starts_tensor);
    new_data = cpu_starts_tensor.data<T>();
  }
#ifdef PADDLE_WITH_ASCEND_CL
  if (platform::is_npu_place(new_data_tensor->place())) {
    TensorCopySync(*new_data_tensor, platform::CPUPlace(), &cpu_starts_tensor);
    new_data = cpu_starts_tensor.data<T>();
  }
#endif
  vec_new_data = std::vector<T>(new_data, new_data + new_data_tensor->numel());
  return vec_new_data;
}

inline void ExtractNCDWH(const framework::DDim& dims,
                         const DataLayout& data_layout, int* N, int* C, int* D,
                         int* H, int* W) {
  *N = dims[0];
  if (dims.size() == 3) {
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[2];
    *D = 1;
    *H = 1;
    *W = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
  } else if (dims.size() == 4) {
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[3];
    *D = 1;
    *H = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
    *W = data_layout == DataLayout::kNCHW ? dims[3] : dims[2];
  } else {
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[4];
    *D = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
    *H = data_layout == DataLayout::kNCHW ? dims[3] : dims[2];
    *W = data_layout == DataLayout::kNCHW ?
dims[4] : dims[3]; } } template <typename T> static void NearestNeighborInterpolate(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { output_t(i, j, k, l) = input_t(i, j, in_k, in_l); } else { output_t(i, k, l, j) = input_t(i, in_k, in_l, j); } } } } } } template <typename T> static void NearestNeighbor3DInterpolate( const Tensor& input, Tensor* output, const float ratio_d, const float ratio_h, const float ratio_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 5>::From(input); auto output_t = EigenTensor<T, 5>::From(*output); for (int d = 0; d < out_d; d++) { // loop for images int in_d = (align_corners) ? static_cast<int>(ratio_d * d + 0.5) : static_cast<int>(ratio_d * d); for (int k = 0; k < out_h; k++) { int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { output_t(i, j, d, k, l) = input_t(i, j, in_d, in_k, in_l); } else { // NDHWC output_t(i, d, k, l, j) = input_t(i, in_d, in_k, in_l, j); } } } } } } } template <typename T> static void LinearInterpolation(const Tensor& input, Tensor* output, const float ratio_w, const int in_w, const int n, const int c, const int out_w, const bool align_corners, const bool align_mode, const DataLayout data_layout) { auto input_t = EigenTensor<T, 3>::From(input); auto output_t = EigenTensor<T, 3>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; // w int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; // w1lambda float d_e = 1.f - d_w; // w2lambda { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(3) #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int l = 0; l < out_w; l++) { // linear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vx_w[l]) * vd_e[l] + input_t(i, j, vx_e[l]) * vd_w[l]; output_t(i, j, l) = out_t; } else { out_t = input_t(i, vx_w[l], j) * vd_e[l] + input_t(i, vx_e[l], j) * vd_w[l]; output_t(i, l, j) = out_t; } } } } } template <typename T> static void LinearInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_w, const int in_w, const int n, const int c, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 3>::From(*input_grad); auto output_grad_t = EigenTensor<T, 3>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; // w int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; // w1lambda float d_e = 1.f - d_w; // w2lambda for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // linear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(i, j, l); input_grad_t(i, j, x_w) += static_cast<T>(grad * d_e); input_grad_t(i, j, x_e) += static_cast<T>(grad * d_w); } else { const T grad = output_grad_t(i, l, j); input_grad_t(i, x_w, j) += static_cast<T>(grad * d_e); input_grad_t(i, x_e, j) += static_cast<T>(grad * d_w); } } } } } template <typename T> static void BilinearInterpolation(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? 
(x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(4) #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int k = 0; k < out_h; k++) { // loop for images for (int l = 0; l < out_w; l++) { // bilinear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vy_n[k], vx_w[l]) * vd_s[k] * vd_e[l] + input_t(i, j, vy_s[k], vx_w[l]) * vd_n[k] * vd_e[l] + input_t(i, j, vy_n[k], vx_e[l]) * vd_s[k] * vd_w[l] + input_t(i, j, vy_s[k], vx_e[l]) * vd_n[k] * vd_w[l]; output_t(i, j, k, l) = out_t; } else { out_t = input_t(i, vy_n[k], vx_w[l], j) * vd_s[k] * vd_e[l] + input_t(i, vy_s[k], vx_w[l], j) * vd_n[k] * vd_e[l] + input_t(i, vy_n[k], vx_e[l], j) * vd_s[k] * vd_w[l] + input_t(i, vy_s[k], vx_e[l], j) * vd_n[k] * vd_w[l]; output_t(i, k, l, j) = out_t; } } } } } } template <typename T> static void TrilinearInterpolation( const Tensor& input, Tensor* output, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 5>::From(input); auto output_t = EigenTensor<T, 5>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vt_f, vt_b; std::vector<float> vd_f, vd_b; vt_f.reserve(out_d); vt_b.reserve(out_d); vd_f.reserve(out_d); vd_b.reserve(out_d); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int j = 0; j < out_d; j++) { int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; { vt_f[j] = t_f; vt_b[j] = t_b; vd_f[j] = d_f; vd_b[j] = d_b; } } std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(5) #endif for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels for (int j = 0; j < out_d; j++) { // loop for D, H, W for (int k = 0; k < out_h; k++) { for (int l = 0; l < out_w; l++) { // trilinear interpolation if (data_layout == DataLayout::kNCHW) { T out_t = input_t(b, i, vt_f[j], vy_n[k], vx_w[l]) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_n[k], vx_e[l]) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_f[j], vy_s[k], vx_w[l]) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_s[k], vx_e[l]) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_n[k], vx_w[l]) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_n[k], vx_e[l]) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_s[k], vx_w[l]) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_s[k], vx_e[l]) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, i, j, k, l) = out_t; } else { T out_t = input_t(b, vt_f[j], vy_n[k], vx_w[l], i) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, vt_f[j], vy_n[k], vx_e[l], i) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, vt_f[j], vy_s[k], vx_w[l], i) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, vt_f[j], vy_s[k], vx_e[l], i) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, vt_b[j], vy_n[k], vx_w[l], i) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, vt_b[j], vy_n[k], vx_e[l], i) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, vt_b[j], vy_s[k], vx_w[l], i) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, vt_b[j], vy_s[k], vx_e[l], i) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, j, k, l, i) = out_t; } } } } } } } template <typename T> HOSTDEVICE inline T cubic_convolution1(T x, T A) { return ((A + 2) * x - (A + 3)) * x * x + 1; } template <typename T> HOSTDEVICE inline T cubic_convolution2(T x, T A) { return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A; } template <typename T> HOSTDEVICE inline void get_cubic_upsample_coefficients(T coeffs[4], T t) { T A = -0.75; T x1 = t; coeffs[0] = cubic_convolution2<T>(x1 + 1.0, A); coeffs[1] = cubic_convolution1<T>(x1, A); // opposite coefficients T x2 = 1.0 - t; coeffs[2] = cubic_convolution1<T>(x2, A); coeffs[3] = cubic_convolution2<T>(x2 + 1.0, A); } template <typename T> static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) { T coeffs[4]; get_cubic_upsample_coefficients<T>(coeffs, t); return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3]; } template <typename T> static void BicubicInterpolation(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); for (int k = 0; k < out_h; k++) { // loop for images T y_n = align_corners ? static_cast<T>(ratio_h * k) : static_cast<T>(ratio_h * (k + 0.5) - 0.5); int input_y = floorf(y_n); const T y_t = y_n - input_y; for (int l = 0; l < out_w; l++) { T x_n = align_corners ? 
static_cast<T>(ratio_w * l) : static_cast<T>(ratio_w * (l + 0.5) - 0.5); int input_x = floorf(x_n); const T x_t = x_n - input_x; for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels T coefficients[4]; // interp 4 times in x direction for (int ii = 0; ii < 4; ii++) { int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1), static_cast<int>(0)); int access_x_0 = std::max(std::min(input_x - 1, in_w - 1), static_cast<int>(0)); int access_x_1 = std::max(std::min(input_x + 0, in_w - 1), static_cast<int>(0)); int access_x_2 = std::max(std::min(input_x + 1, in_w - 1), static_cast<int>(0)); int access_x_3 = std::max(std::min(input_x + 2, in_w - 1), static_cast<int>(0)); if (data_layout == DataLayout::kNCHW) { coefficients[ii] = cubic_interp<T>(input_t(i, j, access_y, access_x_0), input_t(i, j, access_y, access_x_1), input_t(i, j, access_y, access_x_2), input_t(i, j, access_y, access_x_3), x_t); } else { coefficients[ii] = cubic_interp<T>(input_t(i, access_y, access_x_0, j), input_t(i, access_y, access_x_1, j), input_t(i, access_y, access_x_2, j), input_t(i, access_y, access_x_3, j), x_t); } } // interp y direction if (data_layout == DataLayout::kNCHW) { output_t(i, j, k, l) = cubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } else { output_t(i, k, l, j) = cubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } } } } } } template <typename T> static void NearestNeighborInterpolateGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { input_grad_t(i, j, in_k, in_l) += output_grad_t(i, j, k, l); } else { input_grad_t(i, in_k, in_l, j) += output_grad_t(i, k, l, j); } } } } } } template <typename T> static void NearestNeighbor3DInterpolateGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_d, const float ratio_h, const float ratio_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 5>::From(*input_grad); auto output_grad_t = EigenTensor<T, 5>::From(output_grad); for (int d = 0; d < out_d; d++) { int in_d = (align_corners) ? static_cast<int>(ratio_d * d + 0.5) : static_cast<int>(ratio_d * d); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? 
static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { input_grad_t(i, j, in_d, in_k, in_l) += output_grad_t(i, j, d, k, l); } else { input_grad_t(i, in_d, in_k, in_l, j) += output_grad_t(i, d, k, l, j); } } } } } } } template <typename T> static void BilinearInterpolationGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int k = 0; k < out_h; k++) { // loop for images int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // bilinear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(i, j, k, l); input_grad_t(i, j, y_n, x_w) += static_cast<T>(grad * d_s * d_e); input_grad_t(i, j, y_s, x_w) += static_cast<T>(grad * d_n * d_e); input_grad_t(i, j, y_n, x_e) += static_cast<T>(grad * d_s * d_w); input_grad_t(i, j, y_s, x_e) += static_cast<T>(grad * d_n * d_w); } else { const T grad = output_grad_t(i, k, l, j); input_grad_t(i, y_n, x_w, j) += static_cast<T>(grad * d_s * d_e); input_grad_t(i, y_s, x_w, j) += static_cast<T>(grad * d_n * d_e); input_grad_t(i, y_n, x_e, j) += static_cast<T>(grad * d_s * d_w); input_grad_t(i, y_s, x_e, j) += static_cast<T>(grad * d_n * d_w); } } } } } } template <typename T> static void TrilinearInterpolationGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 5>::From(*input_grad); auto output_grad_t = EigenTensor<T, 5>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int j = 0; j < out_d; j++) { // loop for D int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; for (int k = 0; k < out_h; k++) { // loop for H int y_n = align_flag ? 
static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { // loop for W int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels // trilinear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(b, i, j, k, l); input_grad_t(b, i, t_f, y_n, x_w) += static_cast<T>(grad * d_b * d_s * d_e); input_grad_t(b, i, t_f, y_n, x_e) += static_cast<T>(grad * d_b * d_s * d_w); input_grad_t(b, i, t_f, y_s, x_w) += static_cast<T>(grad * d_b * d_n * d_e); input_grad_t(b, i, t_f, y_s, x_e) += static_cast<T>(grad * d_b * d_n * d_w); input_grad_t(b, i, t_b, y_n, x_w) += static_cast<T>(grad * d_f * d_s * d_e); input_grad_t(b, i, t_b, y_n, x_e) += static_cast<T>(grad * d_f * d_s * d_w); input_grad_t(b, i, t_b, y_s, x_w) += static_cast<T>(grad * d_f * d_n * d_e); input_grad_t(b, i, t_b, y_s, x_e) += static_cast<T>(grad * d_f * d_n * d_w); } else { const T grad = output_grad_t(b, j, k, l, i); input_grad_t(b, t_f, y_n, x_w, i) += static_cast<T>(grad * d_b * d_s * d_e); input_grad_t(b, t_f, y_n, x_e, i) += static_cast<T>(grad * d_b * d_s * d_w); input_grad_t(b, t_f, y_s, x_w, i) += static_cast<T>(grad * d_b * d_n * d_e); input_grad_t(b, t_f, y_s, x_e, i) += static_cast<T>(grad * d_b * d_n * d_w); input_grad_t(b, t_b, y_n, x_w, i) += static_cast<T>(grad * d_f * d_s * d_e); input_grad_t(b, t_b, y_n, x_e, i) += static_cast<T>(grad * d_f * d_s * d_w); input_grad_t(b, t_b, y_s, x_w, i) += static_cast<T>(grad * d_f * d_n * d_e); input_grad_t(b, t_b, y_s, x_e, i) += static_cast<T>(grad * d_f * d_n * d_w); } } } } } } } template <typename T> static void BicubicInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); for (int k = 0; k < out_h; k++) { // loop for images T y_n = align_corners ? static_cast<T>(ratio_h * k) : static_cast<T>(ratio_h * (k + 0.5) - 0.5); int input_y = floorf(y_n); T y_t = y_n - input_y; for (int l = 0; l < out_w; l++) { T x_n = align_corners ? 
static_cast<T>(ratio_w * l) : static_cast<T>(ratio_w * (l + 0.5) - 0.5); int input_x = floorf(x_n); T x_t = x_n - input_x; T x_coeffs[4]; T y_coeffs[4]; get_cubic_upsample_coefficients<T>(x_coeffs, x_t); get_cubic_upsample_coefficients<T>(y_coeffs, y_t); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // bicubic interpolation grad for (int ii = 0; ii < 4; ii++) { for (int jj = 0; jj < 4; jj++) { int access_x = std::max(std::min(input_x - 1 + ii, in_w - 1), static_cast<int>(0)); int access_y = std::max(std::min(input_y - 1 + jj, in_h - 1), static_cast<int>(0)); if (data_layout == DataLayout::kNCHW) { T grad = output_grad_t(i, j, k, l); input_grad_t(i, j, access_y, access_x) += grad * y_coeffs[jj] * x_coeffs[ii]; } else { T grad = output_grad_t(i, k, l, j); input_grad_t(i, access_y, access_x, j) += grad * y_coeffs[jj] * x_coeffs[ii]; } } } } } } } } template <typename T> static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); float scale_w = -1.; if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } else { // float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale_w = scale_data[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } else { if (scale.size() > 0) { scale_w = scale[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } } if (scale_w > 0.) { out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } } PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_w}; } else { dim_out = {n, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_w = 0.f; if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("linear" == interp_method) { LinearInterpolation<T>(input, output, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_h = -1; float scale_w = -1; auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } else { auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_h = scale_data[0]; scale_w = scale_data[1]; } else { scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } else { if (scale.size() > 1) { scale_h = scale[0]; scale_w = scale[1]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } } if (scale_h > 0. && scale_w > 0.) { out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } } PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_h, out_w}; } else { dim_out = {n, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? 
static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("bilinear" == interp_method) { BilinearInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolate<T>(input, output, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_d = -1; float scale_h = -1; float scale_w = -1; auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } else { auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_d = scale_data[0]; scale_h = scale_data[1]; scale_w = scale_data[2]; } else { scale_d = scale_data[0]; scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } else { if (scale.size() > 1) { scale_d = scale[0]; scale_h = scale[1]; scale_w = scale[2]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } } if (scale_w > 0. && scale_h > 0. && scale_d > 0.) 
{ out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } } PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument( "out_d in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_d, out_h, out_w}; } else { dim_out = {n, out_d, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { float new_scale_d = 0.f; new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d) : static_cast<float>(in_d) / out_d; ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(new_scale_d); } if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("trilinear" == interp_method) { TrilinearInterpolation<T>(input, output, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighbor3DInterpolate<T>(input, output, ratio_d, ratio_h, ratio_w, n, c, out_d, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate1DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); float scale_w = -1.0; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale_w = scale_data[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } else { if (scale.size() > 0) { scale_w = scale[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } } if (scale_w > 0.) { out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_w}; } else { dim_grad = {n, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_w = 0.f; if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("linear" == interp_method) { LinearInterpolationGrad<T>(output_grad, input_grad, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_h = scale_data[0]; scale_w = scale_data[1]; } else { scale_w = scale_data[0]; scale_h = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } else { if (scale.size() > 1) { scale_h = scale[0]; scale_w = scale[1]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } } if (scale_h > 0. && scale_w > 0.) { out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_h, in_w}; } else { dim_grad = {n, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? 
static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("bilinear" == interp_method) { BilinearInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolateGrad<T>(output_grad, input_grad, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_d = -1; float scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_d = scale_data[0]; scale_h = scale_data[1]; scale_w = scale_data[2]; } else { scale_d = scale_data[0]; scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } else { if (scale.size() > 1) { scale_d = scale[0]; scale_h = scale[1]; scale_w = scale[2]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } } if (scale_d > 0. && scale_h > 0. && scale_w > 0.) 
{ out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_d, in_h, in_w}; } else { dim_grad = {n, in_d, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { float new_scale_d = 0.f; new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d) : static_cast<float>(in_d) / out_d; ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(new_scale_d); } if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("trilinear" == interp_method) { TrilinearInterpolationGrad<T>( output_grad, input_grad, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighbor3DInterpolateGrad<T>(output_grad, input_grad, ratio_d, ratio_h, ratio_w, n, c, out_d, out_h, out_w, align_corners, data_layout); } } template <typename T> class InterpolateV2Kernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto input_dims = input->dims(); if (input_dims.size() == 3) { // 1D interpolation Interpolate1DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 4) { // 2D interpolation Interpolate2DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 5) { // 3D interpolation Interpolate3DCPUFwd<T>(ctx, *input, output); } } }; template <typename T> class InterpolateV2GradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto output_grad_dims = output_grad->dims(); if (output_grad_dims.size() == 3) { // 1D interpolation grad Interpolate1DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 4) { // 2D interpolation grad Interpolate2DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 5) { // 3D interpolation grad Interpolate3DCPUBwd<T>(ctx, input_grad, *output_grad); } } }; } // namespace operators } // namespace paddle
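// Illustrative sketch, not part of the operator (all names here are local to
// the example): the source-coordinate mapping shared by the interpolation
// kernels above, i.e. how ratio_* plus the align_corners/align_mode flags
// turn an output index into a left source index and interpolation lambda.
#include <algorithm>
#include <cstdio>

// Mirrors the ratio computation in Interpolate*CPUFwd: with align_corners
// the endpoints map exactly; otherwise the ratio is the inverse scale.
static float ComputeRatio(int in_w, int out_w, float scale_w,
                          bool align_corners) {
  if (out_w <= 1) return 0.f;
  float new_scale =
      (scale_w > 0) ? 1.f / scale_w : static_cast<float>(in_w) / out_w;
  return align_corners ? static_cast<float>(in_w - 1) / (out_w - 1)
                       : new_scale;
}

// Mirrors the per-index computation in LinearInterpolation: left neighbor
// x_w and lambda d_w toward the right neighbor (half-pixel centers when
// align_mode == 0 and align_corners is off).
static void SrcIndex(float ratio, int l, bool align_corners, int align_mode,
                     int* x_w, float* d_w) {
  bool align_flag = (align_mode == 0 && !align_corners);
  *x_w = align_flag ? static_cast<int>(ratio * (l + 0.5f) - 0.5f)
                    : static_cast<int>(ratio * l);
  *x_w = std::max(*x_w, 0);
  float idx = std::max(ratio * (l + 0.5f) - 0.5f, 0.f);
  *d_w = align_flag ? idx - *x_w : ratio * l - *x_w;
}

int main() {
  const int in_w = 4, out_w = 8;  // upscale a 4-wide row to 8 samples
  for (int corners = 0; corners <= 1; ++corners) {
    float ratio = ComputeRatio(in_w, out_w, /*scale_w=*/-1.f, corners != 0);
    std::printf("align_corners=%d ratio=%.3f\n", corners, ratio);
    for (int l = 0; l < out_w; ++l) {
      int x_w;
      float d_w;
      SrcIndex(ratio, l, corners != 0, /*align_mode=*/0, &x_w, &d_w);
      std::printf("  out %d -> left %d, lambda %.3f\n", l, x_w, d_w);
    }
  }
  return 0;
}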
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int k = 0; k < out_h; k++) { // loop for images for (int l = 0; l < out_w; l++) { // bilinear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vy_n[k], vx_w[l]) * vd_s[k] * vd_e[l] + input_t(i, j, vy_s[k], vx_w[l]) * vd_n[k] * vd_e[l] + input_t(i, j, vy_n[k], vx_e[l]) * vd_s[k] * vd_w[l] + input_t(i, j, vy_s[k], vx_e[l]) * vd_n[k] * vd_w[l]; output_t(i, j, k, l) = out_t; } else { out_t = input_t(i, vy_n[k], vx_w[l], j) * vd_s[k] * vd_e[l] + input_t(i, vy_s[k], vx_w[l], j) * vd_n[k] * vd_e[l] + input_t(i, vy_n[k], vx_e[l], j) * vd_s[k] * vd_w[l] + input_t(i, vy_s[k], vx_e[l], j) * vd_n[k] * vd_w[l]; output_t(i, k, l, j) = out_t; } } } } } } template <typename T> static void TrilinearInterpolation( const Tensor& input, Tensor* output, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 5>::From(input); auto output_t = EigenTensor<T, 5>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vt_f, vt_b; std::vector<float> vd_f, vd_b; vt_f.reserve(out_d); vt_b.reserve(out_d); vd_f.reserve(out_d); vd_b.reserve(out_d); #ifdef PADDLE_WITH_MKLML #endif for (int j = 0; j < out_d; j++) { int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; { vt_f[j] = t_f; vt_b[j] = t_b; vd_f[j] = d_f; vd_b[j] = d_b; } } std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #endif for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels for (int j = 0; j < out_d; j++) { // loop for D, H, W for (int k = 0; k < out_h; k++) { for (int l = 0; l < out_w; l++) { // trilinear interpolation if (data_layout == DataLayout::kNCHW) { T out_t = input_t(b, i, vt_f[j], vy_n[k], vx_w[l]) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_n[k], vx_e[l]) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_f[j], vy_s[k], vx_w[l]) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_s[k], vx_e[l]) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_n[k], vx_w[l]) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_n[k], vx_e[l]) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_s[k], vx_w[l]) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_s[k], vx_e[l]) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, i, j, k, l) = out_t; } else { T out_t = input_t(b, vt_f[j], vy_n[k], vx_w[l], i) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, vt_f[j], vy_n[k], vx_e[l], i) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, vt_f[j], vy_s[k], vx_w[l], i) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, vt_f[j], vy_s[k], vx_e[l], i) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, vt_b[j], vy_n[k], vx_w[l], i) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, vt_b[j], vy_n[k], vx_e[l], i) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, vt_b[j], vy_s[k], vx_w[l], i) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, vt_b[j], vy_s[k], vx_e[l], i) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, j, k, l, i) = out_t; } } } } } } } template <typename T> HOSTDEVICE inline T cubic_convolution1(T x, T A) { return ((A + 2) * x - (A + 3)) * x * x + 1; } template <typename T> HOSTDEVICE inline T cubic_convolution2(T x, T A) { return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A; } template <typename T> HOSTDEVICE inline void get_cubic_upsample_coefficients(T coeffs[4], T t) { T A = -0.75; T x1 = t; coeffs[0] = cubic_convolution2<T>(x1 + 1.0, A); coeffs[1] = cubic_convolution1<T>(x1, A); // opposite coefficients T x2 = 1.0 - t; coeffs[2] = cubic_convolution1<T>(x2, A); coeffs[3] = cubic_convolution2<T>(x2 + 1.0, A); } template <typename T> static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) { T coeffs[4]; get_cubic_upsample_coefficients<T>(coeffs, t); return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3]; } template <typename T> static void BicubicInterpolation(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); for (int k = 0; k < out_h; k++) { // loop for images T y_n = align_corners ? static_cast<T>(ratio_h * k) : static_cast<T>(ratio_h * (k + 0.5) - 0.5); int input_y = floorf(y_n); const T y_t = y_n - input_y; for (int l = 0; l < out_w; l++) { T x_n = align_corners ? 
static_cast<T>(ratio_w * l) : static_cast<T>(ratio_w * (l + 0.5) - 0.5); int input_x = floorf(x_n); const T x_t = x_n - input_x; for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels T coefficients[4]; // interp 4 times in x direction for (int ii = 0; ii < 4; ii++) { int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1), static_cast<int>(0)); int access_x_0 = std::max(std::min(input_x - 1, in_w - 1), static_cast<int>(0)); int access_x_1 = std::max(std::min(input_x + 0, in_w - 1), static_cast<int>(0)); int access_x_2 = std::max(std::min(input_x + 1, in_w - 1), static_cast<int>(0)); int access_x_3 = std::max(std::min(input_x + 2, in_w - 1), static_cast<int>(0)); if (data_layout == DataLayout::kNCHW) { coefficients[ii] = cubic_interp<T>(input_t(i, j, access_y, access_x_0), input_t(i, j, access_y, access_x_1), input_t(i, j, access_y, access_x_2), input_t(i, j, access_y, access_x_3), x_t); } else { coefficients[ii] = cubic_interp<T>(input_t(i, access_y, access_x_0, j), input_t(i, access_y, access_x_1, j), input_t(i, access_y, access_x_2, j), input_t(i, access_y, access_x_3, j), x_t); } } // interp y direction if (data_layout == DataLayout::kNCHW) { output_t(i, j, k, l) = cubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } else { output_t(i, k, l, j) = cubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } } } } } } template <typename T> static void NearestNeighborInterpolateGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { input_grad_t(i, j, in_k, in_l) += output_grad_t(i, j, k, l); } else { input_grad_t(i, in_k, in_l, j) += output_grad_t(i, k, l, j); } } } } } } template <typename T> static void NearestNeighbor3DInterpolateGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_d, const float ratio_h, const float ratio_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 5>::From(*input_grad); auto output_grad_t = EigenTensor<T, 5>::From(output_grad); for (int d = 0; d < out_d; d++) { int in_d = (align_corners) ? static_cast<int>(ratio_d * d + 0.5) : static_cast<int>(ratio_d * d); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? 
static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { input_grad_t(i, j, in_d, in_k, in_l) += output_grad_t(i, j, d, k, l); } else { input_grad_t(i, in_d, in_k, in_l, j) += output_grad_t(i, d, k, l, j); } } } } } } } template <typename T> static void BilinearInterpolationGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int k = 0; k < out_h; k++) { // loop for images int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // bilinear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(i, j, k, l); input_grad_t(i, j, y_n, x_w) += static_cast<T>(grad * d_s * d_e); input_grad_t(i, j, y_s, x_w) += static_cast<T>(grad * d_n * d_e); input_grad_t(i, j, y_n, x_e) += static_cast<T>(grad * d_s * d_w); input_grad_t(i, j, y_s, x_e) += static_cast<T>(grad * d_n * d_w); } else { const T grad = output_grad_t(i, k, l, j); input_grad_t(i, y_n, x_w, j) += static_cast<T>(grad * d_s * d_e); input_grad_t(i, y_s, x_w, j) += static_cast<T>(grad * d_n * d_e); input_grad_t(i, y_n, x_e, j) += static_cast<T>(grad * d_s * d_w); input_grad_t(i, y_s, x_e, j) += static_cast<T>(grad * d_n * d_w); } } } } } } template <typename T> static void TrilinearInterpolationGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 5>::From(*input_grad); auto output_grad_t = EigenTensor<T, 5>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int j = 0; j < out_d; j++) { // loop for D int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; for (int k = 0; k < out_h; k++) { // loop for H int y_n = align_flag ? 
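/* The backward pass re-derives the same half-pixel indices and lambdas as
   the forward pass, then scatters each output gradient to the 2x2x2
   neighbouring source voxels; the eight weight products (d_f/d_b by
   d_n/d_s by d_w/d_e, each pair summing to 1) sum to 1 overall. */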
static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { // loop for W int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels // trilinear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(b, i, j, k, l); input_grad_t(b, i, t_f, y_n, x_w) += static_cast<T>(grad * d_b * d_s * d_e); input_grad_t(b, i, t_f, y_n, x_e) += static_cast<T>(grad * d_b * d_s * d_w); input_grad_t(b, i, t_f, y_s, x_w) += static_cast<T>(grad * d_b * d_n * d_e); input_grad_t(b, i, t_f, y_s, x_e) += static_cast<T>(grad * d_b * d_n * d_w); input_grad_t(b, i, t_b, y_n, x_w) += static_cast<T>(grad * d_f * d_s * d_e); input_grad_t(b, i, t_b, y_n, x_e) += static_cast<T>(grad * d_f * d_s * d_w); input_grad_t(b, i, t_b, y_s, x_w) += static_cast<T>(grad * d_f * d_n * d_e); input_grad_t(b, i, t_b, y_s, x_e) += static_cast<T>(grad * d_f * d_n * d_w); } else { const T grad = output_grad_t(b, j, k, l, i); input_grad_t(b, t_f, y_n, x_w, i) += static_cast<T>(grad * d_b * d_s * d_e); input_grad_t(b, t_f, y_n, x_e, i) += static_cast<T>(grad * d_b * d_s * d_w); input_grad_t(b, t_f, y_s, x_w, i) += static_cast<T>(grad * d_b * d_n * d_e); input_grad_t(b, t_f, y_s, x_e, i) += static_cast<T>(grad * d_b * d_n * d_w); input_grad_t(b, t_b, y_n, x_w, i) += static_cast<T>(grad * d_f * d_s * d_e); input_grad_t(b, t_b, y_n, x_e, i) += static_cast<T>(grad * d_f * d_s * d_w); input_grad_t(b, t_b, y_s, x_w, i) += static_cast<T>(grad * d_f * d_n * d_e); input_grad_t(b, t_b, y_s, x_e, i) += static_cast<T>(grad * d_f * d_n * d_w); } } } } } } } template <typename T> static void BicubicInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); for (int k = 0; k < out_h; k++) { // loop for images T y_n = align_corners ? static_cast<T>(ratio_h * k) : static_cast<T>(ratio_h * (k + 0.5) - 0.5); int input_y = floorf(y_n); T y_t = y_n - input_y; for (int l = 0; l < out_w; l++) { T x_n = align_corners ? 
static_cast<T>(ratio_w * l) : static_cast<T>(ratio_w * (l + 0.5) - 0.5); int input_x = floorf(x_n); T x_t = x_n - input_x; T x_coeffs[4]; T y_coeffs[4]; get_cubic_upsample_coefficients<T>(x_coeffs, x_t); get_cubic_upsample_coefficients<T>(y_coeffs, y_t); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // bicubic interpolation grad for (int ii = 0; ii < 4; ii++) { for (int jj = 0; jj < 4; jj++) { int access_x = std::max(std::min(input_x - 1 + ii, in_w - 1), static_cast<int>(0)); int access_y = std::max(std::min(input_y - 1 + jj, in_h - 1), static_cast<int>(0)); if (data_layout == DataLayout::kNCHW) { T grad = output_grad_t(i, j, k, l); input_grad_t(i, j, access_y, access_x) += grad * y_coeffs[jj] * x_coeffs[ii]; } else { T grad = output_grad_t(i, k, l, j); input_grad_t(i, access_y, access_x, j) += grad * y_coeffs[jj] * x_coeffs[ii]; } } } } } } } } template <typename T> static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); float scale_w = -1.; if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } else { // float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale_w = scale_data[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } else { if (scale.size() > 0) { scale_w = scale[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } } if (scale_w > 0.) { out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } } PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_w}; } else { dim_out = {n, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_w = 0.f; if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("linear" == interp_method) { LinearInterpolation<T>(input, output, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_h = -1; float scale_w = -1; auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } else { auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_h = scale_data[0]; scale_w = scale_data[1]; } else { scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } else { if (scale.size() > 1) { scale_h = scale[0]; scale_w = scale[1]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } } if (scale_h > 0. && scale_w > 0.) { out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } } PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_h, out_w}; } else { dim_out = {n, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? 
static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("bilinear" == interp_method) { BilinearInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolate<T>(input, output, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_d = -1; float scale_h = -1; float scale_w = -1; auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } else { auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_d = scale_data[0]; scale_h = scale_data[1]; scale_w = scale_data[2]; } else { scale_d = scale_data[0]; scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } else { if (scale.size() > 1) { scale_d = scale[0]; scale_h = scale[1]; scale_w = scale[2]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } } if (scale_w > 0. && scale_h > 0. && scale_d > 0.) 
{ out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } } PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument( "out_d in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_d, out_h, out_w}; } else { dim_out = {n, out_d, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { float new_scale_d = 0.f; new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d) : static_cast<float>(in_d) / out_d; ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(new_scale_d); } if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("trilinear" == interp_method) { TrilinearInterpolation<T>(input, output, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighbor3DInterpolate<T>(input, output, ratio_d, ratio_h, ratio_w, n, c, out_d, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate1DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); float scale_w = -1.0; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale_w = scale_data[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } else { if (scale.size() > 0) { scale_w = scale[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } } if (scale_w > 0.) { out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_w}; } else { dim_grad = {n, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_w = 0.f; if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("linear" == interp_method) { LinearInterpolationGrad<T>(output_grad, input_grad, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_h = scale_data[0]; scale_w = scale_data[1]; } else { scale_w = scale_data[0]; scale_h = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } else { if (scale.size() > 1) { scale_h = scale[0]; scale_w = scale[1]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } } if (scale_h > 0. && scale_w > 0.) { out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_h, in_w}; } else { dim_grad = {n, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? 
                    static_cast<float>(1. / scale_w)
                    : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  if ("bilinear" == interp_method) {
    BilinearInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w,
                                 in_h, in_w, n, c, out_h, out_w,
                                 align_corners, align_mode, data_layout);
  } else if ("nearest" == interp_method) {
    NearestNeighborInterpolateGrad<T>(output_grad, input_grad, ratio_h,
                                      ratio_w, n, c, out_h, out_w,
                                      align_corners, data_layout);
  } else if ("bicubic" == interp_method) {
    BicubicInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w,
                                in_h, in_w, n, c, out_h, out_w,
                                align_corners, data_layout);
  }
}

template <typename T>
static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad,
                                const Tensor& output_grad) {
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout =
      framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");

  int out_d = ctx.Attr<int>("out_d");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale_d = -1;
  float scale_h = -1;
  float scale_w = -1;
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  auto scale = ctx.Attr<std::vector<float>>("scale");
  if (scale_tensor != nullptr) {
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    if (scale_data.size() > 2) {
      scale_d = scale_data[0];
      scale_h = scale_data[1];
      scale_w = scale_data[2];
    } else {
      scale_d = scale_data[0];
      scale_h = scale_data[0];
      scale_w = scale_data[0];
    }
    PADDLE_ENFORCE_EQ(
        scale_w > 0, true,
        platform::errors::InvalidArgument(
            "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %f.",
            scale_w));
    PADDLE_ENFORCE_EQ(
        scale_h > 0, true,
        platform::errors::InvalidArgument(
            "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %f.",
            scale_h));
    PADDLE_ENFORCE_EQ(
        scale_d > 0, true,
        platform::errors::InvalidArgument(
            "The scale_d in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %f.",
            scale_d));
  } else {
    if (scale.size() > 2) {
      scale_d = scale[0];
      scale_h = scale[1];
      scale_w = scale[2];
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0, true,
          platform::errors::InvalidArgument(
              "The scale_h in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_h));
      PADDLE_ENFORCE_EQ(
          scale_d > 0, true,
          platform::errors::InvalidArgument(
              "The scale_d in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_d));
    }
  }
  if (scale_d > 0. && scale_h > 0. && scale_w > 0.)
{ out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_d, in_h, in_w}; } else { dim_grad = {n, in_d, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { float new_scale_d = 0.f; new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d) : static_cast<float>(in_d) / out_d; ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(new_scale_d); } if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("trilinear" == interp_method) { TrilinearInterpolationGrad<T>( output_grad, input_grad, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighbor3DInterpolateGrad<T>(output_grad, input_grad, ratio_d, ratio_h, ratio_w, n, c, out_d, out_h, out_w, align_corners, data_layout); } } template <typename T> class InterpolateV2Kernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto input_dims = input->dims(); if (input_dims.size() == 3) { // 1D interpolation Interpolate1DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 4) { // 2D interpolation Interpolate2DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 5) { // 3D interpolation Interpolate3DCPUFwd<T>(ctx, *input, output); } } }; template <typename T> class InterpolateV2GradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto output_grad_dims = output_grad->dims(); if (output_grad_dims.size() == 3) { // 1D interpolation grad Interpolate1DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 4) { // 2D interpolation grad Interpolate2DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 5) { // 3D interpolation grad Interpolate3DCPUBwd<T>(ctx, input_grad, *output_grad); } } }; } // namespace operators } // namespace paddle
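// LinearInterpolation, BilinearInterpolation, and TrilinearInterpolation
// above repeat one per-axis precompute: the two neighbouring source
// indices plus their lambda weights (vx_w / vx_e / vd_w / vd_e and
// friends). A compact standalone sketch of that step, with assumed names
// and the `hi` clamp written as in the linear variant (the bilinear and
// trilinear variants spell the clamp slightly differently):
#include <vector>

struct AxisWeightsSketch {
  std::vector<int> lo, hi;        // the two neighbouring source indices
  std::vector<float> w_lo, w_hi;  // their interpolation weights
};

static AxisWeightsSketch PrecomputeAxisSketch(int in, int out, float ratio,
                                              bool align_flag) {
  AxisWeightsSketch a;
  a.lo.resize(out);
  a.hi.resize(out);
  a.w_lo.resize(out);
  a.w_hi.resize(out);
  for (int l = 0; l < out; ++l) {
    // half-pixel rule when align_flag, plain scaling otherwise
    float src = align_flag ? ratio * (l + 0.5f) - 0.5f : ratio * l;
    int lo = src > 0.f ? static_cast<int>(src) : 0;
    if (src < 0.f) src = 0.f;
    int hi = (lo < in - 1) ? lo + 1 : lo;
    float d = src - lo;  // distance to the low neighbour
    // e.g. ratio = 0.5, l = 3, align_flag: src = 1.25 -> lo = 1, hi = 2,
    // w_hi = 0.25, w_lo = 0.75
    a.lo[l] = lo;
    a.hi[l] = hi;
    a.w_hi[l] = d;
    a.w_lo[l] = 1.f - d;
  }
  return a;
}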
#pragma once

#include <algorithm>
#include <string>
#include <vector>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/hostdevice.h"

namespace paddle {
namespace operators {

template <typename T, size_t D, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;

inline std::vector<int> get_new_shape(
    const std::vector<const Tensor*>& list_new_shape_tensor) {
  // get the target shape from a list of 1-element shape tensors
  std::vector<int> vec_new_shape;
  for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) {
    auto tensor = list_new_shape_tensor[i];
    PADDLE_ENFORCE_EQ(tensor->dims(), framework::make_ddim({1}),
                      platform::errors::InvalidArgument(
                          "The shape of dimension tensor should be [1], "
                          "but received %s.",
                          tensor->dims()));
    if (platform::is_gpu_place(tensor->place())) {
      framework::Tensor temp;
      TensorCopySync(*tensor, platform::CPUPlace(), &temp);
      vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>()));
    } else {
      vec_new_shape.push_back(static_cast<int32_t>(*tensor->data<int32_t>()));
    }
  }
  return vec_new_shape;
}

template <typename T>
inline std::vector<T> get_new_data_from_tensor(const Tensor* new_data_tensor) {
  std::vector<T> vec_new_data;
  auto* new_data = new_data_tensor->data<T>();
  framework::Tensor cpu_starts_tensor;
  if (platform::is_gpu_place(new_data_tensor->place())) {
    TensorCopySync(*new_data_tensor, platform::CPUPlace(), &cpu_starts_tensor);
    new_data = cpu_starts_tensor.data<T>();
  }
#ifdef PADDLE_WITH_ASCEND_CL
  if (platform::is_npu_place(new_data_tensor->place())) {
    TensorCopySync(*new_data_tensor, platform::CPUPlace(), &cpu_starts_tensor);
    new_data = cpu_starts_tensor.data<T>();
  }
#endif
  vec_new_data = std::vector<T>(new_data, new_data + new_data_tensor->numel());
  return vec_new_data;
}

inline void ExtractNCDWH(const framework::DDim& dims,
                         const DataLayout& data_layout, int* N, int* C,
                         int* D, int* H, int* W) {
  *N = dims[0];
  if (dims.size() == 3) {
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[2];
    *D = 1;
    *H = 1;
    *W = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
  } else if (dims.size() == 4) {
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[3];
    *D = 1;
    *H = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
    *W = data_layout == DataLayout::kNCHW ? dims[3] : dims[2];
  } else {
    *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[4];
    *D = data_layout == DataLayout::kNCHW ? dims[2] : dims[1];
    *H = data_layout == DataLayout::kNCHW ? dims[3] : dims[2];
    *W = data_layout == DataLayout::kNCHW ? dims[4] : dims[3];
  }
}

template <typename T>
static void NearestNeighborInterpolate(const Tensor& input, Tensor* output,
                                       const float ratio_h,
                                       const float ratio_w, const int n,
                                       const int c, const int out_h,
                                       const int out_w,
                                       const bool align_corners,
                                       const DataLayout& data_layout) {
  auto input_t = EigenTensor<T, 4>::From(input);
  auto output_t = EigenTensor<T, 4>::From(*output);
  for (int k = 0; k < out_h; k++) {  // loop for images
    int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5)
                               : static_cast<int>(ratio_h * k);
    for (int l = 0; l < out_w; l++) {
      int in_l = (align_corners) ?
static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { output_t(i, j, k, l) = input_t(i, j, in_k, in_l); } else { output_t(i, k, l, j) = input_t(i, in_k, in_l, j); } } } } } } template <typename T> static void NearestNeighbor3DInterpolate( const Tensor& input, Tensor* output, const float ratio_d, const float ratio_h, const float ratio_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 5>::From(input); auto output_t = EigenTensor<T, 5>::From(*output); for (int d = 0; d < out_d; d++) { // loop for images int in_d = (align_corners) ? static_cast<int>(ratio_d * d + 0.5) : static_cast<int>(ratio_d * d); for (int k = 0; k < out_h; k++) { int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { output_t(i, j, d, k, l) = input_t(i, j, in_d, in_k, in_l); } else { // NDHWC output_t(i, d, k, l, j) = input_t(i, in_d, in_k, in_l, j); } } } } } } } template <typename T> static void LinearInterpolation(const Tensor& input, Tensor* output, const float ratio_w, const int in_w, const int n, const int c, const int out_w, const bool align_corners, const bool align_mode, const DataLayout data_layout) { auto input_t = EigenTensor<T, 3>::From(input); auto output_t = EigenTensor<T, 3>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; // w int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; // w1lambda float d_e = 1.f - d_w; // w2lambda { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(3) #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int l = 0; l < out_w; l++) { // linear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vx_w[l]) * vd_e[l] + input_t(i, j, vx_e[l]) * vd_w[l]; output_t(i, j, l) = out_t; } else { out_t = input_t(i, vx_w[l], j) * vd_e[l] + input_t(i, vx_e[l], j) * vd_w[l]; output_t(i, l, j) = out_t; } } } } } template <typename T> static void LinearInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_w, const int in_w, const int n, const int c, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 3>::From(*input_grad); auto output_grad_t = EigenTensor<T, 3>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int l = 0; l < out_w; l++) { int x_w = align_flag ? 
static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; // w int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; // w1lambda float d_e = 1.f - d_w; // w2lambda for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // linear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(i, j, l); input_grad_t(i, j, x_w) += static_cast<T>(grad * d_e); input_grad_t(i, j, x_e) += static_cast<T>(grad * d_w); } else { const T grad = output_grad_t(i, l, j); input_grad_t(i, x_w, j) += static_cast<T>(grad * d_e); input_grad_t(i, x_e, j) += static_cast<T>(grad * d_w); } } } } } template <typename T> static void BilinearInterpolation(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(4) #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int k = 0; k < out_h; k++) { // loop for images for (int l = 0; l < out_w; l++) { // bilinear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vy_n[k], vx_w[l]) * vd_s[k] * vd_e[l] + input_t(i, j, vy_s[k], vx_w[l]) * vd_n[k] * vd_e[l] + input_t(i, j, vy_n[k], vx_e[l]) * vd_s[k] * vd_w[l] + input_t(i, j, vy_s[k], vx_e[l]) * vd_n[k] * vd_w[l]; output_t(i, j, k, l) = out_t; } else { out_t = input_t(i, vy_n[k], vx_w[l], j) * vd_s[k] * vd_e[l] + input_t(i, vy_s[k], vx_w[l], j) * vd_n[k] * vd_e[l] + input_t(i, vy_n[k], vx_e[l], j) * vd_s[k] * vd_w[l] + input_t(i, vy_s[k], vx_e[l], j) * vd_n[k] * vd_w[l]; output_t(i, k, l, j) = out_t; } } } } } } template <typename T> static void TrilinearInterpolation( const Tensor& input, Tensor* output, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 5>::From(input); auto output_t = EigenTensor<T, 5>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vt_f, vt_b; std::vector<float> vd_f, vd_b; vt_f.reserve(out_d); vt_b.reserve(out_d); vd_f.reserve(out_d); vd_b.reserve(out_d); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int j = 0; j < out_d; j++) { int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; { vt_f[j] = t_f; vt_b[j] = t_b; vd_f[j] = d_f; vd_b[j] = d_b; } } std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(5) #endif for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels for (int j = 0; j < out_d; j++) { // loop for D, H, W for (int k = 0; k < out_h; k++) { for (int l = 0; l < out_w; l++) { // trilinear interpolation if (data_layout == DataLayout::kNCHW) { T out_t = input_t(b, i, vt_f[j], vy_n[k], vx_w[l]) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_n[k], vx_e[l]) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_f[j], vy_s[k], vx_w[l]) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_s[k], vx_e[l]) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_n[k], vx_w[l]) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_n[k], vx_e[l]) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_s[k], vx_w[l]) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_s[k], vx_e[l]) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, i, j, k, l) = out_t; } else { T out_t = input_t(b, vt_f[j], vy_n[k], vx_w[l], i) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, vt_f[j], vy_n[k], vx_e[l], i) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, vt_f[j], vy_s[k], vx_w[l], i) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, vt_f[j], vy_s[k], vx_e[l], i) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, vt_b[j], vy_n[k], vx_w[l], i) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, vt_b[j], vy_n[k], vx_e[l], i) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, vt_b[j], vy_s[k], vx_w[l], i) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, vt_b[j], vy_s[k], vx_e[l], i) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, j, k, l, i) = out_t; } } } } } } } template <typename T> HOSTDEVICE inline T cubic_convolution1(T x, T A) { return ((A + 2) * x - (A + 3)) * x * x + 1; } template <typename T> HOSTDEVICE inline T cubic_convolution2(T x, T A) { return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A; } template <typename T> HOSTDEVICE inline void get_cubic_upsample_coefficients(T coeffs[4], T t) { T A = -0.75; T x1 = t; coeffs[0] = cubic_convolution2<T>(x1 + 1.0, A); coeffs[1] = cubic_convolution1<T>(x1, A); // opposite coefficients T x2 = 1.0 - t; coeffs[2] = cubic_convolution1<T>(x2, A); coeffs[3] = cubic_convolution2<T>(x2 + 1.0, A); } template <typename T> static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) { T coeffs[4]; get_cubic_upsample_coefficients<T>(coeffs, t); return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3]; } template <typename T> static void BicubicInterpolation(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); for (int k = 0; k < out_h; k++) { // loop for images T y_n = align_corners ? static_cast<T>(ratio_h * k) : static_cast<T>(ratio_h * (k + 0.5) - 0.5); int input_y = floorf(y_n); const T y_t = y_n - input_y; for (int l = 0; l < out_w; l++) { T x_n = align_corners ? 
static_cast<T>(ratio_w * l) : static_cast<T>(ratio_w * (l + 0.5) - 0.5); int input_x = floorf(x_n); const T x_t = x_n - input_x; for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels T coefficients[4]; // interp 4 times in x direction for (int ii = 0; ii < 4; ii++) { int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1), static_cast<int>(0)); int access_x_0 = std::max(std::min(input_x - 1, in_w - 1), static_cast<int>(0)); int access_x_1 = std::max(std::min(input_x + 0, in_w - 1), static_cast<int>(0)); int access_x_2 = std::max(std::min(input_x + 1, in_w - 1), static_cast<int>(0)); int access_x_3 = std::max(std::min(input_x + 2, in_w - 1), static_cast<int>(0)); if (data_layout == DataLayout::kNCHW) { coefficients[ii] = cubic_interp<T>(input_t(i, j, access_y, access_x_0), input_t(i, j, access_y, access_x_1), input_t(i, j, access_y, access_x_2), input_t(i, j, access_y, access_x_3), x_t); } else { coefficients[ii] = cubic_interp<T>(input_t(i, access_y, access_x_0, j), input_t(i, access_y, access_x_1, j), input_t(i, access_y, access_x_2, j), input_t(i, access_y, access_x_3, j), x_t); } } // interp y direction if (data_layout == DataLayout::kNCHW) { output_t(i, j, k, l) = cubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } else { output_t(i, k, l, j) = cubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } } } } } } template <typename T> static void NearestNeighborInterpolateGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { input_grad_t(i, j, in_k, in_l) += output_grad_t(i, j, k, l); } else { input_grad_t(i, in_k, in_l, j) += output_grad_t(i, k, l, j); } } } } } } template <typename T> static void NearestNeighbor3DInterpolateGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_d, const float ratio_h, const float ratio_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 5>::From(*input_grad); auto output_grad_t = EigenTensor<T, 5>::From(output_grad); for (int d = 0; d < out_d; d++) { int in_d = (align_corners) ? static_cast<int>(ratio_d * d + 0.5) : static_cast<int>(ratio_d * d); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? 
static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { input_grad_t(i, j, in_d, in_k, in_l) += output_grad_t(i, j, d, k, l); } else { input_grad_t(i, in_d, in_k, in_l, j) += output_grad_t(i, d, k, l, j); } } } } } } } template <typename T> static void BilinearInterpolationGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int k = 0; k < out_h; k++) { // loop for images int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // bilinear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(i, j, k, l); input_grad_t(i, j, y_n, x_w) += static_cast<T>(grad * d_s * d_e); input_grad_t(i, j, y_s, x_w) += static_cast<T>(grad * d_n * d_e); input_grad_t(i, j, y_n, x_e) += static_cast<T>(grad * d_s * d_w); input_grad_t(i, j, y_s, x_e) += static_cast<T>(grad * d_n * d_w); } else { const T grad = output_grad_t(i, k, l, j); input_grad_t(i, y_n, x_w, j) += static_cast<T>(grad * d_s * d_e); input_grad_t(i, y_s, x_w, j) += static_cast<T>(grad * d_n * d_e); input_grad_t(i, y_n, x_e, j) += static_cast<T>(grad * d_s * d_w); input_grad_t(i, y_s, x_e, j) += static_cast<T>(grad * d_n * d_w); } } } } } } template <typename T> static void TrilinearInterpolationGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 5>::From(*input_grad); auto output_grad_t = EigenTensor<T, 5>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int j = 0; j < out_d; j++) { // loop for D int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; for (int k = 0; k < out_h; k++) { // loop for H int y_n = align_flag ? 
static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { // loop for W int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels // trilinear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(b, i, j, k, l); input_grad_t(b, i, t_f, y_n, x_w) += static_cast<T>(grad * d_b * d_s * d_e); input_grad_t(b, i, t_f, y_n, x_e) += static_cast<T>(grad * d_b * d_s * d_w); input_grad_t(b, i, t_f, y_s, x_w) += static_cast<T>(grad * d_b * d_n * d_e); input_grad_t(b, i, t_f, y_s, x_e) += static_cast<T>(grad * d_b * d_n * d_w); input_grad_t(b, i, t_b, y_n, x_w) += static_cast<T>(grad * d_f * d_s * d_e); input_grad_t(b, i, t_b, y_n, x_e) += static_cast<T>(grad * d_f * d_s * d_w); input_grad_t(b, i, t_b, y_s, x_w) += static_cast<T>(grad * d_f * d_n * d_e); input_grad_t(b, i, t_b, y_s, x_e) += static_cast<T>(grad * d_f * d_n * d_w); } else { const T grad = output_grad_t(b, j, k, l, i); input_grad_t(b, t_f, y_n, x_w, i) += static_cast<T>(grad * d_b * d_s * d_e); input_grad_t(b, t_f, y_n, x_e, i) += static_cast<T>(grad * d_b * d_s * d_w); input_grad_t(b, t_f, y_s, x_w, i) += static_cast<T>(grad * d_b * d_n * d_e); input_grad_t(b, t_f, y_s, x_e, i) += static_cast<T>(grad * d_b * d_n * d_w); input_grad_t(b, t_b, y_n, x_w, i) += static_cast<T>(grad * d_f * d_s * d_e); input_grad_t(b, t_b, y_n, x_e, i) += static_cast<T>(grad * d_f * d_s * d_w); input_grad_t(b, t_b, y_s, x_w, i) += static_cast<T>(grad * d_f * d_n * d_e); input_grad_t(b, t_b, y_s, x_e, i) += static_cast<T>(grad * d_f * d_n * d_w); } } } } } } } template <typename T> static void BicubicInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); for (int k = 0; k < out_h; k++) { // loop for images T y_n = align_corners ? static_cast<T>(ratio_h * k) : static_cast<T>(ratio_h * (k + 0.5) - 0.5); int input_y = floorf(y_n); T y_t = y_n - input_y; for (int l = 0; l < out_w; l++) { T x_n = align_corners ? 
static_cast<T>(ratio_w * l) : static_cast<T>(ratio_w * (l + 0.5) - 0.5); int input_x = floorf(x_n); T x_t = x_n - input_x; T x_coeffs[4]; T y_coeffs[4]; get_cubic_upsample_coefficients<T>(x_coeffs, x_t); get_cubic_upsample_coefficients<T>(y_coeffs, y_t); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // bicubic interpolation grad for (int ii = 0; ii < 4; ii++) { for (int jj = 0; jj < 4; jj++) { int access_x = std::max(std::min(input_x - 1 + ii, in_w - 1), static_cast<int>(0)); int access_y = std::max(std::min(input_y - 1 + jj, in_h - 1), static_cast<int>(0)); if (data_layout == DataLayout::kNCHW) { T grad = output_grad_t(i, j, k, l); input_grad_t(i, j, access_y, access_x) += grad * y_coeffs[jj] * x_coeffs[ii]; } else { T grad = output_grad_t(i, k, l, j); input_grad_t(i, access_y, access_x, j) += grad * y_coeffs[jj] * x_coeffs[ii]; } } } } } } } } template <typename T> static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); float scale_w = -1.; if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } else { // float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale_w = scale_data[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } else { if (scale.size() > 0) { scale_w = scale[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } } if (scale_w > 0.) { out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } } PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_w}; } else { dim_out = {n, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_w = 0.f; if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("linear" == interp_method) { LinearInterpolation<T>(input, output, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_h = -1; float scale_w = -1; auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } else { auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_h = scale_data[0]; scale_w = scale_data[1]; } else { scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } else { if (scale.size() > 1) { scale_h = scale[0]; scale_w = scale[1]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } } if (scale_h > 0. && scale_w > 0.) { out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } } PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_h, out_w}; } else { dim_out = {n, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? 
static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("bilinear" == interp_method) { BilinearInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolate<T>(input, output, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_d = -1; float scale_h = -1; float scale_w = -1; auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } else { auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_d = scale_data[0]; scale_h = scale_data[1]; scale_w = scale_data[2]; } else { scale_d = scale_data[0]; scale_h = scale_data[0]; scale_w = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } else { if (scale.size() > 1) { scale_d = scale[0]; scale_h = scale[1]; scale_w = scale[2]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); PADDLE_ENFORCE_EQ( scale_d > 0, true, platform::errors::InvalidArgument( "The scale_d in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_d)); } } if (scale_w > 0. && scale_h > 0. && scale_d > 0.) 
{ out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } } PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument( "out_d in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_d, out_h, out_w}; } else { dim_out = {n, out_d, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { float new_scale_d = 0.f; new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d) : static_cast<float>(in_d) / out_d; ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(new_scale_d); } if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("trilinear" == interp_method) { TrilinearInterpolation<T>(input, output, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighbor3DInterpolate<T>(input, output, ratio_d, ratio_h, ratio_w, n, c, out_d, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate1DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); float scale_w = -1.0; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale_w = scale_data[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } else { if (scale.size() > 0) { scale_w = scale[0]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); } } if (scale_w > 0.) { out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_w}; } else { dim_grad = {n, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_w = 0.f; if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w); } if ("linear" == interp_method) { LinearInterpolationGrad<T>(output_grad, input_grad, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale_h = -1; float scale_w = -1; auto scale_tensor = ctx.Input<Tensor>("Scale"); auto scale = ctx.Attr<std::vector<float>>("scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); if (scale_data.size() > 1) { scale_h = scale_data[0]; scale_w = scale_data[1]; } else { scale_w = scale_data[0]; scale_h = scale_data[0]; } PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in input 'Scale' Tensor of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } else { if (scale.size() > 1) { scale_h = scale[0]; scale_w = scale[1]; PADDLE_ENFORCE_EQ( scale_w > 0, true, platform::errors::InvalidArgument( "The scale_w in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_w)); PADDLE_ENFORCE_EQ( scale_h > 0, true, platform::errors::InvalidArgument( "The scale_h in Attr(scale) of Operator(interpolate) " "should be greater than 0, but received value is %d.", scale_h)); } } if (scale_h > 0. && scale_w > 0.) { out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_h, in_w}; } else { dim_grad = {n, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? 
static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  if ("bilinear" == interp_method) {
    BilinearInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w,
                                 in_h, in_w, n, c, out_h, out_w, align_corners,
                                 align_mode, data_layout);
  } else if ("nearest" == interp_method) {
    NearestNeighborInterpolateGrad<T>(output_grad, input_grad, ratio_h,
                                      ratio_w, n, c, out_h, out_w,
                                      align_corners, data_layout);
  } else if ("bicubic" == interp_method) {
    BicubicInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w,
                                in_h, in_w, n, c, out_h, out_w, align_corners,
                                data_layout);
  }
}

template <typename T>
static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx,
                                Tensor* input_grad,
                                const Tensor& output_grad) {
  // Take output_grad by const reference, matching the 1D and 2D backward
  // paths; passing the Tensor by value forced a needless copy.
  auto* input = ctx.Input<Tensor>("X");
  const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
  const DataLayout data_layout =
      framework::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);

  auto interp_method = ctx.Attr<std::string>("interp_method");
  bool align_corners = ctx.Attr<bool>("align_corners");
  int align_mode = ctx.Attr<int>("align_mode");

  int out_d = ctx.Attr<int>("out_d");
  int out_h = ctx.Attr<int>("out_h");
  int out_w = ctx.Attr<int>("out_w");
  float scale_d = -1;
  float scale_h = -1;
  float scale_w = -1;
  auto scale_tensor = ctx.Input<Tensor>("Scale");
  auto scale = ctx.Attr<std::vector<float>>("scale");
  if (scale_tensor != nullptr) {
    auto scale_data = get_new_data_from_tensor<float>(scale_tensor);
    if (scale_data.size() > 1) {
      scale_d = scale_data[0];
      scale_h = scale_data[1];
      scale_w = scale_data[2];
    } else {
      scale_d = scale_data[0];
      scale_h = scale_data[0];
      scale_w = scale_data[0];
    }
    // %f, not %d: the scale values are floats.
    PADDLE_ENFORCE_EQ(
        scale_w > 0, true,
        platform::errors::InvalidArgument(
            "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %f.",
            scale_w));
    PADDLE_ENFORCE_EQ(
        scale_h > 0, true,
        platform::errors::InvalidArgument(
            "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %f.",
            scale_h));
    PADDLE_ENFORCE_EQ(
        scale_d > 0, true,
        platform::errors::InvalidArgument(
            "The scale_d in input 'Scale' Tensor of Operator(interpolate) "
            "should be greater than 0, but received value is %f.",
            scale_d));
  } else {
    if (scale.size() > 1) {
      scale_d = scale[0];
      scale_h = scale[1];
      scale_w = scale[2];
      PADDLE_ENFORCE_EQ(
          scale_w > 0, true,
          platform::errors::InvalidArgument(
              "The scale_w in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0, true,
          platform::errors::InvalidArgument(
              "The scale_h in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_h));
      PADDLE_ENFORCE_EQ(
          scale_d > 0, true,
          platform::errors::InvalidArgument(
              "The scale_d in Attr(scale) of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_d));
    }
  }
  if (scale_d > 0. && scale_h > 0. && scale_w > 0.)
{ out_d = static_cast<int>(in_d * scale_d); out_h = static_cast<int>(in_h * scale_h); out_w = static_cast<int>(in_w * scale_w); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_d, in_h, in_w}; } else { dim_grad = {n, in_d, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { float new_scale_d = 0.f; new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d) : static_cast<float>(in_d) / out_d; ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(new_scale_d); } if (out_h > 1) { float new_scale_h = 0.f; new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h) : static_cast<float>(in_h) / out_h; ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(new_scale_h); } if (out_w > 1) { float new_scale_w = 0.f; new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w) : static_cast<float>(in_w) / out_w; ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(new_scale_w);
  }
  if ("trilinear" == interp_method) {
    TrilinearInterpolationGrad<T>(
        output_grad, input_grad, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w,
        n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout);
  } else if ("nearest" == interp_method) {
    NearestNeighbor3DInterpolateGrad<T>(output_grad, input_grad, ratio_d,
                                        ratio_h, ratio_w, n, c, out_d, out_h,
                                        out_w, align_corners, data_layout);
  }
}

template <typename T>
class InterpolateV2Kernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<Tensor>("X");
    auto* output = ctx.Output<Tensor>("Out");

    auto input_dims = input->dims();
    if (input_dims.size() == 3) {  // 1D interpolation
      Interpolate1DCPUFwd<T>(ctx, *input, output);
    } else if (input_dims.size() == 4) {  // 2D interpolation
      Interpolate2DCPUFwd<T>(ctx, *input, output);
    } else if (input_dims.size() == 5) {  // 3D interpolation
      Interpolate3DCPUFwd<T>(ctx, *input, output);
    }
  }
};

template <typename T>
class InterpolateV2GradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));

    auto output_grad_dims = output_grad->dims();
    if (output_grad_dims.size() == 3) {  // 1D interpolation grad
      Interpolate1DCPUBwd<T>(ctx, input_grad, *output_grad);
    } else if (output_grad_dims.size() == 4) {  // 2D interpolation grad
      Interpolate2DCPUBwd<T>(ctx, input_grad, *output_grad);
    } else if (output_grad_dims.size() == 5) {  // 3D interpolation grad
      Interpolate3DCPUBwd<T>(ctx, input_grad, *output_grad);
    }
  }
};

}  // namespace operators
}  // namespace paddle
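All of the interpolation kernels above repeat one piece of arithmetic: mapping an output index to a source coordinate, then to a pair of clamped neighbour indices with complementary weights. The standalone sketch below condenses that per-axis pattern under the same align_corners/align_mode semantics; it is illustrative only and not part of the Paddle source (AxisSample and MapAxis are made-up names).

#include <algorithm>

// Illustrative helper: maps one output index to its two source neighbours
// and their interpolation weights along a single axis.
//   align_corners                   : src = ratio * dst, ratio = (in-1)/(out-1)
//   align_mode==0 && !align_corners : src = ratio * (dst + 0.5) - 0.5 (half-pixel)
//   align_mode==1 && !align_corners : src = ratio * dst (asymmetric)
struct AxisSample {
  int lo, hi;        // clamped neighbour indices (the x_w / x_e above)
  float w_lo, w_hi;  // their weights (the d_e / d_w lambdas above)
};

inline AxisSample MapAxis(int dst, float ratio, int in_size,
                          bool align_corners, int align_mode) {
  const bool half_pixel = (align_mode == 0 && !align_corners);
  float src = half_pixel ? ratio * (dst + 0.5f) - 0.5f : ratio * dst;
  src = std::max(src, 0.f);  // clamp below, as the kernels do
  AxisSample s;
  s.lo = static_cast<int>(src);            // truncation == floor for src >= 0
  s.hi = std::min(s.lo + 1, in_size - 1);  // clamp at the right border
  s.w_hi = src - s.lo;                     // fractional part -> far weight
  s.w_lo = 1.f - s.w_hi;                   // complementary near weight
  return s;
}

A 2D bilinear sample is then input(y.lo, x.lo)*y.w_lo*x.w_lo + input(y.hi, x.lo)*y.w_hi*x.w_lo + input(y.lo, x.hi)*y.w_lo*x.w_hi + input(y.hi, x.hi)*y.w_hi*x.w_hi, which is exactly the four-term expression in BilinearInterpolation above; the trilinear kernel applies the same mapping on a third axis.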
convolution_sgemm_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

static void im2col_sgemm_int8_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);

    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    // permute
    Mat tmp;
#if __ARM_NEON
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
    if (inch >= 8)
    {
        if (size >= 16)
            tmp.create(16 * maxk, inch / 8 + inch % 8, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else if (size >= 8)
            tmp.create(8 * maxk, inch / 8 + inch % 8, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else if (size >= 4)
            tmp.create(4 * maxk, inch / 8 + inch % 8, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else if (size >= 2)
            tmp.create(2 * maxk, inch / 8 + inch % 8, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else
            tmp.create(maxk, inch / 8 + inch % 8, size, 8u, 8, opt.workspace_allocator);
    }
    else
    {
        if (size >= 16)
            tmp.create(16 * maxk, inch, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else if (size >= 8)
            tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else if (size >= 4)
            tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else if (size >= 2)
            tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else
            // elemsize 1u to match the other inch < 8 paths; the original's
            // "8u, 1" was inconsistent with every sibling branch
            tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
    }
#else  // __ARM_FEATURE_DOTPROD
    if (inch >= 8)
    {
        if (size >= 4)
            tmp.create(4 * maxk, inch / 8 + inch % 8, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else if (size >= 2)
            tmp.create(2 * maxk, inch / 8 + inch % 8, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else
            tmp.create(maxk, inch / 8 + inch % 8, size, 8u, 8, opt.workspace_allocator);
    }
    else
    {
        if (size >= 4)
            tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else if (size >= 2)
            tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else
            tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
    }
#endif // __ARM_FEATURE_DOTPROD
#else  // __aarch64__
    if (inch >= 8)
    {
        if (size >= 2)
            tmp.create(2 * maxk, inch / 8 + inch % 8, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else
            tmp.create(maxk, inch / 8 + inch % 8, size, 8u, 8, opt.workspace_allocator);
    }
    else
    {
        if (size >= 2)
            tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else
            tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
    }
#endif // __aarch64__
    {
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
        int nn_size = size >> 4;
        int
remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 16; signed char* tmpptr = tmp.channel(i / 16); int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { asm volatile( "ld1 {v0.16b}, [%0] \n" "ld1 {v1.16b}, [%1] \n" "ld1 {v2.16b}, [%2] \n" "ld1 {v3.16b}, [%3] \n" "ld1 {v4.16b}, [%4] \n" "ld1 {v5.16b}, [%5] \n" "ld1 {v6.16b}, [%6] \n" "ld1 {v7.16b}, [%7] \n" "st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%8], #64 \n" "st4 {v4.16b, v5.16b, v6.16b, v7.16b}, [%8], #64 \n" : "=r"(img0), // %0 "=r"(img1), "=r"(img2), "=r"(img3), "=r"(img4), "=r"(img5), "=r"(img6), "=r"(img7), "=r"(tmpptr) // %8 : "0"(img0), "1"(img1), "2"(img2), "3"(img3), "4"(img4), "5"(img5), "6"(img6), "7"(img7), "8"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.16b}, [%0] \n" "st1 {v0.16b}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += size; } } } remain_size_start += nn_size << 4; nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { asm volatile( "ld1 {v0.8b}, [%0] \n" "ld1 {v1.8b}, [%1] \n" "ld1 {v2.8b}, [%2] \n" "ld1 {v3.8b}, [%3] \n" "ld1 {v4.8b}, [%4] \n" "ld1 {v5.8b}, [%5] \n" "ld1 {v6.8b}, [%6] \n" "ld1 {v7.8b}, [%7] \n" "st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [%8], #32 \n" "st4 {v4.8b, v5.8b, v6.8b, v7.8b}, [%8], #32 \n" : "=r"(img0), // %0 "=r"(img1), "=r"(img2), "=r"(img3), "=r"(img4), "=r"(img5), "=r"(img6), "=r"(img7), "=r"(tmpptr) // %8 : "0"(img0), "1"(img1), "2"(img2), "3"(img3), "4"(img4), "5"(img5), "6"(img6), "7"(img7), "8"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); img0 += size; img1 += size; img2 += 
size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.8b}, [%0] \n" "st1 {v0.8b}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += size; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #else // __ARM_FEATURE_DOTPROD int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 2; #endif // __ARM_FEATURE_DOTPROD #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else signed char* tmpptr = tmp.channel(i / 4); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; tmpptr[0] = img0[2]; tmpptr[1] = img1[2]; tmpptr[2] = img2[2]; tmpptr[3] = img3[2]; tmpptr[4] = img0[3]; tmpptr[5] = img1[3]; tmpptr[6] = img2[3]; tmpptr[7] = img3[3]; tmpptr += 8; tmpptr[0] = img4[0]; tmpptr[1] = img5[0]; tmpptr[2] = img6[0]; tmpptr[3] = img7[0]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; tmpptr[0] = img4[2]; tmpptr[1] = img5[2]; tmpptr[2] = img6[2]; tmpptr[3] = img7[2]; tmpptr[4] = img4[3]; tmpptr[5] = img5[3]; tmpptr[6] = img6[3]; tmpptr[7] = img7[3]; tmpptr += 8; #else tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; tmpptr[0] = img0[1]; tmpptr[1] = img1[1]; tmpptr[2] = img2[1]; tmpptr[3] = img3[1]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; tmpptr[0] = img0[2]; tmpptr[1] = img1[2]; tmpptr[2] = img2[2]; tmpptr[3] = img3[2]; tmpptr[4] = img4[2]; tmpptr[5] = img5[2]; tmpptr[6] = img6[2]; tmpptr[7] = img7[2]; tmpptr += 8; tmpptr[0] = img0[3]; tmpptr[1] = img1[3]; tmpptr[2] = img2[3]; tmpptr[3] = img3[3]; tmpptr[4] = img4[3]; tmpptr[5] = img5[3]; tmpptr[6] = img6[3]; tmpptr[7] = img7[3]; tmpptr += 8; #endif // __ARM_FEATURE_DOTPROD img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += size; } } } remain_size_start += nn_size << 2; nn_size = (size - 
remain_size_start) >> 1; #else int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 1; #endif #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else signed char* tmpptr = tmp.channel(i / 2); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; tmpptr[0] = img4[0]; tmpptr[1] = img5[0]; tmpptr[2] = img6[0]; tmpptr[3] = img7[0]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; #else tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; tmpptr[0] = img0[1]; tmpptr[1] = img1[1]; tmpptr[2] = img2[1]; tmpptr[3] = img3[1]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; #endif // __ARM_FEATURE_DOTPROD img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += size; } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i; const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i; const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i; const signed char* img4 = (const signed char*)bottom_im2col.channel(q + 4) + i; const signed char* img5 = (const signed char*)bottom_im2col.channel(q + 5) + i; const signed char* img6 = (const signed char*)bottom_im2col.channel(q + 6) + i; const signed char* img7 = (const signed char*)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; 
tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += size; } } } } #else // __ARM_NEON tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator); { #pragma omp parallel for num_threads(opt.num_threads) for (int i = 0; i < size; i++) { signed char* tmpptr = tmp.channel(i); int q = 0; for (; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += size; } } } } #endif // __ARM_NEON int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON nn_outch = outch >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; int* outptr0 = top_blob.channel(p); int* outptr1 = top_blob.channel(p + 1); int* outptr2 = top_blob.channel(p + 2); int* outptr3 = top_blob.channel(p + 3); int i = 0; #if __aarch64__ #if __ARM_FEATURE_DOTPROD for (; i + 15 < size; i += 16) { const signed char* tmpptr = tmp.channel(i / 16); const signed char* kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "cmp %w4, #0 \n" "beq 1f \n" "ld1 {v8.16b}, [%7], #16 \n" // _w0123_l "ld1 {v0.16b}, [%6], #16 \n" // _val0123_l "0: \n" "ld1 {v1.16b}, [%6], #16 \n" // _val4567_l "sdot v16.4s, v8.16b, v0.4b[0] \n" "sdot v17.4s, v8.16b, v0.4b[1] \n" "sdot v18.4s, v8.16b, v0.4b[2] \n" "sdot v19.4s, v8.16b, v0.4b[3] \n" "ld1 {v2.16b}, [%6], #16 \n" // _val891011_l "sdot v20.4s, v8.16b, v1.4b[0] \n" "sdot v21.4s, v8.16b, v1.4b[1] \n" "sdot v22.4s, v8.16b, v1.4b[2] \n" "sdot v23.4s, v8.16b, v1.4b[3] \n" "ld1 {v3.16b}, [%6], #16 \n" // _val12131415_l "sdot v24.4s, v8.16b, v2.4b[0] \n" "sdot v25.4s, v8.16b, v2.4b[1] \n" "ld1 {v9.16b}, [%7], #16 \n" // _w0123_h "sdot v26.4s, v8.16b, v2.4b[2] \n" "sdot v27.4s, v8.16b, v2.4b[3] \n" "ld1 {v4.16b}, [%6], #16 \n" // _val0123_h "sdot v28.4s, v8.16b, v3.4b[0] \n" "sdot v29.4s, v8.16b, v3.4b[1] \n" "sdot v30.4s, v8.16b, v3.4b[2] \n" "sdot v31.4s, v8.16b, v3.4b[3] \n" "ld1 {v5.16b}, [%6], #16 \n" // _val4567_h "sdot v16.4s, v9.16b, v4.4b[0] \n" "sdot v17.4s, v9.16b, v4.4b[1] \n" "sdot v18.4s, v9.16b, v4.4b[2] \n" "sdot v19.4s, v9.16b, v4.4b[3] \n" "ld1 {v6.16b}, [%6], #16 \n" // _val891011_h "sdot v20.4s, v9.16b, v5.4b[0] \n" "sdot v21.4s, v9.16b, v5.4b[1] \n" "sdot v22.4s, v9.16b, v5.4b[2] \n" "sdot v23.4s, v9.16b, v5.4b[3] \n" "ld1 {v7.16b}, [%6], #16 \n" // _val12131415_h "sdot v24.4s, v9.16b, v6.4b[0] \n" "sdot v25.4s, v9.16b, v6.4b[1] \n" "ld1 {v8.16b}, [%7], #16 \n" // _w0123_l "sdot v26.4s, v9.16b, v6.4b[2] \n" "sdot v27.4s, v9.16b, v6.4b[3] \n" "ld1 {v0.16b}, [%6], #16 \n" // _val0123_l "sdot v28.4s, v9.16b, v7.4b[0] \n" 
"sdot v29.4s, v9.16b, v7.4b[1] \n" "subs %w4, %w4, #1 \n" "sdot v30.4s, v9.16b, v7.4b[2] \n" "sdot v31.4s, v9.16b, v7.4b[3] \n" "bne 0b \n" "sub %6, %6, #16 \n" "sub %7, %7, #16 \n" "1: \n" "lsr w4, %w5, #2 \n" // w4 = nn1 >> 2 "cmp w4, #0 \n" "beq 3f \n" "2: \n" "ld1 {v8.8b, v9.8b}, [%7], #16 \n" "ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%6], #64 \n" "uzp1 v10.8b, v8.8b, v9.8b \n" "uzp2 v11.8b, v8.8b, v9.8b \n" "uzp1 v4.16b, v0.16b, v1.16b \n" "uzp2 v5.16b, v0.16b, v1.16b \n" "uzp1 v6.16b, v2.16b, v3.16b \n" "uzp2 v7.16b, v2.16b, v3.16b \n" "uzp1 v8.8b, v10.8b, v11.8b \n" "uzp2 v9.8b, v10.8b, v11.8b \n" "uzp1 v0.16b, v4.16b, v5.16b \n" // 0 1 4 5 "uzp2 v1.16b, v4.16b, v5.16b \n" // 8 9 c d "mov v8.d[1], v9.d[0] \n" // _w "uzp1 v2.16b, v6.16b, v7.16b \n" // 2 3 6 7 "uzp2 v3.16b, v6.16b, v7.16b \n" // a b e f "sdot v16.4s, v8.16b, v0.4b[0] \n" "sdot v17.4s, v8.16b, v0.4b[1] \n" "sdot v18.4s, v8.16b, v2.4b[0] \n" "sdot v19.4s, v8.16b, v2.4b[1] \n" "sdot v20.4s, v8.16b, v0.4b[2] \n" "sdot v21.4s, v8.16b, v0.4b[3] \n" "sdot v22.4s, v8.16b, v2.4b[2] \n" "sdot v23.4s, v8.16b, v2.4b[3] \n" "sdot v24.4s, v8.16b, v1.4b[0] \n" "sdot v25.4s, v8.16b, v1.4b[1] \n" "sdot v26.4s, v8.16b, v3.4b[0] \n" "sdot v27.4s, v8.16b, v3.4b[1] \n" "sdot v28.4s, v8.16b, v1.4b[2] \n" "sdot v29.4s, v8.16b, v1.4b[3] \n" "sdot v30.4s, v8.16b, v3.4b[2] \n" "sdot v31.4s, v8.16b, v3.4b[3] \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "and w4, %w5, #3 \n" // w4 = remain = nn1 & 3 "cmp w4, #0 \n" // w4 > 0 "beq 5f \n" "4: \n" "ld1 {v1.8b}, [%7] \n" "ld1 {v0.16b}, [%6] \n" "sshll v1.8h, v1.8b, #0 \n" "sshll v2.8h, v0.8b, #0 \n" "sshll2 v3.8h, v0.16b, #0 \n" "smlal v16.4s, v1.4h, v2.h[0] \n" "smlal v17.4s, v1.4h, v2.h[1] \n" "smlal v18.4s, v1.4h, v2.h[2] \n" "smlal v19.4s, v1.4h, v2.h[3] \n" "smlal v20.4s, v1.4h, v2.h[4] \n" "smlal v21.4s, v1.4h, v2.h[5] \n" "smlal v22.4s, v1.4h, v2.h[6] \n" "smlal v23.4s, v1.4h, v2.h[7] \n" "smlal v24.4s, v1.4h, v3.h[0] \n" "smlal v25.4s, v1.4h, v3.h[1] \n" "smlal v26.4s, v1.4h, v3.h[2] \n" "smlal v27.4s, v1.4h, v3.h[3] \n" "smlal v28.4s, v1.4h, v3.h[4] \n" "smlal v29.4s, v1.4h, v3.h[5] \n" "smlal v30.4s, v1.4h, v3.h[6] \n" "smlal v31.4s, v1.4h, v3.h[7] \n" "add %6, %6, #16 \n" "add %7, %7, #4 \n" "subs w4, w4, #1 \n" "bne 4b \n" "5: \n" // transpose 4x16 "trn1 v0.4s, v16.4s, v17.4s \n" "trn2 v1.4s, v16.4s, v17.4s \n" "trn1 v2.4s, v18.4s, v19.4s \n" "trn2 v3.4s, v18.4s, v19.4s \n" "trn1 v4.4s, v20.4s, v21.4s \n" "trn2 v5.4s, v20.4s, v21.4s \n" "trn1 v6.4s, v22.4s, v23.4s \n" "trn2 v7.4s, v22.4s, v23.4s \n" "trn1 v8.4s, v24.4s, v25.4s \n" "trn2 v9.4s, v24.4s, v25.4s \n" "trn1 v10.4s, v26.4s, v27.4s \n" "trn2 v11.4s, v26.4s, v27.4s \n" "trn1 v12.4s, v28.4s, v29.4s \n" "trn2 v13.4s, v28.4s, v29.4s \n" "trn1 v14.4s, v30.4s, v31.4s \n" "trn2 v15.4s, v30.4s, v31.4s \n" "trn1 v16.2d, v0.2d, v2.2d \n" "trn2 v24.2d, v0.2d, v2.2d \n" "trn1 v20.2d, v1.2d, v3.2d \n" "trn2 v28.2d, v1.2d, v3.2d \n" "trn1 v17.2d, v4.2d, v6.2d \n" "trn2 v25.2d, v4.2d, v6.2d \n" "trn1 v21.2d, v5.2d, v7.2d \n" "trn2 v29.2d, v5.2d, v7.2d \n" "trn1 v18.2d, v8.2d, v10.2d \n" "trn2 v26.2d, v8.2d, v10.2d \n" "trn1 v22.2d, v9.2d, v11.2d \n" "trn2 v30.2d, v9.2d, v11.2d \n" "trn1 v19.2d, v12.2d, v14.2d \n" "trn2 v27.2d, v12.2d, v14.2d \n" "trn1 v23.2d, v13.2d, v15.2d \n" "trn2 v31.2d, v13.2d, v15.2d \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%3], #64 \n" : "=r"(outptr0), 
"=r"(outptr1), "=r"(outptr2), "=r"(outptr3), "=r"(nn), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(nn), "5"(nn1), "6"(tmpptr), "7"(kptr0) : "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); const signed char* kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_l, _val4567_l, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_l, _val4567_l, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_l, _val4567_l, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_l, _val4567_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 32); int8x16_t _val4567_h = vld1q_s8(tmpptr + 48); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_h, _val4567_h, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_h, _val4567_h, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_h, _val4567_h, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_h, _val4567_h, 3); tmpptr += 64; kptr0 += 32; } int j = 0; for (; j + 3 < nn1; j += 4) { int8x8x4_t _val4 = vld4_s8(tmpptr); int8x8x2_t _val0145 = vuzp_s8(_val4.val[0], _val4.val[1]); int8x8x2_t _val2367 = vuzp_s8(_val4.val[2], _val4.val[3]); int8x16_t _val0123 = vcombine_s8(_val0145.val[0], _val2367.val[0]); int8x16_t _val4567 = vcombine_s8(_val0145.val[1], _val2367.val[1]); int8x16_t _w = vld1q_s8(kptr0); int8x8x2_t _w01 = vuzp_s8(vget_low_s8(_w), vget_high_s8(_w)); int8x8x2_t _w0123 = vuzp_s8(_w01.val[0], _w01.val[1]); int8x16_t _w0123f = vcombine_s8(_w0123.val[0], _w0123.val[1]); _sum0 = vdotq_laneq_s32(_sum0, _w0123f, _val0123, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123f, _val0123, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123f, _val0123, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123f, _val0123, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123f, _val4567, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123f, _val4567, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123f, _val4567, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123f, _val4567, 3); tmpptr += 32; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _val2 = vdup_n_s16(tmpptr[2]); int16x4_t _val3 = vdup_n_s16(tmpptr[3]); int16x4_t _val4 = vdup_n_s16(tmpptr[4]); int16x4_t _val5 = vdup_n_s16(tmpptr[5]); int16x4_t _val6 = vdup_n_s16(tmpptr[6]); int16x4_t _val7 = vdup_n_s16(tmpptr[7]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 
0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val0, _w0123); _sum1 = vmlal_s16(_sum1, _val1, _w0123); _sum2 = vmlal_s16(_sum2, _val2, _w0123); _sum3 = vmlal_s16(_sum3, _val3, _w0123); _sum4 = vmlal_s16(_sum4, _val4, _w0123); _sum5 = vmlal_s16(_sum5, _val5, _w0123); _sum6 = vmlal_s16(_sum6, _val6, _w0123); _sum7 = vmlal_s16(_sum7, _val7, _w0123); tmpptr += 8; kptr0 += 4; } // transpose 4x8 int32x4x2_t _s01 = vtrnq_s32(_sum0, _sum1); int32x4x2_t _s23 = vtrnq_s32(_sum2, _sum3); int32x4x2_t _s45 = vtrnq_s32(_sum4, _sum5); int32x4x2_t _s67 = vtrnq_s32(_sum6, _sum7); _sum0 = vcombine_s32(vget_low_s32(_s01.val[0]), vget_low_s32(_s23.val[0])); _sum1 = vcombine_s32(vget_low_s32(_s01.val[1]), vget_low_s32(_s23.val[1])); _sum2 = vcombine_s32(vget_high_s32(_s01.val[0]), vget_high_s32(_s23.val[0])); _sum3 = vcombine_s32(vget_high_s32(_s01.val[1]), vget_high_s32(_s23.val[1])); _sum4 = vcombine_s32(vget_low_s32(_s45.val[0]), vget_low_s32(_s67.val[0])); _sum5 = vcombine_s32(vget_low_s32(_s45.val[1]), vget_low_s32(_s67.val[1])); _sum6 = vcombine_s32(vget_high_s32(_s45.val[0]), vget_high_s32(_s67.val[0])); _sum7 = vcombine_s32(vget_high_s32(_s45.val[1]), vget_high_s32(_s67.val[1])); vst1q_s32(outptr0, _sum0); vst1q_s32(outptr1, _sum1); vst1q_s32(outptr2, _sum2); vst1q_s32(outptr3, _sum3); vst1q_s32(outptr0 + 4, _sum4); vst1q_s32(outptr1 + 4, _sum5); vst1q_s32(outptr2 + 4, _sum6); vst1q_s32(outptr3 + 4, _sum7); outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; } #endif for (; i + 3 < size; i += 4) { #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else const signed char* tmpptr = tmp.channel(i / 4); #endif const signed char* kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 16); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); tmpptr += 32; kptr0 += 32; } int j = 0; for (; j + 3 < nn1; j += 4) { int8x16_t _val = vld1q_s8(tmpptr); int8x8x2_t _val01 = vuzp_s8(vget_low_s8(_val), vget_high_s8(_val)); int8x8x2_t _val0123 = vuzp_s8(_val01.val[0], _val01.val[1]); int8x16_t _val0123f = vcombine_s8(_val0123.val[0], _val0123.val[1]); int8x16_t _w = vld1q_s8(kptr0); int8x8x2_t _w01 = vuzp_s8(vget_low_s8(_w), vget_high_s8(_w)); int8x8x2_t _w0123 = vuzp_s8(_w01.val[0], _w01.val[1]); int8x16_t _w0123f = vcombine_s8(_w0123.val[0], _w0123.val[1]); _sum0 = vdotq_laneq_s32(_sum0, _w0123f, _val0123f, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123f, _val0123f, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123f, _val0123f, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123f, _val0123f, 3); tmpptr += 16; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); 
int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _val2 = vdup_n_s16(tmpptr[2]); int16x4_t _val3 = vdup_n_s16(tmpptr[3]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val0, _w0123); _sum1 = vmlal_s16(_sum1, _val1, _w0123); _sum2 = vmlal_s16(_sum2, _val2, _w0123); _sum3 = vmlal_s16(_sum3, _val3, _w0123); tmpptr += 4; kptr0 += 4; } // transpose 4x4 int32x4x2_t _s01 = vtrnq_s32(_sum0, _sum1); int32x4x2_t _s23 = vtrnq_s32(_sum2, _sum3); _sum0 = vcombine_s32(vget_low_s32(_s01.val[0]), vget_low_s32(_s23.val[0])); _sum1 = vcombine_s32(vget_low_s32(_s01.val[1]), vget_low_s32(_s23.val[1])); _sum2 = vcombine_s32(vget_high_s32(_s01.val[0]), vget_high_s32(_s23.val[0])); _sum3 = vcombine_s32(vget_high_s32(_s01.val[1]), vget_high_s32(_s23.val[1])); vst1q_s32(outptr0, _sum0); vst1q_s32(outptr1, _sum1); vst1q_s32(outptr2, _sum2); vst1q_s32(outptr3, _sum3); outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #else // __ARM_FEATURE_DOTPROD asm volatile( "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "cmp %w4, #0 \n" "beq 3f \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "prfm pldl1keep, [%6, #128] \n" "prfm pldl1keep, [%7, #256] \n" "lsr w4, %w4, #1 \n" // w4 = nn >> 1 "cmp w4, #0 \n" "beq 1f \n" "prfm pldl1keep, [%7, #512] \n" "add x5, %6, #16 \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v16.16b}, [%6] \n" // val L H "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%7], #64 \n" "add %6, %6, #32 \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "ld1 {v18.16b}, [%6] \n" "add %6, %6, #32 \n" "0: \n" "smull v24.8h, v16.8b, v20.8b \n" "prfm pldl1keep, [%7, #256] \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [%7, #512] \n" "smull v26.8h, v16.8b, v21.8b \n" "subs w4, w4, #1 \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "smlal v26.8h, v18.8b, v23.8b \n" "smlal2 v27.8h, v19.16b, v23.16b \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [x5] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add x5, x5, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v2.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [x5] \n" "smlal v30.8h, v19.8b, v23.8b \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "smull v24.8h, v16.8b, v20.8b \n" "add x5, x5, #32 \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [x5, #128] \n" "smull v26.8h, v16.8b, v21.8b \n" "prfm pldl1keep, [x5, #384] \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "sadalp v5.4s, v29.8h \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "sadalp v4.4s, v28.8h \n" "smlal v26.8h, v18.8b, v23.8b \n" "sadalp v7.4s, v31.8h \n" "smlal2 v27.8h, v19.16b, 
v23.16b \n" "sadalp v6.4s, v30.8h \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [%6] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add %6, %6, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v10.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [%6] \n" "smlal v30.8h, v19.8b, v23.8b \n" "add %6, %6, #32 \n" "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%7], #64 \n" "sadalp v13.4s, v29.8h \n" "prfm pldl1keep, [%6, #128] \n" "sadalp v12.4s, v28.8h \n" "prfm pldl1keep, [%6, #384] \n" "sadalp v15.4s, v31.8h \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "sadalp v14.4s, v30.8h \n" "bne 0b \n" "sub %6, %6, #64 \n" "sub %7, %7, #64 \n" "1: \n" "and w4, %w4, #1 \n" // w4 = remain = nn & 1 "cmp w4, #0 \n" // w4 > 0 "beq 2f \n" "ld1 {v16.8b, v17.8b}, [%6], #16 \n" "ld1 {v20.8b, v21.8b, v22.8b, v23.8b}, [%7], #32 \n" "smull v24.8h, v16.8b, v20.8b \n" "smull v25.8h, v16.8b, v21.8b \n" "smull v26.8h, v16.8b, v22.8b \n" "ld1 {v18.8b, v19.8b}, [%6], #16 \n" "smull v27.8h, v16.8b, v23.8b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull v29.8h, v17.8b, v21.8b \n" "sadalp v2.4s, v26.8h \n" "smull v30.8h, v17.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smull v31.8h, v17.8b, v23.8b \n" "sadalp v4.4s, v28.8h \n" "smull v24.8h, v18.8b, v20.8b \n" "sadalp v5.4s, v29.8h \n" "smull v25.8h, v18.8b, v21.8b \n" "sadalp v6.4s, v30.8h \n" "smull v26.8h, v18.8b, v22.8b \n" "sadalp v7.4s, v31.8h \n" "smull v27.8h, v18.8b, v23.8b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v19.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull v29.8h, v19.8b, v21.8b \n" "sadalp v10.4s, v26.8h \n" "smull v30.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smull v31.8h, v19.8b, v23.8b \n" "sadalp v12.4s, v28.8h \n" "sadalp v13.4s, v29.8h \n" "sadalp v14.4s, v30.8h \n" "sadalp v15.4s, v31.8h \n" "2: \n" "addp v0.4s, v0.4s, v1.4s \n" "addp v2.4s, v2.4s, v3.4s \n" "addp v4.4s, v4.4s, v5.4s \n" "addp v6.4s, v6.4s, v7.4s \n" "addp v8.4s, v8.4s, v9.4s \n" "addp v10.4s, v10.4s, v11.4s \n" "addp v12.4s, v12.4s, v13.4s \n" "addp v14.4s, v14.4s, v15.4s \n" "addp v0.4s, v0.4s, v2.4s \n" "addp v1.4s, v4.4s, v6.4s \n" "addp v2.4s, v8.4s, v10.4s \n" "addp v3.4s, v12.4s, v14.4s \n" "3: \n" "lsr w4, %w5, #2 \n" // w4 = nn1 >> 2 "cmp w4, #0 \n" "beq 5f \n" "4: \n" "ld1 {v8.16b}, [%6], #16 \n" "ld1 {v9.16b}, [%7], #16 \n" "sshll v4.8h, v8.8b, #0 \n" "sshll2 v5.8h, v8.16b, #0 \n" "sshll v6.8h, v9.8b, #0 \n" "sshll2 v7.8h, v9.16b, #0 \n" "smlal v0.4s, v6.4h, v4.h[0] \n" "smlal v1.4s, v6.4h, v4.h[1] \n" "smlal v2.4s, v6.4h, v4.h[2] \n" "smlal v3.4s, v6.4h, v4.h[3] \n" "smlal2 v0.4s, v6.8h, v4.h[4] \n" "smlal2 v1.4s, v6.8h, v4.h[5] \n" "smlal2 v2.4s, v6.8h, v4.h[6] \n" "smlal2 v3.4s, v6.8h, v4.h[7] \n" "smlal v0.4s, v7.4h, v5.h[0] \n" "smlal v1.4s, v7.4h, v5.h[1] \n" "smlal v2.4s, v7.4h, v5.h[2] \n" "smlal v3.4s, v7.4h, v5.h[3] \n" "smlal2 v0.4s, v7.8h, v5.h[4] \n" "smlal2 v1.4s, v7.8h, v5.h[5] \n" "smlal2 v2.4s, v7.8h, v5.h[6] \n" "smlal2 v3.4s, v7.8h, v5.h[7] \n" "subs w4, w4, #1 \n" "bne 4b \n" "5: \n" "and w4, %w5, #3 \n" // w4 = nn1 & 3 "cmp w4, #0 \n" // w4 > 0 "beq 7f \n" "6: \n" "ld1 {v4.8b}, [%6] \n" "ld1 {v6.8b}, [%7] \n" "sshll v4.8h, v4.8b, #0 \n" "sshll v6.8h, v6.8b, #0 \n" "smlal v0.4s, v6.4h, v4.h[0] \n" "smlal v1.4s, v6.4h, v4.h[1] \n" "smlal v2.4s, v6.4h, v4.h[2] \n" "smlal 
v3.4s, v6.4h, v4.h[3] \n" "add %6, %6, #4 \n" "add %7, %7, #4 \n" "subs w4, w4, #1 \n" "bne 6b \n" "7: \n" // transpose 4x4 "trn1 v4.4s, v0.4s, v1.4s \n" "trn2 v5.4s, v0.4s, v1.4s \n" "trn1 v6.4s, v2.4s, v3.4s \n" "trn2 v7.4s, v2.4s, v3.4s \n" "trn1 v0.2d, v4.2d, v6.2d \n" "trn2 v2.2d, v4.2d, v6.2d \n" "trn1 v1.2d, v5.2d, v7.2d \n" "trn2 v3.2d, v5.2d, v7.2d \n" "st1 {v0.4s}, [%0], #16 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v2.4s}, [%2], #16 \n" "st1 {v3.4s}, [%3], #16 \n" : "=r"(outptr0), "=r"(outptr1), "=r"(outptr2), "=r"(outptr3), "=r"(nn), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(nn), "5"(nn1), "6"(tmpptr), "7"(kptr0) : "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #endif // __ARM_FEATURE_DOTPROD } #endif // __aarch64__ for (; i + 1 < size; i += 2) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2); #endif const signed char* kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; #if __aarch64__ int32x4_t _sum00 = vdupq_n_s32(0); int32x4_t _sum10 = vdupq_n_s32(0); #if __ARM_FEATURE_DOTPROD for (int j = 0; j < nn; j++) { int8x16_t _val01_l_h = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum00 = vdotq_laneq_s32(_sum00, _w0123_l, _val01_l_h, 0); _sum10 = vdotq_laneq_s32(_sum10, _w0123_l, _val01_l_h, 1); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum00 = vdotq_laneq_s32(_sum00, _w0123_h, _val01_l_h, 2); _sum10 = vdotq_laneq_s32(_sum10, _w0123_h, _val01_l_h, 3); tmpptr += 16; kptr0 += 32; } #else // __ARM_FEATURE_DOTPROD if (nn > 0) { int32x4_t _sum01 = vdupq_n_s32(0); int32x4_t _sum02 = vdupq_n_s32(0); int32x4_t _sum03 = vdupq_n_s32(0); int32x4_t _sum11 = vdupq_n_s32(0); int32x4_t _sum12 = vdupq_n_s32(0); int32x4_t _sum13 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv00 = vmlal_s8(_wv00, vget_low_s8(_val1), vget_low_s8(_w45)); _wv01 = vmlal_s8(_wv01, vget_low_s8(_val1), vget_high_s8(_w45)); _wv02 = vmlal_s8(_wv02, vget_low_s8(_val1), vget_low_s8(_w67)); _wv03 = vmlal_s8(_wv03, vget_low_s8(_val1), vget_high_s8(_w67)); _wv10 = vmlal_s8(_wv10, vget_high_s8(_val1), vget_low_s8(_w45)); _wv11 = vmlal_s8(_wv11, vget_high_s8(_val1), vget_high_s8(_w45)); _wv12 = vmlal_s8(_wv12, vget_high_s8(_val1), vget_low_s8(_w67)); _wv13 = vmlal_s8(_wv13, vget_high_s8(_val1), vget_high_s8(_w67)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = 
vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 32; kptr0 += 64; } for (; j < nn; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w23)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 16; kptr0 += 32; } int32x4_t _s001 = vpaddq_s32(_sum00, _sum01); int32x4_t _s023 = vpaddq_s32(_sum02, _sum03); int32x4_t _s101 = vpaddq_s32(_sum10, _sum11); int32x4_t _s123 = vpaddq_s32(_sum12, _sum13); _sum00 = vpaddq_s32(_s001, _s023); _sum10 = vpaddq_s32(_s101, _s123); } #endif // __ARM_FEATURE_DOTPROD int j = 0; for (; j + 3 < nn1; j += 4) { int16x8_t _val01234567 = vmovl_s8(vld1_s8(tmpptr)); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _w01234567 = vmovl_s8(vget_low_s8(_w)); int16x8_t _w89abcdef = vmovl_s8(vget_high_s8(_w)); int16x4_t _w0123 = vget_low_s16(_w01234567); int16x4_t _w4567 = vget_high_s16(_w01234567); int16x4_t _w89ab = vget_low_s16(_w89abcdef); int16x4_t _wcdef = vget_high_s16(_w89abcdef); _sum00 = vmlal_laneq_s16(_sum00, _w0123, _val01234567, 0); _sum10 = vmlal_laneq_s16(_sum10, _w0123, _val01234567, 1); _sum00 = vmlal_laneq_s16(_sum00, _w4567, _val01234567, 2); _sum10 = vmlal_laneq_s16(_sum10, _w4567, _val01234567, 3); _sum00 = vmlal_laneq_s16(_sum00, _w89ab, _val01234567, 4); _sum10 = vmlal_laneq_s16(_sum10, _w89ab, _val01234567, 5); _sum00 = vmlal_laneq_s16(_sum00, _wcdef, _val01234567, 6); _sum10 = vmlal_laneq_s16(_sum10, _wcdef, _val01234567, 7); tmpptr += 8; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum00 = vmlal_s16(_sum00, _val0, _w0123); _sum10 = vmlal_s16(_sum10, _val1, _w0123); tmpptr += 2; kptr0 += 4; } vst1q_lane_s32(outptr0, _sum00, 0); vst1q_lane_s32(outptr1, _sum00, 1); vst1q_lane_s32(outptr2, _sum00, 2); vst1q_lane_s32(outptr3, _sum00, 3); vst1q_lane_s32(outptr0 + 1, _sum10, 0); vst1q_lane_s32(outptr1 + 1, _sum10, 1); vst1q_lane_s32(outptr2 + 1, _sum10, 2); vst1q_lane_s32(outptr3 + 1, _sum10, 3); outptr0 += 2; outptr1 += 2; outptr2 += 2; outptr3 += 2; #else // __aarch64__ asm volatile( "veor q0, q0 \n" "veor q1, q1 \n" "veor q2, q2 \n" "veor q3, q3 \n" "veor q4, q4 \n" "veor q5, q5 \n" "veor q6, q6 \n" "veor q7, q7 \n" "cmp %4, #0 \n" "beq 3f \n" "pld [%6, #256] \n" "lsr r4, %4, #1 \n" // r4 = nn = size >> 1 "cmp r4, #0 \n" "beq 1f \n" "add r5, %7, #16 \n" 
"pld [%7, #128] \n" "mov r6, #32 \n" "pld [%7, #384] \n" "vld1.s8 {d20-d21}, [%7 :128], r6 \n" // _w01 "vld1.s8 {d16-d19}, [%6 :128]! \n" // _val0 _val1 "vld1.s8 {d22-d23}, [%7 :128], r6 \n" // _w45 "0: \n" "vmull.s8 q12, d16, d20 \n" "pld [%6, #256] \n" "vmull.s8 q13, d16, d21 \n" "pld [%7, #384] \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d20-d21}, [r5 :128], r6 \n" // _w23 "vmlal.s8 q12, d18, d22 \n" "vmlal.s8 q13, d18, d23 \n" "subs r4, r4, #1 \n" "vmlal.s8 q14, d19, d22 \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d22-d23}, [r5 :128], r6 \n" // _w67 "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d20 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d21 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d20 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d16-d17}, [%6 :128]! \n" // _val0 "vmlal.s8 q12, d18, d22 \n" "vld1.s8 {d20-d21}, [%7 :128], r6 \n" // _w01 "vmlal.s8 q13, d18, d23 \n" "pld [r5, #128] \n" "vmlal.s8 q14, d19, d22 \n" "pld [r5, #384] \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d18-d19}, [%6 :128]! \n" // _val1 "vpadal.s16 q2, q12 \n" "vld1.s8 {d22-d23}, [%7 :128], r6 \n" // _w45 "vpadal.s16 q3, q13 \n" "pld [%6, #128] \n" "vpadal.s16 q6, q14 \n" "pld [%7, #128] \n" "vpadal.s16 q7, q15 \n" "bne 0b \n" "sub %6, %6, #32 \n" "sub %7, %7, #64 \n" "1: \n" "and r4, %4, #1 \n" // r4 = remain = size & 1 "cmp r4, #0 \n" // r4 > 0 "beq 2f \n" "vld1.s8 {d16-d17}, [%6 :128]! \n" // _val "vld1.s8 {d20-d21}, [%7 :128]! \n" // _w01 "vmull.s8 q12, d16, d20 \n" "vld1.s8 {d22-d23}, [%7 :128]! \n" // _w23 "vmull.s8 q13, d16, d21 \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d22 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d23 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d22 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d23 \n" "vpadal.s16 q2, q12 \n" "vpadal.s16 q3, q13 \n" "vpadal.s16 q6, q14 \n" "vpadal.s16 q7, q15 \n" "2: \n" "vpadd.s32 d16, d0, d1 \n" "vpadd.s32 d17, d2, d3 \n" "vpadd.s32 d18, d4, d5 \n" "vpadd.s32 d19, d6, d7 \n" "vpadd.s32 d20, d8, d9 \n" "vpadd.s32 d21, d10, d11 \n" "vpadd.s32 d22, d12, d13 \n" "vpadd.s32 d23, d14, d15 \n" "vpadd.s32 d0, d16, d17 \n" "vpadd.s32 d1, d18, d19 \n" "vpadd.s32 d2, d20, d21 \n" "vpadd.s32 d3, d22, d23 \n" "3: \n" "lsr r4, %5, #2 \n" // r4 = nn1 >> 2 "cmp r4, #0 \n" "beq 5f \n" "4: \n" "vld1.s8 {d4}, [%6]! \n" "vmovl.s8 q2, d4 \n" "vld1.s8 {d10-d11}, [%7]! \n" "vmovl.s8 q3, d10 \n" "vmovl.s8 q4, d11 \n" "vmlal.s16 q0, d6, d4[0] \n" "vmlal.s16 q1, d6, d4[1] \n" "vmlal.s16 q0, d7, d4[2] \n" "vmlal.s16 q1, d7, d4[3] \n" "vmlal.s16 q0, d8, d5[0] \n" "vmlal.s16 q1, d8, d5[1] \n" "vmlal.s16 q0, d9, d5[2] \n" "vmlal.s16 q1, d9, d5[3] \n" "subs r4, r4, #1 \n" "bne 4b \n" "5: \n" "and r4, %5, #3 \n" // r4 = nn1 & 3 "cmp r4, #0 \n" // w4 > 0 "beq 7f \n" "6: \n" "vld1.s8 {d4[]}, [%6]! \n" "vld1.s8 {d6[]}, [%6]! \n" "vmovl.s8 q2, d4 \n" "vmovl.s8 q3, d6 \n" "vld1.s8 {d8}, [%7] \n" "vmovl.s8 q4, d8 \n" "vmlal.s16 q0, d4, d8 \n" "vmlal.s16 q1, d6, d8 \n" "add %7, %7, #4 \n" "subs r4, r4, #1 \n" "bne 6b \n" "7: \n" "vst1.s32 {d0[0]}, [%0]! \n" "vst1.s32 {d0[1]}, [%1]! \n" "vst1.s32 {d1[0]}, [%2]! \n" "vst1.s32 {d1[1]}, [%3]! \n" "vst1.s32 {d2[0]}, [%0]! \n" "vst1.s32 {d2[1]}, [%1]! \n" "vst1.s32 {d3[0]}, [%2]! \n" "vst1.s32 {d3[1]}, [%3]! 
\n" : "=r"(outptr0), "=r"(outptr1), "=r"(outptr2), "=r"(outptr3), "=r"(nn), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(nn), "5"(nn1), "6"(tmpptr), "7"(kptr0) : "memory", "r4", "r5", "r6", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif const signed char* kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); #if __ARM_FEATURE_DOTPROD for (int j = 0; j < nn; j++) { int8x8_t _val0_l_h = vld1_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _w0123_l, _val0_l_h, 0); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_lane_s32(_sum0, _w0123_h, _val0_l_h, 1); tmpptr += 8; kptr0 += 32; } #else // __ARM_FEATURE_DOTPROD if (nn > 0) { int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv0 = vmlal_s8(_wv0, vget_high_s8(_val), vget_low_s8(_w45)); _wv1 = vmlal_s8(_wv1, vget_high_s8(_val), vget_high_s8(_w45)); _wv2 = vmlal_s8(_wv2, vget_high_s8(_val), vget_low_s8(_w67)); _wv3 = vmlal_s8(_wv3, vget_high_s8(_val), vget_high_s8(_w67)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 16; kptr0 += 64; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(_val, vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(_val, vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(_val, vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(_val, vget_high_s8(_w23)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 8; kptr0 += 32; } #if __aarch64__ int32x4_t _s01 = vpaddq_s32(_sum0, _sum1); int32x4_t _s23 = vpaddq_s32(_sum2, _sum3); _sum0 = vpaddq_s32(_s01, _s23); #else int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum1), vget_high_s32(_sum1)); int32x2_t _s23_low = vpadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); int32x2_t _s23_high = vpadd_s32(vget_low_s32(_sum3), vget_high_s32(_sum3)); _sum0 = vcombine_s32(vpadd_s32(_s01_low, _s01_high), vpadd_s32(_s23_low, _s23_high)); #endif } #endif // __ARM_FEATURE_DOTPROD int32x4_t _sum1 = vdupq_n_s32(0); int j = 0; for (; j + 3 < nn1; j += 4) { int16x4_t _val0123 = vget_low_s16(vmovl_s8(vld1_s8(tmpptr))); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _w01234567 = vmovl_s8(vget_low_s8(_w)); int16x8_t _w89abcdef = vmovl_s8(vget_high_s8(_w)); int16x4_t _w0123 = 
vget_low_s16(_w01234567); int16x4_t _w4567 = vget_high_s16(_w01234567); int16x4_t _w89ab = vget_low_s16(_w89abcdef); int16x4_t _wcdef = vget_high_s16(_w89abcdef); _sum0 = vmlal_lane_s16(_sum0, _w0123, _val0123, 0); _sum1 = vmlal_lane_s16(_sum1, _w4567, _val0123, 1); _sum0 = vmlal_lane_s16(_sum0, _w89ab, _val0123, 2); _sum1 = vmlal_lane_s16(_sum1, _wcdef, _val0123, 3); tmpptr += 4; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val = vdup_n_s16(tmpptr[0]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val, _w0123); tmpptr += 1; kptr0 += 4; } _sum0 = vaddq_s32(_sum0, _sum1); vst1q_lane_s32(outptr0, _sum0, 0); vst1q_lane_s32(outptr1, _sum0, 1); vst1q_lane_s32(outptr2, _sum0, 2); vst1q_lane_s32(outptr3, _sum0, 3); outptr0 += 1; outptr1 += 1; outptr2 += 1; outptr3 += 1; } } remain_outch_start += nn_outch << 2; #endif // __ARM_NEON #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { int* outptr0 = top_blob.channel(p); int i = 0; #if __ARM_NEON #if __aarch64__ #if __ARM_FEATURE_DOTPROD for (; i + 15 < size; i += 16) { const signed char* tmpptr = tmp.channel(i / 16); const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _val89ab_l = vld1q_s8(tmpptr + 32); int8x16_t _valcdef_l = vld1q_s8(tmpptr + 48); int8x16_t _val0123_h = vld1q_s8(tmpptr + 64); int8x16_t _val4567_h = vld1q_s8(tmpptr + 80); int8x16_t _val89ab_h = vld1q_s8(tmpptr + 96); int8x16_t _valcdef_h = vld1q_s8(tmpptr + 112); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0); _sum1 = vdotq_lane_s32(_sum1, _val4567_l, _w_lh, 0); _sum2 = vdotq_lane_s32(_sum2, _val89ab_l, _w_lh, 0); _sum3 = vdotq_lane_s32(_sum3, _valcdef_l, _w_lh, 0); _sum0 = vdotq_lane_s32(_sum0, _val0123_h, _w_lh, 1); _sum1 = vdotq_lane_s32(_sum1, _val4567_h, _w_lh, 1); _sum2 = vdotq_lane_s32(_sum2, _val89ab_h, _w_lh, 1); _sum3 = vdotq_lane_s32(_sum3, _valcdef_h, _w_lh, 1); tmpptr += 128; kptr0 += 8; } int j = 0; for (; j < nn1; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x8_t _w = vld1_dup_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val), _w); int16x8_t _s1 = vmull_s8(vget_high_s8(_val), _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); tmpptr += 16; kptr0 += 1; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); vst1q_s32(outptr0 + 8, _sum2); vst1q_s32(outptr0 + 12, _sum3); outptr0 += 16; } for (; i + 7 < size; i += 8) { const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); if (nn > 0) { int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _val0123_h = vld1q_s8(tmpptr + 32); int8x16_t _val4567_h = vld1q_s8(tmpptr 
+ 48); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0); _sum1 = vdotq_lane_s32(_sum1, _val4567_l, _w_lh, 0); _sum2 = vdotq_lane_s32(_sum2, _val0123_h, _w_lh, 1); _sum3 = vdotq_lane_s32(_sum3, _val4567_h, _w_lh, 1); tmpptr += 64; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum2); _sum1 = vaddq_s32(_sum1, _sum3); } int j = 0; for (; j < nn1; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x8_t _w = vld1_dup_s8(kptr0); int16x8_t _s = vmull_s8(_val, _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s)); tmpptr += 8; kptr0 += 1; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); outptr0 += 8; } #endif // __ARM_FEATURE_DOTPROD for (; i + 3 < size; i += 4) { #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else const signed char* tmpptr = tmp.channel(i / 4); #endif const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); if (nn > 0) { #if __ARM_FEATURE_DOTPROD int32x4_t _sum1 = vdupq_n_s32(0); int j = 0; for (; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val0123_h = vld1q_s8(tmpptr + 16); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0); _sum1 = vdotq_lane_s32(_sum1, _val0123_h, _w_lh, 1); tmpptr += 32; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum1); #else // __ARM_FEATURE_DOTPROD int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _val2 = vld1q_s8(tmpptr + 32); int8x16_t _val3 = vld1q_s8(tmpptr + 48); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w)); int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w)); int16x8_t _s2 = vmull_s8(vget_low_s8(_val1), vget_low_s8(_w)); int16x8_t _s3 = vmull_s8(vget_high_s8(_val1), vget_low_s8(_w)); _s0 = vmlal_s8(_s0, vget_low_s8(_val2), vget_high_s8(_w)); _s1 = vmlal_s8(_s1, vget_high_s8(_val2), vget_high_s8(_w)); _s2 = vmlal_s8(_s2, vget_low_s8(_val3), vget_high_s8(_w)); _s3 = vmlal_s8(_s3, vget_high_s8(_val3), vget_high_s8(_w)); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); _sum4 = vaddw_s16(_sum4, vget_low_s16(_s2)); _sum5 = vaddw_s16(_sum5, vget_high_s16(_s2)); _sum6 = vaddw_s16(_sum6, vget_low_s16(_s3)); _sum7 = vaddw_s16(_sum7, vget_high_s16(_s3)); tmpptr += 64; kptr0 += 16; } for (; j < nn; j++) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x8_t _w = vld1_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), _w); int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), _w); int16x8_t _s2 = vmull_s8(vget_low_s8(_val1), _w); int16x8_t _s3 = vmull_s8(vget_high_s8(_val1), _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); _sum4 = vaddw_s16(_sum4, vget_low_s16(_s2)); _sum5 = vaddw_s16(_sum5, vget_high_s16(_s2)); _sum6 = vaddw_s16(_sum6, vget_low_s16(_s3)); _sum7 = vaddw_s16(_sum7, vget_high_s16(_s3)); tmpptr += 32; kptr0 += 8; } 
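// horizontal reduction below: fold the eight int32x4 partial accumulators into a single int32x4 holding the four pixels' int32 results for this output channel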
_sum0 = vaddq_s32(_sum0, _sum1); _sum2 = vaddq_s32(_sum2, _sum3); _sum4 = vaddq_s32(_sum4, _sum5); _sum6 = vaddq_s32(_sum6, _sum7); int32x2_t _s0 = vadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s2 = vadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); int32x2_t _s4 = vadd_s32(vget_low_s32(_sum4), vget_high_s32(_sum4)); int32x2_t _s6 = vadd_s32(vget_low_s32(_sum6), vget_high_s32(_sum6)); int32x2_t _ss0 = vpadd_s32(_s0, _s2); int32x2_t _ss1 = vpadd_s32(_s4, _s6); _sum0 = vcombine_s32(_ss0, _ss1); #endif // __ARM_FEATURE_DOTPROD } int sum0123[4] = {0, 0, 0, 0}; int j = 0; for (; j < nn1; j++) { signed char val0 = tmpptr[0]; signed char val1 = tmpptr[1]; signed char val2 = tmpptr[2]; signed char val3 = tmpptr[3]; signed char w = kptr0[0]; sum0123[0] += val0 * w; sum0123[1] += val1 * w; sum0123[2] += val2 * w; sum0123[3] += val3 * w; tmpptr += 4; kptr0 += 1; } _sum0 = vaddq_s32(_sum0, vld1q_s32(sum0123)); vst1q_s32(outptr0, _sum0); outptr0 += 4; } #endif // __aarch64__ for (; i + 1 < size; i += 2) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2); #endif const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x2_t _sum = vdup_n_s32(0); if (nn > 0) { #if __ARM_FEATURE_DOTPROD int32x2_t _sum0 = vdup_n_s32(0); int32x2_t _sum1 = vdup_n_s32(0); int j = 0; for (; j < nn; j++) { int8x16_t _val01_lh = vld1q_s8(tmpptr); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdot_lane_s32(_sum0, vget_low_s8(_val01_lh), _w_lh, 0); _sum1 = vdot_lane_s32(_sum1, vget_high_s8(_val01_lh), _w_lh, 1); tmpptr += 16; kptr0 += 8; } _sum = vadd_s32(_sum0, _sum1); #else // __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w)); int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w)); _s0 = vmlal_s8(_s0, vget_low_s8(_val1), vget_high_s8(_w)); _s1 = vmlal_s8(_s1, vget_high_s8(_val1), vget_high_s8(_w)); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); tmpptr += 32; kptr0 += 16; } for (; j < nn; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x8_t _w = vld1_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val), _w); int16x8_t _s1 = vmull_s8(vget_high_s8(_val), _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); tmpptr += 16; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum1); _sum2 = vaddq_s32(_sum2, _sum3); int32x2_t _s0 = vadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s2 = vadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); _sum = vpadd_s32(_s0, _s2); #endif // __ARM_FEATURE_DOTPROD } int sum01[2] = {0, 0}; int j = 0; for (; j < nn1; j++) { signed char val0 = tmpptr[0]; signed char val1 = tmpptr[1]; signed char w = kptr0[0]; sum01[0] += val0 * w; sum01[1] += val1 * w; tmpptr += 2; kptr0 += 1; } _sum = vadd_s32(_sum, vld1_s32(sum01)); vst1_s32(outptr0, 
_sum); outptr0 += 2; } for (; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif const signed char* kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int sum = 0; if (nn > 0) { #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x2_t _sum1 = vdup_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w = vld1q_s8(kptr0); _sum0 = vdotq_s32(_sum0, _val, _w); tmpptr += 16; kptr0 += 16; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x8_t _w = vld1_s8(kptr0); _sum1 = vdot_s32(_sum1, _val, _w); tmpptr += 8; kptr0 += 8; } sum = vaddvq_s32(_sum0) + vaddv_s32(_sum1); #else // __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _s8 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w)); _s8 = vmlal_s8(_s8, vget_high_s8(_val), vget_high_s8(_w)); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s8)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s8)); tmpptr += 16; kptr0 += 16; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x8_t _w = vld1_s8(kptr0); int16x8_t _s8 = vmull_s8(_val, _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s8)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s8)); tmpptr += 8; kptr0 += 8; } int32x4_t _sum = vaddq_s32(_sum0, _sum1); #if __aarch64__ sum = vaddvq_s32(_sum); // dot #else int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum)); _ss = vpadd_s32(_ss, _ss); sum = vget_lane_s32(_ss, 0); #endif #endif // __ARM_FEATURE_DOTPROD } int j = 0; for (; j < nn1; j++) { signed char val = tmpptr[0]; signed char w = kptr0[0]; sum += val * w; tmpptr += 1; kptr0 += 1; } outptr0[0] = sum; outptr0 += 1; } #else // __ARM_NEON for (; i < size; i++) { const signed char* tmpptr = tmp.channel(i); const signed char* kptr0 = kernel.channel(p); int nn1 = inch * maxk; int sum = 0; int j = 0; for (; j < nn1; j++) { signed char val = tmpptr[0]; signed char w = kptr0[0]; sum += val * w; tmpptr += 1; kptr0 += 1; } outptr0[0] = sum; outptr0 += 1; } #endif // __ARM_NEON } } static void convolution_im2col_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h) { const int maxk = kernel_w * kernel_h; #if __ARM_NEON // interleave // src = maxk-inch-outch // dst = 8a-4b-maxk-inch/8a-outch/4b // dst = 4a-4b-2-maxk-inch/8a-outch/4b (arm82) Mat kernel = _kernel.reshape(maxk, inch, outch); if (outch >= 4) { if (inch >= 8) kernel_tm.create(32 * maxk, inch / 8 + inch % 8, outch / 4 + outch % 4, 1u); else kernel_tm.create(4 * maxk, inch, outch / 4 + outch % 4, 1u); } else { if (inch >= 8) kernel_tm.create(8 * maxk, inch / 8 + inch % 8, outch, 1u); else kernel_tm.create(1 * maxk, inch, outch, 1u); } int q = 0; for (; q + 3 < outch; q += 4) { signed char* g00 = kernel_tm.channel(q / 4); int p = 0; for (; p + 7 < inch; p += 8) { for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j); g00[0] = k00[k]; g00++; } } for (int i = 0; i < 4; i++) { for (int j = 4; j < 8; j++) { const signed char* k00 = 
kernel.channel(q + i).row<const signed char>(p + j); g00[0] = k00[k]; g00++; } } #else for (int i = 0; i < 4; i++) { for (int j = 0; j < 8; j++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j); g00[0] = k00[k]; g00++; } } #endif } } // TODO unroll 4 for (; p < inch; p++) { for (int k = 0; k < maxk; k++) { for (int i = 0; i < 4; i++) { const signed char* k00 = kernel.channel(q + i).row<const signed char>(p); g00[0] = k00[k]; g00++; } } } } // TODO unroll 2 for (; q < outch; q++) { signed char* g00 = kernel_tm.channel(q / 4 + q % 4); int p = 0; for (; p + 7 < inch; p += 8) { for (int k = 0; k < maxk; k++) { for (int j = 0; j < 8; j++) { const signed char* k00 = kernel.channel(q).row<const signed char>(p + j); g00[0] = k00[k]; g00++; } } } // TODO unroll 4 for (; p < inch; p++) { for (int k = 0; k < maxk; k++) { const signed char* k00 = kernel.channel(q).row<const signed char>(p); g00[0] = k00[k]; g00++; } } } #else // __ARM_NEON kernel_tm = _kernel.reshape(maxk, inch, outch); #endif // __ARM_NEON } static void convolution_im2col_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * kernel_h; // im2col Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator); { const int gap = w * stride_h - outw * stride_w; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); signed char* ptr = bottom_im2col.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { const signed char* sptr = img.row<const signed char>(dilation_h * u) + dilation_w * v; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { ptr[0] = sptr[0]; ptr[1] = sptr[stride_w]; ptr[2] = sptr[stride_w * 2]; ptr[3] = sptr[stride_w * 3]; sptr += stride_w * 4; ptr += 4; } for (; j + 1 < outw; j += 2) { ptr[0] = sptr[0]; ptr[1] = sptr[stride_w]; sptr += stride_w * 2; ptr += 2; } for (; j < outw; j++) { ptr[0] = sptr[0]; sptr += stride_w; ptr += 1; } sptr += gap; } } } } } im2col_sgemm_int8_neon(bottom_im2col, top_blob, kernel, opt); }
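All of the tiled NEON and asm variants in this file evaluate one contract: im2col gathers, for each output pixel, the maxk * inch input bytes under its kernel window, and the sgemm then yields, per output channel, the int32 dot product of that column with the channel's int8 weights. The tiling only changes how many pixels (16/8/4/2/1) and output channels (4/1) are processed per iteration. The sketch below restates that contract in scalar form, assuming plain arrays in place of ncnn::Mat, no padding or dilation, and illustrative names; it is a reference for the arithmetic, not ncnn's implementation.

#include <cstdio>
#include <vector>

// Direct int8 convolution: the arithmetic that im2col (gather) + sgemm (dot
// products) compute once fused back together. Accumulation is int32.
static void conv_int8_reference(const signed char* bottom, int w, int h, int inch,
                                const signed char* kernel, int kw, int kh,
                                int stride_w, int stride_h,
                                int* top, int outw, int outh, int outch)
{
    const int maxk = kw * kh;
    for (int p = 0; p < outch; p++)
    {
        for (int y = 0; y < outh; y++)
        {
            for (int x = 0; x < outw; x++)
            {
                int sum = 0; // 32-bit accumulator, as stored through outptr0 above
                for (int q = 0; q < inch; q++)
                {
                    const signed char* img = bottom + q * h * w;            // input channel q
                    const signed char* k0 = kernel + (p * inch + q) * maxk; // weights for (p, q)
                    for (int v = 0; v < kh; v++)
                        for (int u = 0; u < kw; u++)
                            sum += img[(y * stride_h + v) * w + (x * stride_w + u)] * k0[v * kw + u];
                }
                top[(p * outh + y) * outw + x] = sum;
            }
        }
    }
}

int main()
{
    // 1 input channel, 4x4 image of ones; one 3x3 kernel of ones; stride 1
    std::vector<signed char> bottom(16, 1), kernel(9, 1);
    std::vector<int> top(4);
    conv_int8_reference(bottom.data(), 4, 4, 1, kernel.data(), 3, 3, 1, 1, top.data(), 2, 2, 1);
    printf("%d %d %d %d\n", top[0], top[1], top[2], top[3]); // prints: 9 9 9 9
    return 0;
}

Accumulating int8 products in int32 is also why the intrinsics paths widen through smull/vpadalq (or use sdot directly): two int8 * int8 terms can already exceed the int16 range, so a maxk * inch reduction cannot stay in 16 bits.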
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

static void im2col_sgemm_int8_neon(const Mat & bottom_im2col, Mat & top_blob, const Mat & kernel, const Option & opt)
{
    // Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);

    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    //permute: pack the im2col matrix into pixel tiles sized for the kernels below
    // (16/8/4/2/1 with dotprod, 4/2/1 on plain aarch64, 2/1 on armv7)
    Mat tmp;
#if __ARM_NEON
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
    if (inch >= 8)
    {
        if (size >= 16)
            tmp.create(16 * maxk, inch / 8 + inch % 8, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else if (size >= 8)
            tmp.create(8 * maxk, inch / 8 + inch % 8, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else if (size >= 4)
            tmp.create(4 * maxk, inch / 8 + inch % 8, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else if (size >= 2)
            tmp.create(2 * maxk, inch / 8 + inch % 8, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else
            tmp.create(maxk, inch / 8 + inch % 8, size, 8u, 8, opt.workspace_allocator);
    }
    else
    {
        if (size >= 16)
            tmp.create(16 * maxk, inch, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else if (size >= 8)
            tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else if (size >= 4)
            tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else if (size >= 2)
            tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else
            tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
    }
#else /* // __ARM_FEATURE_DOTPROD */
    if (inch >= 8)
    {
        if (size >= 4)
            tmp.create(4 * maxk, inch / 8 + inch % 8, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else if (size >= 2)
            tmp.create(2 * maxk, inch / 8 + inch % 8, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else
            tmp.create(maxk, inch / 8 + inch % 8, size, 8u, 8, opt.workspace_allocator);
    }
    else
    {
        if (size >= 4)
            tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else if (size >= 2)
            tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else
            tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
    }
#endif /* // __ARM_FEATURE_DOTPROD */
#else /* // __aarch64__ */
    if (inch >= 8)
    {
        if (size >= 2)
            tmp.create(2 * maxk, inch / 8 + inch % 8, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else
            tmp.create(maxk, inch / 8 + inch % 8, size, 8u, 8, opt.workspace_allocator);
    }
    else
    {
        if (size >= 2)
            tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else
            tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
    }
#endif /* // __aarch64__ */
    {
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
        int
nn_size = size >> 4; int remain_size_start = 0; for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 16; signed char *tmpptr = tmp.channel(i / 16); int q = 0; for (; q + 7 < inch; q += 8) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; const signed char *img1 = (const signed char *)bottom_im2col.channel(q + 1) + i; const signed char *img2 = (const signed char *)bottom_im2col.channel(q + 2) + i; const signed char *img3 = (const signed char *)bottom_im2col.channel(q + 3) + i; const signed char *img4 = (const signed char *)bottom_im2col.channel(q + 4) + i; const signed char *img5 = (const signed char *)bottom_im2col.channel(q + 5) + i; const signed char *img6 = (const signed char *)bottom_im2col.channel(q + 6) + i; const signed char *img7 = (const signed char *)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { asm volatile ( "ld1 {v0.16b}, [%0] \n" "ld1 {v1.16b}, [%1] \n" "ld1 {v2.16b}, [%2] \n" "ld1 {v3.16b}, [%3] \n" "ld1 {v4.16b}, [%4] \n" "ld1 {v5.16b}, [%5] \n" "ld1 {v6.16b}, [%6] \n" "ld1 {v7.16b}, [%7] \n" "st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%8], #64 \n" "st4 {v4.16b, v5.16b, v6.16b, v7.16b}, [%8], #64 \n" : "=r" (img0), //%0 "=r"(img1), "=r"(img2), "=r"(img3), "=r"(img4), "=r"(img5), "=r"(img6), "=r"(img7), "=r"(tmpptr) // %8 : "0"(img0), "1"(img1), "2"(img2), "3"(img3), "4"(img4), "5"(img5), "6"(img6), "7"(img7), "8"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { asm volatile ( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.16b}, [%0] \n" "st1 {v0.16b}, [%1], #16 \n" : "=r" (img0), //%0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += size; } } } remain_size_start += nn_size << 4; nn_size = (size - remain_size_start) >> 3; for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8); int q = 0; for (; q + 7 < inch; q += 8) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; const signed char *img1 = (const signed char *)bottom_im2col.channel(q + 1) + i; const signed char *img2 = (const signed char *)bottom_im2col.channel(q + 2) + i; const signed char *img3 = (const signed char *)bottom_im2col.channel(q + 3) + i; const signed char *img4 = (const signed char *)bottom_im2col.channel(q + 4) + i; const signed char *img5 = (const signed char *)bottom_im2col.channel(q + 5) + i; const signed char *img6 = (const signed char *)bottom_im2col.channel(q + 6) + i; const signed char *img7 = (const signed char *)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { asm volatile ( "ld1 {v0.8b}, [%0] \n" "ld1 {v1.8b}, [%1] \n" "ld1 {v2.8b}, [%2] \n" "ld1 {v3.8b}, [%3] \n" "ld1 {v4.8b}, [%4] \n" "ld1 {v5.8b}, [%5] \n" "ld1 {v6.8b}, [%6] \n" "ld1 {v7.8b}, [%7] \n" "st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [%8], #32 \n" "st4 {v4.8b, v5.8b, v6.8b, v7.8b}, [%8], #32 \n" : "=r" (img0), //%0 "=r"(img1), "=r"(img2), "=r"(img3), "=r"(img4), "=r"(img5), "=r"(img6), "=r"(img7), "=r"(tmpptr) // %8 : "0"(img0), "1"(img1), "2"(img2), "3"(img3), "4"(img4), "5"(img5), "6"(img6), "7"(img7), "8"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 
+= size; } } for (; q < inch; q++) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { asm volatile ( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.8b}, [%0] \n" "st1 {v0.8b}, [%1], #8 \n" : "=r" (img0), //%0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += size; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #else /* // __ARM_FEATURE_DOTPROD */ int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 2; #endif /* // __ARM_FEATURE_DOTPROD */ for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; #if __ARM_FEATURE_DOTPROD signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else signed char *tmpptr = tmp.channel(i / 4); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; const signed char *img1 = (const signed char *)bottom_im2col.channel(q + 1) + i; const signed char *img2 = (const signed char *)bottom_im2col.channel(q + 2) + i; const signed char *img3 = (const signed char *)bottom_im2col.channel(q + 3) + i; const signed char *img4 = (const signed char *)bottom_im2col.channel(q + 4) + i; const signed char *img5 = (const signed char *)bottom_im2col.channel(q + 5) + i; const signed char *img6 = (const signed char *)bottom_im2col.channel(q + 6) + i; const signed char *img7 = (const signed char *)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; tmpptr[0] = img0[2]; tmpptr[1] = img1[2]; tmpptr[2] = img2[2]; tmpptr[3] = img3[2]; tmpptr[4] = img0[3]; tmpptr[5] = img1[3]; tmpptr[6] = img2[3]; tmpptr[7] = img3[3]; tmpptr += 8; tmpptr[0] = img4[0]; tmpptr[1] = img5[0]; tmpptr[2] = img6[0]; tmpptr[3] = img7[0]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; tmpptr[0] = img4[2]; tmpptr[1] = img5[2]; tmpptr[2] = img6[2]; tmpptr[3] = img7[2]; tmpptr[4] = img4[3]; tmpptr[5] = img5[3]; tmpptr[6] = img6[3]; tmpptr[7] = img7[3]; tmpptr += 8; #else tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; tmpptr[0] = img0[1]; tmpptr[1] = img1[1]; tmpptr[2] = img2[1]; tmpptr[3] = img3[1]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; tmpptr[0] = img0[2]; tmpptr[1] = img1[2]; tmpptr[2] = img2[2]; tmpptr[3] = img3[2]; tmpptr[4] = img4[2]; tmpptr[5] = img5[2]; tmpptr[6] = img6[2]; tmpptr[7] = img7[2]; tmpptr += 8; tmpptr[0] = img0[3]; tmpptr[1] = img1[3]; tmpptr[2] = img2[3]; tmpptr[3] = img3[3]; tmpptr[4] = img4[3]; tmpptr[5] = img5[3]; tmpptr[6] = img6[3]; tmpptr[7] = img7[3]; tmpptr += 8; #endif /* // __ARM_FEATURE_DOTPROD */ img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += size; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #else int remain_size_start = 0; int nn_size = (size - 
remain_size_start) >> 1; #endif for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else signed char *tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else signed char *tmpptr = tmp.channel(i / 2); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; const signed char *img1 = (const signed char *)bottom_im2col.channel(q + 1) + i; const signed char *img2 = (const signed char *)bottom_im2col.channel(q + 2) + i; const signed char *img3 = (const signed char *)bottom_im2col.channel(q + 3) + i; const signed char *img4 = (const signed char *)bottom_im2col.channel(q + 4) + i; const signed char *img5 = (const signed char *)bottom_im2col.channel(q + 5) + i; const signed char *img6 = (const signed char *)bottom_im2col.channel(q + 6) + i; const signed char *img7 = (const signed char *)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; tmpptr[0] = img4[0]; tmpptr[1] = img5[0]; tmpptr[2] = img6[0]; tmpptr[3] = img7[0]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; #else tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; tmpptr[0] = img0[1]; tmpptr[1] = img1[1]; tmpptr[2] = img2[1]; tmpptr[3] = img3[1]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; #endif /* // __ARM_FEATURE_DOTPROD */ img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += size; } } } remain_size_start += nn_size << 1; for (int i = remain_size_start; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else signed char *tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else signed char *tmpptr = tmp.channel(i / 2 + i % 2); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; const signed char *img1 = (const signed char *)bottom_im2col.channel(q + 1) + i; const signed char *img2 = (const signed char *)bottom_im2col.channel(q + 2) + i; const signed char *img3 = (const signed char *)bottom_im2col.channel(q + 3) + i; const signed char *img4 = (const signed char *)bottom_im2col.channel(q + 4) + i; const signed char *img5 = (const signed char *)bottom_im2col.channel(q + 5) + i; const signed char *img6 = (const signed char *)bottom_im2col.channel(q + 6) + i; const signed char *img7 = (const signed char *)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; 
img7 += size; } } for (; q < inch; q++) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += size; } } } } #else /* // __ARM_NEON */ tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator); { for (int i = 0; i < size; i++) { signed char *tmpptr = tmp.channel(i); int q = 0; for (; q < inch; q++) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += size; } } } } #endif /* // __ARM_NEON */ int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON nn_outch = outch >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; int *outptr0 = top_blob.channel(p); int *outptr1 = top_blob.channel(p + 1); int *outptr2 = top_blob.channel(p + 2); int *outptr3 = top_blob.channel(p + 3); int i = 0; #if __aarch64__ #if __ARM_FEATURE_DOTPROD for (; i + 15 < size; i += 16) { const signed char *tmpptr = tmp.channel(i / 16); const signed char *kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; asm volatile ( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "cmp %w4, #0 \n" "beq 1f \n" "ld1 {v8.16b}, [%7], #16 \n" // _w0123_l "ld1 {v0.16b}, [%6], #16 \n" // _val0123_l "0: \n" "ld1 {v1.16b}, [%6], #16 \n" // _val4567_l "sdot v16.4s, v8.16b, v0.4b[0] \n" "sdot v17.4s, v8.16b, v0.4b[1] \n" "sdot v18.4s, v8.16b, v0.4b[2] \n" "sdot v19.4s, v8.16b, v0.4b[3] \n" "ld1 {v2.16b}, [%6], #16 \n" // _val891011_l "sdot v20.4s, v8.16b, v1.4b[0] \n" "sdot v21.4s, v8.16b, v1.4b[1] \n" "sdot v22.4s, v8.16b, v1.4b[2] \n" "sdot v23.4s, v8.16b, v1.4b[3] \n" "ld1 {v3.16b}, [%6], #16 \n" // _val12131415_l "sdot v24.4s, v8.16b, v2.4b[0] \n" "sdot v25.4s, v8.16b, v2.4b[1] \n" "ld1 {v9.16b}, [%7], #16 \n" // _w0123_h "sdot v26.4s, v8.16b, v2.4b[2] \n" "sdot v27.4s, v8.16b, v2.4b[3] \n" "ld1 {v4.16b}, [%6], #16 \n" // _val0123_h "sdot v28.4s, v8.16b, v3.4b[0] \n" "sdot v29.4s, v8.16b, v3.4b[1] \n" "sdot v30.4s, v8.16b, v3.4b[2] \n" "sdot v31.4s, v8.16b, v3.4b[3] \n" "ld1 {v5.16b}, [%6], #16 \n" // _val4567_h "sdot v16.4s, v9.16b, v4.4b[0] \n" "sdot v17.4s, v9.16b, v4.4b[1] \n" "sdot v18.4s, v9.16b, v4.4b[2] \n" "sdot v19.4s, v9.16b, v4.4b[3] \n" "ld1 {v6.16b}, [%6], #16 \n" // _val891011_h "sdot v20.4s, v9.16b, v5.4b[0] \n" "sdot v21.4s, v9.16b, v5.4b[1] \n" "sdot v22.4s, v9.16b, v5.4b[2] \n" "sdot v23.4s, v9.16b, v5.4b[3] \n" "ld1 {v7.16b}, [%6], #16 \n" // _val12131415_h "sdot v24.4s, v9.16b, v6.4b[0] \n" "sdot v25.4s, v9.16b, v6.4b[1] \n" "ld1 {v8.16b}, [%7], #16 \n" // _w0123_l "sdot v26.4s, v9.16b, v6.4b[2] \n" "sdot v27.4s, v9.16b, v6.4b[3] \n" "ld1 {v0.16b}, [%6], #16 \n" // _val0123_l "sdot v28.4s, v9.16b, v7.4b[0] \n" "sdot v29.4s, v9.16b, v7.4b[1] \n" "subs %w4, %w4, #1 \n" "sdot v30.4s, v9.16b, v7.4b[2] \n" "sdot v31.4s, v9.16b, v7.4b[3] \n" "bne 0b \n" "sub %6, %6, #16 \n" "sub %7, %7, #16 \n" "1: \n" "lsr w4, %w5, #2 \n" // w4 = nn1 >> 2 "cmp w4, #0 \n" "beq 3f \n" "2: \n" "ld1 
{v8.8b, v9.8b}, [%7], #16 \n" "ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%6], #64 \n" "uzp1 v10.8b, v8.8b, v9.8b \n" "uzp2 v11.8b, v8.8b, v9.8b \n" "uzp1 v4.16b, v0.16b, v1.16b \n" "uzp2 v5.16b, v0.16b, v1.16b \n" "uzp1 v6.16b, v2.16b, v3.16b \n" "uzp2 v7.16b, v2.16b, v3.16b \n" "uzp1 v8.8b, v10.8b, v11.8b \n" "uzp2 v9.8b, v10.8b, v11.8b \n" "uzp1 v0.16b, v4.16b, v5.16b \n" // 0 1 4 5 "uzp2 v1.16b, v4.16b, v5.16b \n" // 8 9 c d "mov v8.d[1], v9.d[0] \n" // _w "uzp1 v2.16b, v6.16b, v7.16b \n" // 2 3 6 7 "uzp2 v3.16b, v6.16b, v7.16b \n" // a b e f "sdot v16.4s, v8.16b, v0.4b[0] \n" "sdot v17.4s, v8.16b, v0.4b[1] \n" "sdot v18.4s, v8.16b, v2.4b[0] \n" "sdot v19.4s, v8.16b, v2.4b[1] \n" "sdot v20.4s, v8.16b, v0.4b[2] \n" "sdot v21.4s, v8.16b, v0.4b[3] \n" "sdot v22.4s, v8.16b, v2.4b[2] \n" "sdot v23.4s, v8.16b, v2.4b[3] \n" "sdot v24.4s, v8.16b, v1.4b[0] \n" "sdot v25.4s, v8.16b, v1.4b[1] \n" "sdot v26.4s, v8.16b, v3.4b[0] \n" "sdot v27.4s, v8.16b, v3.4b[1] \n" "sdot v28.4s, v8.16b, v1.4b[2] \n" "sdot v29.4s, v8.16b, v1.4b[3] \n" "sdot v30.4s, v8.16b, v3.4b[2] \n" "sdot v31.4s, v8.16b, v3.4b[3] \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "and w4, %w5, #3 \n" // w4 = remain = nn1 & 3 "cmp w4, #0 \n" // w4 > 0 "beq 5f \n" "4: \n" "ld1 {v1.8b}, [%7] \n" "ld1 {v0.16b}, [%6] \n" "sshll v1.8h, v1.8b, #0 \n" "sshll v2.8h, v0.8b, #0 \n" "sshll2 v3.8h, v0.16b, #0 \n" "smlal v16.4s, v1.4h, v2.h[0] \n" "smlal v17.4s, v1.4h, v2.h[1] \n" "smlal v18.4s, v1.4h, v2.h[2] \n" "smlal v19.4s, v1.4h, v2.h[3] \n" "smlal v20.4s, v1.4h, v2.h[4] \n" "smlal v21.4s, v1.4h, v2.h[5] \n" "smlal v22.4s, v1.4h, v2.h[6] \n" "smlal v23.4s, v1.4h, v2.h[7] \n" "smlal v24.4s, v1.4h, v3.h[0] \n" "smlal v25.4s, v1.4h, v3.h[1] \n" "smlal v26.4s, v1.4h, v3.h[2] \n" "smlal v27.4s, v1.4h, v3.h[3] \n" "smlal v28.4s, v1.4h, v3.h[4] \n" "smlal v29.4s, v1.4h, v3.h[5] \n" "smlal v30.4s, v1.4h, v3.h[6] \n" "smlal v31.4s, v1.4h, v3.h[7] \n" "add %6, %6, #16 \n" "add %7, %7, #4 \n" "subs w4, w4, #1 \n" "bne 4b \n" "5: \n" // transpose 4 x16 "trn1 v0.4s, v16.4s, v17.4s \n" "trn2 v1.4s, v16.4s, v17.4s \n" "trn1 v2.4s, v18.4s, v19.4s \n" "trn2 v3.4s, v18.4s, v19.4s \n" "trn1 v4.4s, v20.4s, v21.4s \n" "trn2 v5.4s, v20.4s, v21.4s \n" "trn1 v6.4s, v22.4s, v23.4s \n" "trn2 v7.4s, v22.4s, v23.4s \n" "trn1 v8.4s, v24.4s, v25.4s \n" "trn2 v9.4s, v24.4s, v25.4s \n" "trn1 v10.4s, v26.4s, v27.4s \n" "trn2 v11.4s, v26.4s, v27.4s \n" "trn1 v12.4s, v28.4s, v29.4s \n" "trn2 v13.4s, v28.4s, v29.4s \n" "trn1 v14.4s, v30.4s, v31.4s \n" "trn2 v15.4s, v30.4s, v31.4s \n" "trn1 v16.2d, v0.2d, v2.2d \n" "trn2 v24.2d, v0.2d, v2.2d \n" "trn1 v20.2d, v1.2d, v3.2d \n" "trn2 v28.2d, v1.2d, v3.2d \n" "trn1 v17.2d, v4.2d, v6.2d \n" "trn2 v25.2d, v4.2d, v6.2d \n" "trn1 v21.2d, v5.2d, v7.2d \n" "trn2 v29.2d, v5.2d, v7.2d \n" "trn1 v18.2d, v8.2d, v10.2d \n" "trn2 v26.2d, v8.2d, v10.2d \n" "trn1 v22.2d, v9.2d, v11.2d \n" "trn2 v30.2d, v9.2d, v11.2d \n" "trn1 v19.2d, v12.2d, v14.2d \n" "trn2 v27.2d, v12.2d, v14.2d \n" "trn1 v23.2d, v13.2d, v15.2d \n" "trn2 v31.2d, v13.2d, v15.2d \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%3], #64 \n" :"=r"(outptr0), "=r"(outptr1), "=r"(outptr2), "=r"(outptr3), "=r"(nn), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) :"0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(nn), "5"(nn1), "6"(tmpptr), "7"(kptr0) :"memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", 
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { const signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8); const signed char *kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_l, _val4567_l, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_l, _val4567_l, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_l, _val4567_l, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_l, _val4567_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 32); int8x16_t _val4567_h = vld1q_s8(tmpptr + 48); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_h, _val4567_h, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_h, _val4567_h, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_h, _val4567_h, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_h, _val4567_h, 3); tmpptr += 64; kptr0 += 32; } int j = 0; for (; j + 3 < nn1; j += 4) { int8x8x4_t _val4 = vld4_s8(tmpptr); int8x8x2_t _val0145 = vuzp_s8(_val4.val[0], _val4.val[1]); int8x8x2_t _val2367 = vuzp_s8(_val4.val[2], _val4.val[3]); int8x16_t _val0123 = vcombine_s8(_val0145.val[0], _val2367.val[0]); int8x16_t _val4567 = vcombine_s8(_val0145.val[1], _val2367.val[1]); int8x16_t _w = vld1q_s8(kptr0); int8x8x2_t _w01 = vuzp_s8(vget_low_s8(_w), vget_high_s8(_w)); int8x8x2_t _w0123 = vuzp_s8(_w01.val[0], _w01.val[1]); int8x16_t _w0123f = vcombine_s8(_w0123.val[0], _w0123.val[1]); _sum0 = vdotq_laneq_s32(_sum0, _w0123f, _val0123, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123f, _val0123, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123f, _val0123, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123f, _val0123, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123f, _val4567, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123f, _val4567, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123f, _val4567, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123f, _val4567, 3); tmpptr += 32; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _val2 = vdup_n_s16(tmpptr[2]); int16x4_t _val3 = vdup_n_s16(tmpptr[3]); int16x4_t _val4 = vdup_n_s16(tmpptr[4]); int16x4_t _val5 = vdup_n_s16(tmpptr[5]); int16x4_t _val6 = vdup_n_s16(tmpptr[6]); int16x4_t _val7 = vdup_n_s16(tmpptr[7]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val0, _w0123); _sum1 = vmlal_s16(_sum1, _val1, _w0123); _sum2 = vmlal_s16(_sum2, _val2, _w0123); _sum3 = 
vmlal_s16(_sum3, _val3, _w0123); _sum4 = vmlal_s16(_sum4, _val4, _w0123); _sum5 = vmlal_s16(_sum5, _val5, _w0123); _sum6 = vmlal_s16(_sum6, _val6, _w0123); _sum7 = vmlal_s16(_sum7, _val7, _w0123); tmpptr += 8; kptr0 += 4; } //transpose 4 x8 int32x4x2_t _s01 = vtrnq_s32(_sum0, _sum1); int32x4x2_t _s23 = vtrnq_s32(_sum2, _sum3); int32x4x2_t _s45 = vtrnq_s32(_sum4, _sum5); int32x4x2_t _s67 = vtrnq_s32(_sum6, _sum7); _sum0 = vcombine_s32(vget_low_s32(_s01.val[0]), vget_low_s32(_s23.val[0])); _sum1 = vcombine_s32(vget_low_s32(_s01.val[1]), vget_low_s32(_s23.val[1])); _sum2 = vcombine_s32(vget_high_s32(_s01.val[0]), vget_high_s32(_s23.val[0])); _sum3 = vcombine_s32(vget_high_s32(_s01.val[1]), vget_high_s32(_s23.val[1])); _sum4 = vcombine_s32(vget_low_s32(_s45.val[0]), vget_low_s32(_s67.val[0])); _sum5 = vcombine_s32(vget_low_s32(_s45.val[1]), vget_low_s32(_s67.val[1])); _sum6 = vcombine_s32(vget_high_s32(_s45.val[0]), vget_high_s32(_s67.val[0])); _sum7 = vcombine_s32(vget_high_s32(_s45.val[1]), vget_high_s32(_s67.val[1])); vst1q_s32(outptr0, _sum0); vst1q_s32(outptr1, _sum1); vst1q_s32(outptr2, _sum2); vst1q_s32(outptr3, _sum3); vst1q_s32(outptr0 + 4, _sum4); vst1q_s32(outptr1 + 4, _sum5); vst1q_s32(outptr2 + 4, _sum6); vst1q_s32(outptr3 + 4, _sum7); outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; } #endif for (; i + 3 < size; i += 4) { #if __ARM_FEATURE_DOTPROD const signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else const signed char *tmpptr = tmp.channel(i / 4); #endif const signed char *kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 16); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); tmpptr += 32; kptr0 += 32; } int j = 0; for (; j + 3 < nn1; j += 4) { int8x16_t _val = vld1q_s8(tmpptr); int8x8x2_t _val01 = vuzp_s8(vget_low_s8(_val), vget_high_s8(_val)); int8x8x2_t _val0123 = vuzp_s8(_val01.val[0], _val01.val[1]); int8x16_t _val0123f = vcombine_s8(_val0123.val[0], _val0123.val[1]); int8x16_t _w = vld1q_s8(kptr0); int8x8x2_t _w01 = vuzp_s8(vget_low_s8(_w), vget_high_s8(_w)); int8x8x2_t _w0123 = vuzp_s8(_w01.val[0], _w01.val[1]); int8x16_t _w0123f = vcombine_s8(_w0123.val[0], _w0123.val[1]); _sum0 = vdotq_laneq_s32(_sum0, _w0123f, _val0123f, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123f, _val0123f, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123f, _val0123f, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123f, _val0123f, 3); tmpptr += 16; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _val2 = vdup_n_s16(tmpptr[2]); int16x4_t _val3 = vdup_n_s16(tmpptr[3]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); 
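/* lane 3 below completes _w0123: the four int8 weights for output channels p..p+3, sign-extended to s16 for the widening multiply-accumulate */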
_w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val0, _w0123); _sum1 = vmlal_s16(_sum1, _val1, _w0123); _sum2 = vmlal_s16(_sum2, _val2, _w0123); _sum3 = vmlal_s16(_sum3, _val3, _w0123); tmpptr += 4; kptr0 += 4; } //transpose 4 x4 int32x4x2_t _s01 = vtrnq_s32(_sum0, _sum1); int32x4x2_t _s23 = vtrnq_s32(_sum2, _sum3); _sum0 = vcombine_s32(vget_low_s32(_s01.val[0]), vget_low_s32(_s23.val[0])); _sum1 = vcombine_s32(vget_low_s32(_s01.val[1]), vget_low_s32(_s23.val[1])); _sum2 = vcombine_s32(vget_high_s32(_s01.val[0]), vget_high_s32(_s23.val[0])); _sum3 = vcombine_s32(vget_high_s32(_s01.val[1]), vget_high_s32(_s23.val[1])); vst1q_s32(outptr0, _sum0); vst1q_s32(outptr1, _sum1); vst1q_s32(outptr2, _sum2); vst1q_s32(outptr3, _sum3); outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #else /* // __ARM_FEATURE_DOTPROD */ asm volatile ( "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "cmp %w4, #0 \n" "beq 3f \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "prfm pldl1keep, [%6, #128] \n" "prfm pldl1keep, [%7, #256] \n" "lsr w4, %w4, #1 \n" // w4 = nn >> 1 "cmp w4, #0 \n" "beq 1f \n" "prfm pldl1keep, [%7, #512] \n" "add x5, %6, #16 \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v16.16b}, [%6] \n" // val L H "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%7], #64 \n" "add %6, %6, #32 \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "ld1 {v18.16b}, [%6] \n" "add %6, %6, #32 \n" "0: \n" "smull v24.8h, v16.8b, v20.8b \n" "prfm pldl1keep, [%7, #256] \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [%7, #512] \n" "smull v26.8h, v16.8b, v21.8b \n" "subs w4, w4, #1 \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "smlal v26.8h, v18.8b, v23.8b \n" "smlal2 v27.8h, v19.16b, v23.16b \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [x5] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add x5, x5, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v2.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [x5] \n" "smlal v30.8h, v19.8b, v23.8b \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "smull v24.8h, v16.8b, v20.8b \n" "add x5, x5, #32 \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [x5, #128] \n" "smull v26.8h, v16.8b, v21.8b \n" "prfm pldl1keep, [x5, #384] \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "sadalp v5.4s, v29.8h \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "sadalp v4.4s, v28.8h \n" "smlal v26.8h, v18.8b, v23.8b \n" "sadalp v7.4s, v31.8h \n" "smlal2 v27.8h, v19.16b, v23.16b \n" "sadalp v6.4s, v30.8h \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [%6] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add 
%6, %6, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v10.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [%6] \n" "smlal v30.8h, v19.8b, v23.8b \n" "add %6, %6, #32 \n" "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%7], #64 \n" "sadalp v13.4s, v29.8h \n" "prfm pldl1keep, [%6, #128] \n" "sadalp v12.4s, v28.8h \n" "prfm pldl1keep, [%6, #384] \n" "sadalp v15.4s, v31.8h \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "sadalp v14.4s, v30.8h \n" "bne 0b \n" "sub %6, %6, #64 \n" "sub %7, %7, #64 \n" "1: \n" "and w4, %w4, #1 \n" // w4 = remain = nn & 1 "cmp w4, #0 \n" // w4 > 0 "beq 2f \n" "ld1 {v16.8b, v17.8b}, [%6], #16 \n" "ld1 {v20.8b, v21.8b, v22.8b, v23.8b}, [%7], #32 \n" "smull v24.8h, v16.8b, v20.8b \n" "smull v25.8h, v16.8b, v21.8b \n" "smull v26.8h, v16.8b, v22.8b \n" "ld1 {v18.8b, v19.8b}, [%6], #16 \n" "smull v27.8h, v16.8b, v23.8b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull v29.8h, v17.8b, v21.8b \n" "sadalp v2.4s, v26.8h \n" "smull v30.8h, v17.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smull v31.8h, v17.8b, v23.8b \n" "sadalp v4.4s, v28.8h \n" "smull v24.8h, v18.8b, v20.8b \n" "sadalp v5.4s, v29.8h \n" "smull v25.8h, v18.8b, v21.8b \n" "sadalp v6.4s, v30.8h \n" "smull v26.8h, v18.8b, v22.8b \n" "sadalp v7.4s, v31.8h \n" "smull v27.8h, v18.8b, v23.8b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v19.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull v29.8h, v19.8b, v21.8b \n" "sadalp v10.4s, v26.8h \n" "smull v30.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smull v31.8h, v19.8b, v23.8b \n" "sadalp v12.4s, v28.8h \n" "sadalp v13.4s, v29.8h \n" "sadalp v14.4s, v30.8h \n" "sadalp v15.4s, v31.8h \n" "2: \n" "addp v0.4s, v0.4s, v1.4s \n" "addp v2.4s, v2.4s, v3.4s \n" "addp v4.4s, v4.4s, v5.4s \n" "addp v6.4s, v6.4s, v7.4s \n" "addp v8.4s, v8.4s, v9.4s \n" "addp v10.4s, v10.4s, v11.4s \n" "addp v12.4s, v12.4s, v13.4s \n" "addp v14.4s, v14.4s, v15.4s \n" "addp v0.4s, v0.4s, v2.4s \n" "addp v1.4s, v4.4s, v6.4s \n" "addp v2.4s, v8.4s, v10.4s \n" "addp v3.4s, v12.4s, v14.4s \n" "3: \n" "lsr w4, %w5, #2 \n" // w4 = nn1 >> 2 "cmp w4, #0 \n" "beq 5f \n" "4: \n" "ld1 {v8.16b}, [%6], #16 \n" "ld1 {v9.16b}, [%7], #16 \n" "sshll v4.8h, v8.8b, #0 \n" "sshll2 v5.8h, v8.16b, #0 \n" "sshll v6.8h, v9.8b, #0 \n" "sshll2 v7.8h, v9.16b, #0 \n" "smlal v0.4s, v6.4h, v4.h[0] \n" "smlal v1.4s, v6.4h, v4.h[1] \n" "smlal v2.4s, v6.4h, v4.h[2] \n" "smlal v3.4s, v6.4h, v4.h[3] \n" "smlal2 v0.4s, v6.8h, v4.h[4] \n" "smlal2 v1.4s, v6.8h, v4.h[5] \n" "smlal2 v2.4s, v6.8h, v4.h[6] \n" "smlal2 v3.4s, v6.8h, v4.h[7] \n" "smlal v0.4s, v7.4h, v5.h[0] \n" "smlal v1.4s, v7.4h, v5.h[1] \n" "smlal v2.4s, v7.4h, v5.h[2] \n" "smlal v3.4s, v7.4h, v5.h[3] \n" "smlal2 v0.4s, v7.8h, v5.h[4] \n" "smlal2 v1.4s, v7.8h, v5.h[5] \n" "smlal2 v2.4s, v7.8h, v5.h[6] \n" "smlal2 v3.4s, v7.8h, v5.h[7] \n" "subs w4, w4, #1 \n" "bne 4b \n" "5: \n" "and w4, %w5, #3 \n" // w4 = nn1 & 3 "cmp w4, #0 \n" // w4 > 0 "beq 7f \n" "6: \n" "ld1 {v4.8b}, [%6] \n" "ld1 {v6.8b}, [%7] \n" "sshll v4.8h, v4.8b, #0 \n" "sshll v6.8h, v6.8b, #0 \n" "smlal v0.4s, v6.4h, v4.h[0] \n" "smlal v1.4s, v6.4h, v4.h[1] \n" "smlal v2.4s, v6.4h, v4.h[2] \n" "smlal v3.4s, v6.4h, v4.h[3] \n" "add %6, %6, #4 \n" "add %7, %7, #4 \n" "subs w4, w4, #1 \n" "bne 6b \n" "7: \n" // transpose 4 x4 "trn1 v4.4s, v0.4s, v1.4s \n" "trn2 v5.4s, v0.4s, v1.4s \n" "trn1 v6.4s, v2.4s, v3.4s \n" "trn2 v7.4s, v2.4s, v3.4s \n" "trn1 v0.2d, v4.2d, v6.2d \n" 
"trn2 v2.2d, v4.2d, v6.2d \n" "trn1 v1.2d, v5.2d, v7.2d \n" "trn2 v3.2d, v5.2d, v7.2d \n" "st1 {v0.4s}, [%0], #16 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v2.4s}, [%2], #16 \n" "st1 {v3.4s}, [%3], #16 \n" :"=r"(outptr0), "=r"(outptr1), "=r"(outptr2), "=r"(outptr3), "=r"(nn), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) :"0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(nn), "5"(nn1), "6"(tmpptr), "7"(kptr0) :"memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #endif /* // __ARM_FEATURE_DOTPROD */ } #endif /* // __aarch64__ */ for (; i + 1 < size; i += 2) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else const signed char *tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else const signed char *tmpptr = tmp.channel(i / 2); #endif const signed char *kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; #if __aarch64__ int32x4_t _sum00 = vdupq_n_s32(0); int32x4_t _sum10 = vdupq_n_s32(0); #if __ARM_FEATURE_DOTPROD for (int j = 0; j < nn; j++) { int8x16_t _val01_l_h = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum00 = vdotq_laneq_s32(_sum00, _w0123_l, _val01_l_h, 0); _sum10 = vdotq_laneq_s32(_sum10, _w0123_l, _val01_l_h, 1); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum00 = vdotq_laneq_s32(_sum00, _w0123_h, _val01_l_h, 2); _sum10 = vdotq_laneq_s32(_sum10, _w0123_h, _val01_l_h, 3); tmpptr += 16; kptr0 += 32; } #else /* // __ARM_FEATURE_DOTPROD */ if (nn > 0) { int32x4_t _sum01 = vdupq_n_s32(0); int32x4_t _sum02 = vdupq_n_s32(0); int32x4_t _sum03 = vdupq_n_s32(0); int32x4_t _sum11 = vdupq_n_s32(0); int32x4_t _sum12 = vdupq_n_s32(0); int32x4_t _sum13 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv00 = vmlal_s8(_wv00, vget_low_s8(_val1), vget_low_s8(_w45)); _wv01 = vmlal_s8(_wv01, vget_low_s8(_val1), vget_high_s8(_w45)); _wv02 = vmlal_s8(_wv02, vget_low_s8(_val1), vget_low_s8(_w67)); _wv03 = vmlal_s8(_wv03, vget_low_s8(_val1), vget_high_s8(_w67)); _wv10 = vmlal_s8(_wv10, vget_high_s8(_val1), vget_low_s8(_w45)); _wv11 = vmlal_s8(_wv11, vget_high_s8(_val1), vget_high_s8(_w45)); _wv12 = vmlal_s8(_wv12, vget_high_s8(_val1), vget_low_s8(_w67)); _wv13 = vmlal_s8(_wv13, vget_high_s8(_val1), vget_high_s8(_w67)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 
32; kptr0 += 64; } for (; j < nn; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w23)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 16; kptr0 += 32; } int32x4_t _s001 = vpaddq_s32(_sum00, _sum01); int32x4_t _s023 = vpaddq_s32(_sum02, _sum03); int32x4_t _s101 = vpaddq_s32(_sum10, _sum11); int32x4_t _s123 = vpaddq_s32(_sum12, _sum13); _sum00 = vpaddq_s32(_s001, _s023); _sum10 = vpaddq_s32(_s101, _s123); } #endif /* // __ARM_FEATURE_DOTPROD */ int j = 0; for (; j + 3 < nn1; j += 4) { int16x8_t _val01234567 = vmovl_s8(vld1_s8(tmpptr)); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _w01234567 = vmovl_s8(vget_low_s8(_w)); int16x8_t _w89abcdef = vmovl_s8(vget_high_s8(_w)); int16x4_t _w0123 = vget_low_s16(_w01234567); int16x4_t _w4567 = vget_high_s16(_w01234567); int16x4_t _w89ab = vget_low_s16(_w89abcdef); int16x4_t _wcdef = vget_high_s16(_w89abcdef); _sum00 = vmlal_laneq_s16(_sum00, _w0123, _val01234567, 0); _sum10 = vmlal_laneq_s16(_sum10, _w0123, _val01234567, 1); _sum00 = vmlal_laneq_s16(_sum00, _w4567, _val01234567, 2); _sum10 = vmlal_laneq_s16(_sum10, _w4567, _val01234567, 3); _sum00 = vmlal_laneq_s16(_sum00, _w89ab, _val01234567, 4); _sum10 = vmlal_laneq_s16(_sum10, _w89ab, _val01234567, 5); _sum00 = vmlal_laneq_s16(_sum00, _wcdef, _val01234567, 6); _sum10 = vmlal_laneq_s16(_sum10, _wcdef, _val01234567, 7); tmpptr += 8; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum00 = vmlal_s16(_sum00, _val0, _w0123); _sum10 = vmlal_s16(_sum10, _val1, _w0123); tmpptr += 2; kptr0 += 4; } vst1q_lane_s32(outptr0, _sum00, 0); vst1q_lane_s32(outptr1, _sum00, 1); vst1q_lane_s32(outptr2, _sum00, 2); vst1q_lane_s32(outptr3, _sum00, 3); vst1q_lane_s32(outptr0 + 1, _sum10, 0); vst1q_lane_s32(outptr1 + 1, _sum10, 1); vst1q_lane_s32(outptr2 + 1, _sum10, 2); vst1q_lane_s32(outptr3 + 1, _sum10, 3); outptr0 += 2; outptr1 += 2; outptr2 += 2; outptr3 += 2; #else /* // __aarch64__ */ asm volatile ( "veor q0, q0 \n" "veor q1, q1 \n" "veor q2, q2 \n" "veor q3, q3 \n" "veor q4, q4 \n" "veor q5, q5 \n" "veor q6, q6 \n" "veor q7, q7 \n" "cmp %4, #0 \n" "beq 3f \n" "pld [%6, #256] \n" "lsr r4, %4, #1 \n" // r4 = nn = size >> 1 "cmp r4, #0 \n" "beq 1f \n" "add r5, %7, #16 \n" "pld [%7, #128] \n" "mov r6, #32 \n" "pld [%7, #384] \n" "vld1.s8 {d20-d21}, [%7 :128], r6 \n" // _w01 "vld1.s8 {d16-d19}, [%6 :128]! 
\n" // _val0 _val1 "vld1.s8 {d22-d23}, [%7 :128], r6 \n" // _w45 "0: \n" "vmull.s8 q12, d16, d20 \n" "pld [%6, #256] \n" "vmull.s8 q13, d16, d21 \n" "pld [%7, #384] \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d20-d21}, [r5 :128], r6 \n" // _w23 "vmlal.s8 q12, d18, d22 \n" "vmlal.s8 q13, d18, d23 \n" "subs r4, r4, #1 \n" "vmlal.s8 q14, d19, d22 \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d22-d23}, [r5 :128], r6 \n" // _w67 "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d20 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d21 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d20 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d16-d17}, [%6 :128]! \n" // _val0 "vmlal.s8 q12, d18, d22 \n" "vld1.s8 {d20-d21}, [%7 :128], r6 \n" // _w01 "vmlal.s8 q13, d18, d23 \n" "pld [r5, #128] \n" "vmlal.s8 q14, d19, d22 \n" "pld [r5, #384] \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d18-d19}, [%6 :128]! \n" // _val1 "vpadal.s16 q2, q12 \n" "vld1.s8 {d22-d23}, [%7 :128], r6 \n" // _w45 "vpadal.s16 q3, q13 \n" "pld [%6, #128] \n" "vpadal.s16 q6, q14 \n" "pld [%7, #128] \n" "vpadal.s16 q7, q15 \n" "bne 0b \n" "sub %6, %6, #32 \n" "sub %7, %7, #64 \n" "1: \n" "and r4, %4, #1 \n" // r4 = remain = size & 1 "cmp r4, #0 \n" // r4 > 0 "beq 2f \n" "vld1.s8 {d16-d17}, [%6 :128]! \n" // _val "vld1.s8 {d20-d21}, [%7 :128]! \n" // _w01 "vmull.s8 q12, d16, d20 \n" "vld1.s8 {d22-d23}, [%7 :128]! \n" // _w23 "vmull.s8 q13, d16, d21 \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d22 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d23 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d22 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d23 \n" "vpadal.s16 q2, q12 \n" "vpadal.s16 q3, q13 \n" "vpadal.s16 q6, q14 \n" "vpadal.s16 q7, q15 \n" "2: \n" "vpadd.s32 d16, d0, d1 \n" "vpadd.s32 d17, d2, d3 \n" "vpadd.s32 d18, d4, d5 \n" "vpadd.s32 d19, d6, d7 \n" "vpadd.s32 d20, d8, d9 \n" "vpadd.s32 d21, d10, d11 \n" "vpadd.s32 d22, d12, d13 \n" "vpadd.s32 d23, d14, d15 \n" "vpadd.s32 d0, d16, d17 \n" "vpadd.s32 d1, d18, d19 \n" "vpadd.s32 d2, d20, d21 \n" "vpadd.s32 d3, d22, d23 \n" "3: \n" "lsr r4, %5, #2 \n" // r4 = nn1 >> 2 "cmp r4, #0 \n" "beq 5f \n" "4: \n" "vld1.s8 {d4}, [%6]! \n" "vmovl.s8 q2, d4 \n" "vld1.s8 {d10-d11}, [%7]! \n" "vmovl.s8 q3, d10 \n" "vmovl.s8 q4, d11 \n" "vmlal.s16 q0, d6, d4[0] \n" "vmlal.s16 q1, d6, d4[1] \n" "vmlal.s16 q0, d7, d4[2] \n" "vmlal.s16 q1, d7, d4[3] \n" "vmlal.s16 q0, d8, d5[0] \n" "vmlal.s16 q1, d8, d5[1] \n" "vmlal.s16 q0, d9, d5[2] \n" "vmlal.s16 q1, d9, d5[3] \n" "subs r4, r4, #1 \n" "bne 4b \n" "5: \n" "and r4, %5, #3 \n" // r4 = nn1 & 3 "cmp r4, #0 \n" // w4 > 0 "beq 7f \n" "6: \n" "vld1.s8 {d4[]}, [%6]! \n" "vld1.s8 {d6[]}, [%6]! \n" "vmovl.s8 q2, d4 \n" "vmovl.s8 q3, d6 \n" "vld1.s8 {d8}, [%7] \n" "vmovl.s8 q4, d8 \n" "vmlal.s16 q0, d4, d8 \n" "vmlal.s16 q1, d6, d8 \n" "add %7, %7, #4 \n" "subs r4, r4, #1 \n" "bne 6b \n" "7: \n" "vst1.s32 {d0[0]}, [%0]! \n" "vst1.s32 {d0[1]}, [%1]! \n" "vst1.s32 {d1[0]}, [%2]! \n" "vst1.s32 {d1[1]}, [%3]! \n" "vst1.s32 {d2[0]}, [%0]! \n" "vst1.s32 {d2[1]}, [%1]! \n" "vst1.s32 {d3[0]}, [%2]! \n" "vst1.s32 {d3[1]}, [%3]! 
\n" :"=r"(outptr0), "=r"(outptr1), "=r"(outptr2), "=r"(outptr3), "=r"(nn), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) :"0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(nn), "5"(nn1), "6"(tmpptr), "7"(kptr0) :"memory", "r4", "r5", "r6", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif /* // __aarch64__ */ } for (; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else const signed char *tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else const signed char *tmpptr = tmp.channel(i / 2 + i % 2); #endif const signed char *kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); #if __ARM_FEATURE_DOTPROD for (int j = 0; j < nn; j++) { int8x8_t _val0_l_h = vld1_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _w0123_l, _val0_l_h, 0); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_lane_s32(_sum0, _w0123_h, _val0_l_h, 1); tmpptr += 8; kptr0 += 32; } #else /* // __ARM_FEATURE_DOTPROD */ if (nn > 0) { int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv0 = vmlal_s8(_wv0, vget_high_s8(_val), vget_low_s8(_w45)); _wv1 = vmlal_s8(_wv1, vget_high_s8(_val), vget_high_s8(_w45)); _wv2 = vmlal_s8(_wv2, vget_high_s8(_val), vget_low_s8(_w67)); _wv3 = vmlal_s8(_wv3, vget_high_s8(_val), vget_high_s8(_w67)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 16; kptr0 += 64; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(_val, vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(_val, vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(_val, vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(_val, vget_high_s8(_w23)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 8; kptr0 += 32; } #if __aarch64__ int32x4_t _s01 = vpaddq_s32(_sum0, _sum1); int32x4_t _s23 = vpaddq_s32(_sum2, _sum3); _sum0 = vpaddq_s32(_s01, _s23); #else int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum1), vget_high_s32(_sum1)); int32x2_t _s23_low = vpadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); int32x2_t _s23_high = vpadd_s32(vget_low_s32(_sum3), vget_high_s32(_sum3)); _sum0 = vcombine_s32(vpadd_s32(_s01_low, _s01_high), vpadd_s32(_s23_low, _s23_high)); #endif } #endif /* // __ARM_FEATURE_DOTPROD */ int32x4_t _sum1 = vdupq_n_s32(0); int j = 0; for (; j + 3 < nn1; j += 4) { int16x4_t _val0123 = vget_low_s16(vmovl_s8(vld1_s8(tmpptr))); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _w01234567 = vmovl_s8(vget_low_s8(_w)); int16x8_t _w89abcdef = vmovl_s8(vget_high_s8(_w)); 
int16x4_t _w0123 = vget_low_s16(_w01234567); int16x4_t _w4567 = vget_high_s16(_w01234567); int16x4_t _w89ab = vget_low_s16(_w89abcdef); int16x4_t _wcdef = vget_high_s16(_w89abcdef); _sum0 = vmlal_lane_s16(_sum0, _w0123, _val0123, 0); _sum1 = vmlal_lane_s16(_sum1, _w4567, _val0123, 1); _sum0 = vmlal_lane_s16(_sum0, _w89ab, _val0123, 2); _sum1 = vmlal_lane_s16(_sum1, _wcdef, _val0123, 3); tmpptr += 4; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val = vdup_n_s16(tmpptr[0]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val, _w0123); tmpptr += 1; kptr0 += 4; } _sum0 = vaddq_s32(_sum0, _sum1); vst1q_lane_s32(outptr0, _sum0, 0); vst1q_lane_s32(outptr1, _sum0, 1); vst1q_lane_s32(outptr2, _sum0, 2); vst1q_lane_s32(outptr3, _sum0, 3); outptr0 += 1; outptr1 += 1; outptr2 += 1; outptr3 += 1; } } remain_outch_start += nn_outch << 2; #endif /* // __ARM_NEON */ for (int p = remain_outch_start; p < outch; p++) { int *outptr0 = top_blob.channel(p); int i = 0; #if __ARM_NEON #if __aarch64__ #if __ARM_FEATURE_DOTPROD for (; i + 15 < size; i += 16) { const signed char *tmpptr = tmp.channel(i / 16); const signed char *kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _val89ab_l = vld1q_s8(tmpptr + 32); int8x16_t _valcdef_l = vld1q_s8(tmpptr + 48); int8x16_t _val0123_h = vld1q_s8(tmpptr + 64); int8x16_t _val4567_h = vld1q_s8(tmpptr + 80); int8x16_t _val89ab_h = vld1q_s8(tmpptr + 96); int8x16_t _valcdef_h = vld1q_s8(tmpptr + 112); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0); _sum1 = vdotq_lane_s32(_sum1, _val4567_l, _w_lh, 0); _sum2 = vdotq_lane_s32(_sum2, _val89ab_l, _w_lh, 0); _sum3 = vdotq_lane_s32(_sum3, _valcdef_l, _w_lh, 0); _sum0 = vdotq_lane_s32(_sum0, _val0123_h, _w_lh, 1); _sum1 = vdotq_lane_s32(_sum1, _val4567_h, _w_lh, 1); _sum2 = vdotq_lane_s32(_sum2, _val89ab_h, _w_lh, 1); _sum3 = vdotq_lane_s32(_sum3, _valcdef_h, _w_lh, 1); tmpptr += 128; kptr0 += 8; } int j = 0; for (; j < nn1; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x8_t _w = vld1_dup_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val), _w); int16x8_t _s1 = vmull_s8(vget_high_s8(_val), _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); tmpptr += 16; kptr0 += 1; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); vst1q_s32(outptr0 + 8, _sum2); vst1q_s32(outptr0 + 12, _sum3); outptr0 += 16; } for (; i + 7 < size; i += 8) { const signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8); const signed char *kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); if (nn > 0) { int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _val0123_h = vld1q_s8(tmpptr + 32); int8x16_t _val4567_h = vld1q_s8(tmpptr + 48); int8x8_t _w_lh = 
vld1_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0); _sum1 = vdotq_lane_s32(_sum1, _val4567_l, _w_lh, 0); _sum2 = vdotq_lane_s32(_sum2, _val0123_h, _w_lh, 1); _sum3 = vdotq_lane_s32(_sum3, _val4567_h, _w_lh, 1); tmpptr += 64; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum2); _sum1 = vaddq_s32(_sum1, _sum3); } int j = 0; for (; j < nn1; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x8_t _w = vld1_dup_s8(kptr0); int16x8_t _s = vmull_s8(_val, _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s)); tmpptr += 8; kptr0 += 1; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); outptr0 += 8; } #endif /* // __ARM_FEATURE_DOTPROD */ for (; i + 3 < size; i += 4) { #if __ARM_FEATURE_DOTPROD const signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else const signed char *tmpptr = tmp.channel(i / 4); #endif const signed char *kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); if (nn > 0) { #if __ARM_FEATURE_DOTPROD int32x4_t _sum1 = vdupq_n_s32(0); int j = 0; for (; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val0123_h = vld1q_s8(tmpptr + 16); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0); _sum1 = vdotq_lane_s32(_sum1, _val0123_h, _w_lh, 1); tmpptr += 32; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum1); #else /* // __ARM_FEATURE_DOTPROD */ int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _val2 = vld1q_s8(tmpptr + 32); int8x16_t _val3 = vld1q_s8(tmpptr + 48); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w)); int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w)); int16x8_t _s2 = vmull_s8(vget_low_s8(_val1), vget_low_s8(_w)); int16x8_t _s3 = vmull_s8(vget_high_s8(_val1), vget_low_s8(_w)); _s0 = vmlal_s8(_s0, vget_low_s8(_val2), vget_high_s8(_w)); _s1 = vmlal_s8(_s1, vget_high_s8(_val2), vget_high_s8(_w)); _s2 = vmlal_s8(_s2, vget_low_s8(_val3), vget_high_s8(_w)); _s3 = vmlal_s8(_s3, vget_high_s8(_val3), vget_high_s8(_w)); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); _sum4 = vaddw_s16(_sum4, vget_low_s16(_s2)); _sum5 = vaddw_s16(_sum5, vget_high_s16(_s2)); _sum6 = vaddw_s16(_sum6, vget_low_s16(_s3)); _sum7 = vaddw_s16(_sum7, vget_high_s16(_s3)); tmpptr += 64; kptr0 += 16; } for (; j < nn; j++) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x8_t _w = vld1_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), _w); int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), _w); int16x8_t _s2 = vmull_s8(vget_low_s8(_val1), _w); int16x8_t _s3 = vmull_s8(vget_high_s8(_val1), _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); _sum4 = vaddw_s16(_sum4, vget_low_s16(_s2)); _sum5 = vaddw_s16(_sum5, vget_high_s16(_s2)); _sum6 = vaddw_s16(_sum6, vget_low_s16(_s3)); _sum7 = vaddw_s16(_sum7, vget_high_s16(_s3)); tmpptr += 32; kptr0 += 8; } _sum0 = 
vaddq_s32(_sum0, _sum1); _sum2 = vaddq_s32(_sum2, _sum3); _sum4 = vaddq_s32(_sum4, _sum5); _sum6 = vaddq_s32(_sum6, _sum7); int32x2_t _s0 = vadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s2 = vadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); int32x2_t _s4 = vadd_s32(vget_low_s32(_sum4), vget_high_s32(_sum4)); int32x2_t _s6 = vadd_s32(vget_low_s32(_sum6), vget_high_s32(_sum6)); int32x2_t _ss0 = vpadd_s32(_s0, _s2); int32x2_t _ss1 = vpadd_s32(_s4, _s6); _sum0 = vcombine_s32(_ss0, _ss1); #endif /* // __ARM_FEATURE_DOTPROD */ } int sum0123[4] = {0, 0, 0, 0}; int j = 0; for (; j < nn1; j++) { signed char val0 = tmpptr[0]; signed char val1 = tmpptr[1]; signed char val2 = tmpptr[2]; signed char val3 = tmpptr[3]; signed char w = kptr0[0]; sum0123[0] += val0 * w; sum0123[1] += val1 * w; sum0123[2] += val2 * w; sum0123[3] += val3 * w; tmpptr += 4; kptr0 += 1; } _sum0 = vaddq_s32(_sum0, vld1q_s32(sum0123)); vst1q_s32(outptr0, _sum0); outptr0 += 4; } #endif /* // __aarch64__ */ for (; i + 1 < size; i += 2) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else const signed char *tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else const signed char *tmpptr = tmp.channel(i / 2); #endif const signed char *kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x2_t _sum = vdup_n_s32(0); if (nn > 0) { #if __ARM_FEATURE_DOTPROD int32x2_t _sum0 = vdup_n_s32(0); int32x2_t _sum1 = vdup_n_s32(0); int j = 0; for (; j < nn; j++) { int8x16_t _val01_lh = vld1q_s8(tmpptr); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdot_lane_s32(_sum0, vget_low_s8(_val01_lh), _w_lh, 0); _sum1 = vdot_lane_s32(_sum1, vget_high_s8(_val01_lh), _w_lh, 1); tmpptr += 16; kptr0 += 8; } _sum = vadd_s32(_sum0, _sum1); #else /* // __ARM_FEATURE_DOTPROD */ int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w)); int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w)); _s0 = vmlal_s8(_s0, vget_low_s8(_val1), vget_high_s8(_w)); _s1 = vmlal_s8(_s1, vget_high_s8(_val1), vget_high_s8(_w)); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); tmpptr += 32; kptr0 += 16; } for (; j < nn; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x8_t _w = vld1_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val), _w); int16x8_t _s1 = vmull_s8(vget_high_s8(_val), _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); tmpptr += 16; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum1); _sum2 = vaddq_s32(_sum2, _sum3); int32x2_t _s0 = vadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s2 = vadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); _sum = vpadd_s32(_s0, _s2); #endif /* // __ARM_FEATURE_DOTPROD */ } int sum01[2] = {0, 0}; int j = 0; for (; j < nn1; j++) { signed char val0 = tmpptr[0]; signed char val1 = tmpptr[1]; signed char w = kptr0[0]; sum01[0] += val0 * w; sum01[1] += val1 * w; tmpptr += 2; kptr0 += 1; } _sum = vadd_s32(_sum, vld1_s32(sum01)); 
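/* _sum now carries the two int32 results for output columns i and i+1 of channel p */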
                vst1_s32(outptr0, _sum);
                outptr0 += 2;
            }
            for (; i < size; i++) {
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
                const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#else
                const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#endif
#else
                const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
                const signed char* kptr0 = kernel.channel(p / 4 + p % 4);

                int nn = (inch / 8) * maxk;
                int nn1 = (inch % 8) * maxk;

                int sum = 0;
                if (nn > 0) {
#if __ARM_FEATURE_DOTPROD
                    int32x4_t _sum0 = vdupq_n_s32(0);
                    int32x2_t _sum1 = vdup_n_s32(0);
                    int j = 0;
                    for (; j + 1 < nn; j += 2) {
                        int8x16_t _val = vld1q_s8(tmpptr);
                        int8x16_t _w = vld1q_s8(kptr0);
                        _sum0 = vdotq_s32(_sum0, _val, _w);
                        tmpptr += 16;
                        kptr0 += 16;
                    }
                    for (; j < nn; j++) {
                        int8x8_t _val = vld1_s8(tmpptr);
                        int8x8_t _w = vld1_s8(kptr0);
                        _sum1 = vdot_s32(_sum1, _val, _w);
                        tmpptr += 8;
                        kptr0 += 8;
                    }
                    sum = vaddvq_s32(_sum0) + vaddv_s32(_sum1);
#else // __ARM_FEATURE_DOTPROD
                    int32x4_t _sum0 = vdupq_n_s32(0);
                    int32x4_t _sum1 = vdupq_n_s32(0);
                    int j = 0;
                    for (; j + 1 < nn; j += 2) {
                        int8x16_t _val = vld1q_s8(tmpptr);
                        int8x16_t _w = vld1q_s8(kptr0);
                        int16x8_t _s8 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w));
                        _s8 = vmlal_s8(_s8, vget_high_s8(_val), vget_high_s8(_w));
                        _sum0 = vaddw_s16(_sum0, vget_low_s16(_s8));
                        _sum1 = vaddw_s16(_sum1, vget_high_s16(_s8));
                        tmpptr += 16;
                        kptr0 += 16;
                    }
                    for (; j < nn; j++) {
                        int8x8_t _val = vld1_s8(tmpptr);
                        int8x8_t _w = vld1_s8(kptr0);
                        int16x8_t _s8 = vmull_s8(_val, _w);
                        _sum0 = vaddw_s16(_sum0, vget_low_s16(_s8));
                        _sum1 = vaddw_s16(_sum1, vget_high_s16(_s8));
                        tmpptr += 8;
                        kptr0 += 8;
                    }
                    int32x4_t _sum = vaddq_s32(_sum0, _sum1);
#if __aarch64__
                    sum = vaddvq_s32(_sum); // dot
#else
                    int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum));
                    _ss = vpadd_s32(_ss, _ss);
                    sum = vget_lane_s32(_ss, 0);
#endif
#endif // __ARM_FEATURE_DOTPROD
                }
                int j = 0;
                for (; j < nn1; j++) {
                    signed char val = tmpptr[0];
                    signed char w = kptr0[0];
                    sum += val * w;
                    tmpptr += 1;
                    kptr0 += 1;
                }
                outptr0[0] = sum;
                outptr0 += 1;
            }
#else // __ARM_NEON
            for (; i < size; i++) {
                const signed char* tmpptr = tmp.channel(i);
                const signed char* kptr0 = kernel.channel(p);

                int nn1 = inch * maxk;

                int sum = 0;
                int j = 0;
                for (; j < nn1; j++) {
                    signed char val = tmpptr[0];
                    signed char w = kptr0[0];
                    sum += val * w;
                    tmpptr += 1;
                    kptr0 += 1;
                }
                outptr0[0] = sum;
                outptr0 += 1;
            }
#endif // __ARM_NEON
        }
    }

static void convolution_im2col_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

#if __ARM_NEON
    // interleave
    // src = maxk-inch-outch
    // dst = 8a-4b-maxk-inch/8a-outch/4b
    // dst = 4a-4b-2-maxk-inch/8a-outch/4b (arm82)
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    if (outch >= 4) {
        if (inch >= 8)
            kernel_tm.create(32 * maxk, inch / 8 + inch % 8, outch / 4 + outch % 4, 1u);
        else
            kernel_tm.create(4 * maxk, inch, outch / 4 + outch % 4, 1u);
    } else {
        if (inch >= 8)
            kernel_tm.create(8 * maxk, inch / 8 + inch % 8, outch, 1u);
        else
            kernel_tm.create(1 * maxk, inch, outch, 1u);
    }

    int q = 0;
    for (; q + 3 < outch; q += 4) {
        signed char* g00 = kernel_tm.channel(q / 4);

        int p = 0;
        for (; p + 7 < inch; p += 8) {
            for (int k = 0; k < maxk; k++) {
#if __ARM_FEATURE_DOTPROD
                for (int i = 0; i < 4; i++) {
                    for (int j = 0; j < 4; j++) {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
                for (int i = 0; i < 4; i++) {
                    for (int j = 4; j < 8; j++) {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
#else
                for (int i = 0; i < 4; i++) {
                    for (int j = 0; j < 8; j++) {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
#endif
            }
        }
        // TODO unroll 4
        for (; p < inch; p++) {
            for (int k = 0; k < maxk; k++) {
                for (int i = 0; i < 4; i++) {
                    const signed char* k00 = kernel.channel(q + i).row<const signed char>(p);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
    // TODO unroll 2
    for (; q < outch; q++) {
        signed char* g00 = kernel_tm.channel(q / 4 + q % 4);

        int p = 0;
        for (; p + 7 < inch; p += 8) {
            for (int k = 0; k < maxk; k++) {
                for (int j = 0; j < 8; j++) {
                    const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
        // TODO unroll 4
        for (; p < inch; p++) {
            for (int k = 0; k < maxk; k++) {
                const signed char* k00 = kernel.channel(q).row<const signed char>(p);
                g00[0] = k00[k];
                g00++;
            }
        }
    }
#else // __ARM_NEON
    kernel_tm = _kernel.reshape(maxk, inch, outch);
#endif // __ARM_NEON
}

static void convolution_im2col_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator);
    {
        const int gap = w * stride_h - outw * stride_w;

        for (int p = 0; p < inch; p++) {
            const Mat img = bottom_blob.channel(p);
            signed char* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++) {
                for (int v = 0; v < kernel_w; v++) {
                    const signed char* sptr = img.row<const signed char>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++) {
                        int j = 0;
                        for (; j + 3 < outw; j += 4) {
                            ptr[0] = sptr[0];
                            ptr[1] = sptr[stride_w];
                            ptr[2] = sptr[stride_w * 2];
                            ptr[3] = sptr[stride_w * 3];

                            sptr += stride_w * 4;
                            ptr += 4;
                        }
                        for (; j + 1 < outw; j += 2) {
                            ptr[0] = sptr[0];
                            ptr[1] = sptr[stride_w];

                            sptr += stride_w * 2;
                            ptr += 2;
                        }
                        for (; j < outw; j++) {
                            ptr[0] = sptr[0];

                            sptr += stride_w;
                            ptr += 1;
                        }
                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_int8_neon(bottom_im2col, top_blob, kernel, opt);
}
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

static void im2col_sgemm_int8_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    // Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);

    const int size = bottom_im2col.w;
    const int maxk = bottom_im2col.h;
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    // permute
    Mat tmp;
#if __ARM_NEON
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
    if (inch >= 8) {
        if (size >= 16)
            tmp.create(16 * maxk, inch / 8 + inch % 8, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else if (size >= 8)
            tmp.create(8 * maxk, inch / 8 + inch % 8, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else if (size >= 4)
            tmp.create(4 * maxk, inch / 8 + inch % 8, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else if (size >= 2)
            tmp.create(2 * maxk, inch / 8 + inch % 8, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else
            tmp.create(maxk, inch / 8 + inch % 8, size, 8u, 8, opt.workspace_allocator);
    } else {
        if (size >= 16)
            tmp.create(16 * maxk, inch, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else if (size >= 8)
            tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else if (size >= 4)
            tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else if (size >= 2)
            tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else
            tmp.create(maxk, inch, size, 8u, 1, opt.workspace_allocator);
    }
#else // __ARM_FEATURE_DOTPROD
    if (inch >= 8) {
        if (size >= 4)
            tmp.create(4 * maxk, inch / 8 + inch % 8, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else if (size >= 2)
            tmp.create(2 * maxk, inch / 8 + inch % 8, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else
            tmp.create(maxk, inch / 8 + inch % 8, size, 8u, 8, opt.workspace_allocator);
    } else {
        if (size >= 4)
            tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else if (size >= 2)
            tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else
            tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
    }
#endif // __ARM_FEATURE_DOTPROD
#else // __aarch64__
    if (inch >= 8) {
        if (size >= 2)
            tmp.create(2 * maxk, inch / 8 + inch % 8, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
        else
            tmp.create(maxk, inch / 8 + inch % 8, size, 8u, 8, opt.workspace_allocator);
    } else {
        if (size >= 2)
            tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
        else
            tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
    }
#endif // __aarch64__
    {
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
        int
nn_size = size >> 4; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 16; signed char *tmpptr = tmp.channel(i / 16); int q = 0; for (; q + 7 < inch; q += 8) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; const signed char *img1 = (const signed char *)bottom_im2col.channel(q + 1) + i; const signed char *img2 = (const signed char *)bottom_im2col.channel(q + 2) + i; const signed char *img3 = (const signed char *)bottom_im2col.channel(q + 3) + i; const signed char *img4 = (const signed char *)bottom_im2col.channel(q + 4) + i; const signed char *img5 = (const signed char *)bottom_im2col.channel(q + 5) + i; const signed char *img6 = (const signed char *)bottom_im2col.channel(q + 6) + i; const signed char *img7 = (const signed char *)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { asm volatile ( "ld1 {v0.16b}, [%0] \n" "ld1 {v1.16b}, [%1] \n" "ld1 {v2.16b}, [%2] \n" "ld1 {v3.16b}, [%3] \n" "ld1 {v4.16b}, [%4] \n" "ld1 {v5.16b}, [%5] \n" "ld1 {v6.16b}, [%6] \n" "ld1 {v7.16b}, [%7] \n" "st4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%8], #64 \n" "st4 {v4.16b, v5.16b, v6.16b, v7.16b}, [%8], #64 \n" : "=r" (img0), //%0 "=r"(img1), "=r"(img2), "=r"(img3), "=r"(img4), "=r"(img5), "=r"(img6), "=r"(img7), "=r"(tmpptr) // %8 : "0"(img0), "1"(img1), "2"(img2), "3"(img3), "4"(img4), "5"(img5), "6"(img6), "7"(img7), "8"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { asm volatile ( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.16b}, [%0] \n" "st1 {v0.16b}, [%1], #16 \n" : "=r" (img0), //%0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += size; } } } remain_size_start += nn_size << 4; nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8); int q = 0; for (; q + 7 < inch; q += 8) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; const signed char *img1 = (const signed char *)bottom_im2col.channel(q + 1) + i; const signed char *img2 = (const signed char *)bottom_im2col.channel(q + 2) + i; const signed char *img3 = (const signed char *)bottom_im2col.channel(q + 3) + i; const signed char *img4 = (const signed char *)bottom_im2col.channel(q + 4) + i; const signed char *img5 = (const signed char *)bottom_im2col.channel(q + 5) + i; const signed char *img6 = (const signed char *)bottom_im2col.channel(q + 6) + i; const signed char *img7 = (const signed char *)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { asm volatile ( "ld1 {v0.8b}, [%0] \n" "ld1 {v1.8b}, [%1] \n" "ld1 {v2.8b}, [%2] \n" "ld1 {v3.8b}, [%3] \n" "ld1 {v4.8b}, [%4] \n" "ld1 {v5.8b}, [%5] \n" "ld1 {v6.8b}, [%6] \n" "ld1 {v7.8b}, [%7] \n" "st4 {v0.8b, v1.8b, v2.8b, v3.8b}, [%8], #32 \n" "st4 {v4.8b, v5.8b, v6.8b, v7.8b}, [%8], #32 \n" : "=r" (img0), //%0 "=r"(img1), "=r"(img2), "=r"(img3), "=r"(img4), "=r"(img5), "=r"(img6), "=r"(img7), "=r"(tmpptr) // %8 : "0"(img0), "1"(img1), "2"(img2), "3"(img3), "4"(img4), "5"(img5), "6"(img6), "7"(img7), "8"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", 
"v7"); img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { asm volatile ( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.8b}, [%0] \n" "st1 {v0.8b}, [%1], #8 \n" : "=r" (img0), //%0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += size; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #else /* // __ARM_FEATURE_DOTPROD */ int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 2; #endif /* // __ARM_FEATURE_DOTPROD */ #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; #if __ARM_FEATURE_DOTPROD signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else signed char *tmpptr = tmp.channel(i / 4); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; const signed char *img1 = (const signed char *)bottom_im2col.channel(q + 1) + i; const signed char *img2 = (const signed char *)bottom_im2col.channel(q + 2) + i; const signed char *img3 = (const signed char *)bottom_im2col.channel(q + 3) + i; const signed char *img4 = (const signed char *)bottom_im2col.channel(q + 4) + i; const signed char *img5 = (const signed char *)bottom_im2col.channel(q + 5) + i; const signed char *img6 = (const signed char *)bottom_im2col.channel(q + 6) + i; const signed char *img7 = (const signed char *)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; tmpptr[0] = img0[2]; tmpptr[1] = img1[2]; tmpptr[2] = img2[2]; tmpptr[3] = img3[2]; tmpptr[4] = img0[3]; tmpptr[5] = img1[3]; tmpptr[6] = img2[3]; tmpptr[7] = img3[3]; tmpptr += 8; tmpptr[0] = img4[0]; tmpptr[1] = img5[0]; tmpptr[2] = img6[0]; tmpptr[3] = img7[0]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; tmpptr[0] = img4[2]; tmpptr[1] = img5[2]; tmpptr[2] = img6[2]; tmpptr[3] = img7[2]; tmpptr[4] = img4[3]; tmpptr[5] = img5[3]; tmpptr[6] = img6[3]; tmpptr[7] = img7[3]; tmpptr += 8; #else tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; tmpptr[0] = img0[1]; tmpptr[1] = img1[1]; tmpptr[2] = img2[1]; tmpptr[3] = img3[1]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; tmpptr[0] = img0[2]; tmpptr[1] = img1[2]; tmpptr[2] = img2[2]; tmpptr[3] = img3[2]; tmpptr[4] = img4[2]; tmpptr[5] = img5[2]; tmpptr[6] = img6[2]; tmpptr[7] = img7[2]; tmpptr += 8; tmpptr[0] = img0[3]; tmpptr[1] = img1[3]; tmpptr[2] = img2[3]; tmpptr[3] = img3[3]; tmpptr[4] = img4[3]; tmpptr[5] = img5[3]; tmpptr[6] = img6[3]; tmpptr[7] = img7[3]; tmpptr += 8; #endif /* // __ARM_FEATURE_DOTPROD */ img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 
+= size; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #else int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 1; #endif #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else signed char *tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else signed char *tmpptr = tmp.channel(i / 2); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; const signed char *img1 = (const signed char *)bottom_im2col.channel(q + 1) + i; const signed char *img2 = (const signed char *)bottom_im2col.channel(q + 2) + i; const signed char *img3 = (const signed char *)bottom_im2col.channel(q + 3) + i; const signed char *img4 = (const signed char *)bottom_im2col.channel(q + 4) + i; const signed char *img5 = (const signed char *)bottom_im2col.channel(q + 5) + i; const signed char *img6 = (const signed char *)bottom_im2col.channel(q + 6) + i; const signed char *img7 = (const signed char *)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img0[1]; tmpptr[5] = img1[1]; tmpptr[6] = img2[1]; tmpptr[7] = img3[1]; tmpptr += 8; tmpptr[0] = img4[0]; tmpptr[1] = img5[0]; tmpptr[2] = img6[0]; tmpptr[3] = img7[0]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; #else tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; tmpptr[0] = img0[1]; tmpptr[1] = img1[1]; tmpptr[2] = img2[1]; tmpptr[3] = img3[1]; tmpptr[4] = img4[1]; tmpptr[5] = img5[1]; tmpptr[6] = img6[1]; tmpptr[7] = img7[1]; tmpptr += 8; #endif /* // __ARM_FEATURE_DOTPROD */ img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += size; } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else signed char *tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else signed char *tmpptr = tmp.channel(i / 2 + i % 2); #endif int q = 0; for (; q + 7 < inch; q += 8) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; const signed char *img1 = (const signed char *)bottom_im2col.channel(q + 1) + i; const signed char *img2 = (const signed char *)bottom_im2col.channel(q + 2) + i; const signed char *img3 = (const signed char *)bottom_im2col.channel(q + 3) + i; const signed char *img4 = (const signed char *)bottom_im2col.channel(q + 4) + i; const signed char *img5 = (const signed char *)bottom_im2col.channel(q + 5) + i; const signed char *img6 = (const signed char *)bottom_im2col.channel(q + 6) + i; const signed char *img7 = (const signed char *)bottom_im2col.channel(q + 7) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; 
tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr[4] = img4[0]; tmpptr[5] = img5[0]; tmpptr[6] = img6[0]; tmpptr[7] = img7[0]; tmpptr += 8; img0 += size; img1 += size; img2 += size; img3 += size; img4 += size; img5 += size; img6 += size; img7 += size; } } for (; q < inch; q++) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += size; } } } } #else /* // __ARM_NEON */ tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator); { #pragma omp parallel for num_threads(opt.num_threads) for (int i = 0; i < size; i++) { signed char *tmpptr = tmp.channel(i); int q = 0; for (; q < inch; q++) { const signed char *img0 = (const signed char *)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += size; } } } } #endif /* // __ARM_NEON */ int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON nn_outch = outch >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; int *outptr0 = top_blob.channel(p); int *outptr1 = top_blob.channel(p + 1); int *outptr2 = top_blob.channel(p + 2); int *outptr3 = top_blob.channel(p + 3); int i = 0; #if __aarch64__ #if __ARM_FEATURE_DOTPROD for (; i + 15 < size; i += 16) { const signed char *tmpptr = tmp.channel(i / 16); const signed char *kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; asm volatile ( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "cmp %w4, #0 \n" "beq 1f \n" "ld1 {v8.16b}, [%7], #16 \n" // _w0123_l "ld1 {v0.16b}, [%6], #16 \n" // _val0123_l "0: \n" "ld1 {v1.16b}, [%6], #16 \n" // _val4567_l "sdot v16.4s, v8.16b, v0.4b[0] \n" "sdot v17.4s, v8.16b, v0.4b[1] \n" "sdot v18.4s, v8.16b, v0.4b[2] \n" "sdot v19.4s, v8.16b, v0.4b[3] \n" "ld1 {v2.16b}, [%6], #16 \n" // _val891011_l "sdot v20.4s, v8.16b, v1.4b[0] \n" "sdot v21.4s, v8.16b, v1.4b[1] \n" "sdot v22.4s, v8.16b, v1.4b[2] \n" "sdot v23.4s, v8.16b, v1.4b[3] \n" "ld1 {v3.16b}, [%6], #16 \n" // _val12131415_l "sdot v24.4s, v8.16b, v2.4b[0] \n" "sdot v25.4s, v8.16b, v2.4b[1] \n" "ld1 {v9.16b}, [%7], #16 \n" // _w0123_h "sdot v26.4s, v8.16b, v2.4b[2] \n" "sdot v27.4s, v8.16b, v2.4b[3] \n" "ld1 {v4.16b}, [%6], #16 \n" // _val0123_h "sdot v28.4s, v8.16b, v3.4b[0] \n" "sdot v29.4s, v8.16b, v3.4b[1] \n" "sdot v30.4s, v8.16b, v3.4b[2] \n" "sdot v31.4s, v8.16b, v3.4b[3] \n" "ld1 {v5.16b}, [%6], #16 \n" // _val4567_h "sdot v16.4s, v9.16b, v4.4b[0] \n" "sdot v17.4s, v9.16b, v4.4b[1] \n" "sdot v18.4s, v9.16b, v4.4b[2] \n" "sdot v19.4s, v9.16b, v4.4b[3] \n" "ld1 {v6.16b}, [%6], #16 \n" // _val891011_h "sdot v20.4s, v9.16b, v5.4b[0] \n" "sdot v21.4s, v9.16b, v5.4b[1] \n" "sdot v22.4s, v9.16b, v5.4b[2] \n" "sdot v23.4s, v9.16b, v5.4b[3] \n" "ld1 {v7.16b}, [%6], #16 \n" // _val12131415_h "sdot v24.4s, v9.16b, v6.4b[0] \n" "sdot v25.4s, v9.16b, v6.4b[1] \n" "ld1 {v8.16b}, [%7], #16 \n" // _w0123_l "sdot v26.4s, v9.16b, v6.4b[2] \n" "sdot v27.4s, 
v9.16b, v6.4b[3] \n" "ld1 {v0.16b}, [%6], #16 \n" // _val0123_l "sdot v28.4s, v9.16b, v7.4b[0] \n" "sdot v29.4s, v9.16b, v7.4b[1] \n" "subs %w4, %w4, #1 \n" "sdot v30.4s, v9.16b, v7.4b[2] \n" "sdot v31.4s, v9.16b, v7.4b[3] \n" "bne 0b \n" "sub %6, %6, #16 \n" "sub %7, %7, #16 \n" "1: \n" "lsr w4, %w5, #2 \n" // w4 = nn1 >> 2 "cmp w4, #0 \n" "beq 3f \n" "2: \n" "ld1 {v8.8b, v9.8b}, [%7], #16 \n" "ld4 {v0.16b, v1.16b, v2.16b, v3.16b}, [%6], #64 \n" "uzp1 v10.8b, v8.8b, v9.8b \n" "uzp2 v11.8b, v8.8b, v9.8b \n" "uzp1 v4.16b, v0.16b, v1.16b \n" "uzp2 v5.16b, v0.16b, v1.16b \n" "uzp1 v6.16b, v2.16b, v3.16b \n" "uzp2 v7.16b, v2.16b, v3.16b \n" "uzp1 v8.8b, v10.8b, v11.8b \n" "uzp2 v9.8b, v10.8b, v11.8b \n" "uzp1 v0.16b, v4.16b, v5.16b \n" // 0 1 4 5 "uzp2 v1.16b, v4.16b, v5.16b \n" // 8 9 c d "mov v8.d[1], v9.d[0] \n" // _w "uzp1 v2.16b, v6.16b, v7.16b \n" // 2 3 6 7 "uzp2 v3.16b, v6.16b, v7.16b \n" // a b e f "sdot v16.4s, v8.16b, v0.4b[0] \n" "sdot v17.4s, v8.16b, v0.4b[1] \n" "sdot v18.4s, v8.16b, v2.4b[0] \n" "sdot v19.4s, v8.16b, v2.4b[1] \n" "sdot v20.4s, v8.16b, v0.4b[2] \n" "sdot v21.4s, v8.16b, v0.4b[3] \n" "sdot v22.4s, v8.16b, v2.4b[2] \n" "sdot v23.4s, v8.16b, v2.4b[3] \n" "sdot v24.4s, v8.16b, v1.4b[0] \n" "sdot v25.4s, v8.16b, v1.4b[1] \n" "sdot v26.4s, v8.16b, v3.4b[0] \n" "sdot v27.4s, v8.16b, v3.4b[1] \n" "sdot v28.4s, v8.16b, v1.4b[2] \n" "sdot v29.4s, v8.16b, v1.4b[3] \n" "sdot v30.4s, v8.16b, v3.4b[2] \n" "sdot v31.4s, v8.16b, v3.4b[3] \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "and w4, %w5, #3 \n" // w4 = remain = nn1 & 3 "cmp w4, #0 \n" // w4 > 0 "beq 5f \n" "4: \n" "ld1 {v1.8b}, [%7] \n" "ld1 {v0.16b}, [%6] \n" "sshll v1.8h, v1.8b, #0 \n" "sshll v2.8h, v0.8b, #0 \n" "sshll2 v3.8h, v0.16b, #0 \n" "smlal v16.4s, v1.4h, v2.h[0] \n" "smlal v17.4s, v1.4h, v2.h[1] \n" "smlal v18.4s, v1.4h, v2.h[2] \n" "smlal v19.4s, v1.4h, v2.h[3] \n" "smlal v20.4s, v1.4h, v2.h[4] \n" "smlal v21.4s, v1.4h, v2.h[5] \n" "smlal v22.4s, v1.4h, v2.h[6] \n" "smlal v23.4s, v1.4h, v2.h[7] \n" "smlal v24.4s, v1.4h, v3.h[0] \n" "smlal v25.4s, v1.4h, v3.h[1] \n" "smlal v26.4s, v1.4h, v3.h[2] \n" "smlal v27.4s, v1.4h, v3.h[3] \n" "smlal v28.4s, v1.4h, v3.h[4] \n" "smlal v29.4s, v1.4h, v3.h[5] \n" "smlal v30.4s, v1.4h, v3.h[6] \n" "smlal v31.4s, v1.4h, v3.h[7] \n" "add %6, %6, #16 \n" "add %7, %7, #4 \n" "subs w4, w4, #1 \n" "bne 4b \n" "5: \n" // transpose 4 x16 "trn1 v0.4s, v16.4s, v17.4s \n" "trn2 v1.4s, v16.4s, v17.4s \n" "trn1 v2.4s, v18.4s, v19.4s \n" "trn2 v3.4s, v18.4s, v19.4s \n" "trn1 v4.4s, v20.4s, v21.4s \n" "trn2 v5.4s, v20.4s, v21.4s \n" "trn1 v6.4s, v22.4s, v23.4s \n" "trn2 v7.4s, v22.4s, v23.4s \n" "trn1 v8.4s, v24.4s, v25.4s \n" "trn2 v9.4s, v24.4s, v25.4s \n" "trn1 v10.4s, v26.4s, v27.4s \n" "trn2 v11.4s, v26.4s, v27.4s \n" "trn1 v12.4s, v28.4s, v29.4s \n" "trn2 v13.4s, v28.4s, v29.4s \n" "trn1 v14.4s, v30.4s, v31.4s \n" "trn2 v15.4s, v30.4s, v31.4s \n" "trn1 v16.2d, v0.2d, v2.2d \n" "trn2 v24.2d, v0.2d, v2.2d \n" "trn1 v20.2d, v1.2d, v3.2d \n" "trn2 v28.2d, v1.2d, v3.2d \n" "trn1 v17.2d, v4.2d, v6.2d \n" "trn2 v25.2d, v4.2d, v6.2d \n" "trn1 v21.2d, v5.2d, v7.2d \n" "trn2 v29.2d, v5.2d, v7.2d \n" "trn1 v18.2d, v8.2d, v10.2d \n" "trn2 v26.2d, v8.2d, v10.2d \n" "trn1 v22.2d, v9.2d, v11.2d \n" "trn2 v30.2d, v9.2d, v11.2d \n" "trn1 v19.2d, v12.2d, v14.2d \n" "trn2 v27.2d, v12.2d, v14.2d \n" "trn1 v23.2d, v13.2d, v15.2d \n" "trn2 v31.2d, v13.2d, v15.2d \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, 
v27.4s}, [%2], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%3], #64 \n" :"=r"(outptr0), "=r"(outptr1), "=r"(outptr2), "=r"(outptr3), "=r"(nn), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) :"0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(nn), "5"(nn1), "6"(tmpptr), "7"(kptr0) :"memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { const signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8); const signed char *kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_l, _val4567_l, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_l, _val4567_l, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_l, _val4567_l, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_l, _val4567_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 32); int8x16_t _val4567_h = vld1q_s8(tmpptr + 48); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_h, _val4567_h, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_h, _val4567_h, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_h, _val4567_h, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_h, _val4567_h, 3); tmpptr += 64; kptr0 += 32; } int j = 0; for (; j + 3 < nn1; j += 4) { int8x8x4_t _val4 = vld4_s8(tmpptr); int8x8x2_t _val0145 = vuzp_s8(_val4.val[0], _val4.val[1]); int8x8x2_t _val2367 = vuzp_s8(_val4.val[2], _val4.val[3]); int8x16_t _val0123 = vcombine_s8(_val0145.val[0], _val2367.val[0]); int8x16_t _val4567 = vcombine_s8(_val0145.val[1], _val2367.val[1]); int8x16_t _w = vld1q_s8(kptr0); int8x8x2_t _w01 = vuzp_s8(vget_low_s8(_w), vget_high_s8(_w)); int8x8x2_t _w0123 = vuzp_s8(_w01.val[0], _w01.val[1]); int8x16_t _w0123f = vcombine_s8(_w0123.val[0], _w0123.val[1]); _sum0 = vdotq_laneq_s32(_sum0, _w0123f, _val0123, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123f, _val0123, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123f, _val0123, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123f, _val0123, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123f, _val4567, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123f, _val4567, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123f, _val4567, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123f, _val4567, 3); tmpptr += 32; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _val2 = vdup_n_s16(tmpptr[2]); int16x4_t _val3 = vdup_n_s16(tmpptr[3]); int16x4_t _val4 = vdup_n_s16(tmpptr[4]); int16x4_t _val5 = vdup_n_s16(tmpptr[5]); int16x4_t _val6 = vdup_n_s16(tmpptr[6]); int16x4_t 
_val7 = vdup_n_s16(tmpptr[7]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val0, _w0123); _sum1 = vmlal_s16(_sum1, _val1, _w0123); _sum2 = vmlal_s16(_sum2, _val2, _w0123); _sum3 = vmlal_s16(_sum3, _val3, _w0123); _sum4 = vmlal_s16(_sum4, _val4, _w0123); _sum5 = vmlal_s16(_sum5, _val5, _w0123); _sum6 = vmlal_s16(_sum6, _val6, _w0123); _sum7 = vmlal_s16(_sum7, _val7, _w0123); tmpptr += 8; kptr0 += 4; } //transpose 4 x8 int32x4x2_t _s01 = vtrnq_s32(_sum0, _sum1); int32x4x2_t _s23 = vtrnq_s32(_sum2, _sum3); int32x4x2_t _s45 = vtrnq_s32(_sum4, _sum5); int32x4x2_t _s67 = vtrnq_s32(_sum6, _sum7); _sum0 = vcombine_s32(vget_low_s32(_s01.val[0]), vget_low_s32(_s23.val[0])); _sum1 = vcombine_s32(vget_low_s32(_s01.val[1]), vget_low_s32(_s23.val[1])); _sum2 = vcombine_s32(vget_high_s32(_s01.val[0]), vget_high_s32(_s23.val[0])); _sum3 = vcombine_s32(vget_high_s32(_s01.val[1]), vget_high_s32(_s23.val[1])); _sum4 = vcombine_s32(vget_low_s32(_s45.val[0]), vget_low_s32(_s67.val[0])); _sum5 = vcombine_s32(vget_low_s32(_s45.val[1]), vget_low_s32(_s67.val[1])); _sum6 = vcombine_s32(vget_high_s32(_s45.val[0]), vget_high_s32(_s67.val[0])); _sum7 = vcombine_s32(vget_high_s32(_s45.val[1]), vget_high_s32(_s67.val[1])); vst1q_s32(outptr0, _sum0); vst1q_s32(outptr1, _sum1); vst1q_s32(outptr2, _sum2); vst1q_s32(outptr3, _sum3); vst1q_s32(outptr0 + 4, _sum4); vst1q_s32(outptr1 + 4, _sum5); vst1q_s32(outptr2 + 4, _sum6); vst1q_s32(outptr3 + 4, _sum7); outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; } #endif for (; i + 3 < size; i += 4) { #if __ARM_FEATURE_DOTPROD const signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else const signed char *tmpptr = tmp.channel(i / 4); #endif const signed char *kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 16); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); tmpptr += 32; kptr0 += 32; } int j = 0; for (; j + 3 < nn1; j += 4) { int8x16_t _val = vld1q_s8(tmpptr); int8x8x2_t _val01 = vuzp_s8(vget_low_s8(_val), vget_high_s8(_val)); int8x8x2_t _val0123 = vuzp_s8(_val01.val[0], _val01.val[1]); int8x16_t _val0123f = vcombine_s8(_val0123.val[0], _val0123.val[1]); int8x16_t _w = vld1q_s8(kptr0); int8x8x2_t _w01 = vuzp_s8(vget_low_s8(_w), vget_high_s8(_w)); int8x8x2_t _w0123 = vuzp_s8(_w01.val[0], _w01.val[1]); int8x16_t _w0123f = vcombine_s8(_w0123.val[0], _w0123.val[1]); _sum0 = vdotq_laneq_s32(_sum0, _w0123f, _val0123f, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123f, _val0123f, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123f, _val0123f, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123f, _val0123f, 3); tmpptr += 
16; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _val2 = vdup_n_s16(tmpptr[2]); int16x4_t _val3 = vdup_n_s16(tmpptr[3]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val0, _w0123); _sum1 = vmlal_s16(_sum1, _val1, _w0123); _sum2 = vmlal_s16(_sum2, _val2, _w0123); _sum3 = vmlal_s16(_sum3, _val3, _w0123); tmpptr += 4; kptr0 += 4; } //transpose 4 x4 int32x4x2_t _s01 = vtrnq_s32(_sum0, _sum1); int32x4x2_t _s23 = vtrnq_s32(_sum2, _sum3); _sum0 = vcombine_s32(vget_low_s32(_s01.val[0]), vget_low_s32(_s23.val[0])); _sum1 = vcombine_s32(vget_low_s32(_s01.val[1]), vget_low_s32(_s23.val[1])); _sum2 = vcombine_s32(vget_high_s32(_s01.val[0]), vget_high_s32(_s23.val[0])); _sum3 = vcombine_s32(vget_high_s32(_s01.val[1]), vget_high_s32(_s23.val[1])); vst1q_s32(outptr0, _sum0); vst1q_s32(outptr1, _sum1); vst1q_s32(outptr2, _sum2); vst1q_s32(outptr3, _sum3); outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #else /* // __ARM_FEATURE_DOTPROD */ asm volatile ( "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "cmp %w4, #0 \n" "beq 3f \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "prfm pldl1keep, [%6, #128] \n" "prfm pldl1keep, [%7, #256] \n" "lsr w4, %w4, #1 \n" // w4 = nn >> 1 "cmp w4, #0 \n" "beq 1f \n" "prfm pldl1keep, [%7, #512] \n" "add x5, %6, #16 \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v16.16b}, [%6] \n" // val L H "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%7], #64 \n" "add %6, %6, #32 \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "ld1 {v18.16b}, [%6] \n" "add %6, %6, #32 \n" "0: \n" "smull v24.8h, v16.8b, v20.8b \n" "prfm pldl1keep, [%7, #256] \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [%7, #512] \n" "smull v26.8h, v16.8b, v21.8b \n" "subs w4, w4, #1 \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "smlal v26.8h, v18.8b, v23.8b \n" "smlal2 v27.8h, v19.16b, v23.16b \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [x5] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add x5, x5, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v2.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [x5] \n" "smlal v30.8h, v19.8b, v23.8b \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "smull v24.8h, v16.8b, v20.8b \n" "add x5, x5, #32 \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [x5, #128] \n" "smull v26.8h, v16.8b, v21.8b \n" "prfm pldl1keep, [x5, #384] \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "sadalp v5.4s, v29.8h \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "sadalp v4.4s, v28.8h \n" 
"smlal v26.8h, v18.8b, v23.8b \n" "sadalp v7.4s, v31.8h \n" "smlal2 v27.8h, v19.16b, v23.16b \n" "sadalp v6.4s, v30.8h \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [%6] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add %6, %6, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v10.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [%6] \n" "smlal v30.8h, v19.8b, v23.8b \n" "add %6, %6, #32 \n" "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%7], #64 \n" "sadalp v13.4s, v29.8h \n" "prfm pldl1keep, [%6, #128] \n" "sadalp v12.4s, v28.8h \n" "prfm pldl1keep, [%6, #384] \n" "sadalp v15.4s, v31.8h \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "sadalp v14.4s, v30.8h \n" "bne 0b \n" "sub %6, %6, #64 \n" "sub %7, %7, #64 \n" "1: \n" "and w4, %w4, #1 \n" // w4 = remain = nn & 1 "cmp w4, #0 \n" // w4 > 0 "beq 2f \n" "ld1 {v16.8b, v17.8b}, [%6], #16 \n" "ld1 {v20.8b, v21.8b, v22.8b, v23.8b}, [%7], #32 \n" "smull v24.8h, v16.8b, v20.8b \n" "smull v25.8h, v16.8b, v21.8b \n" "smull v26.8h, v16.8b, v22.8b \n" "ld1 {v18.8b, v19.8b}, [%6], #16 \n" "smull v27.8h, v16.8b, v23.8b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull v29.8h, v17.8b, v21.8b \n" "sadalp v2.4s, v26.8h \n" "smull v30.8h, v17.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smull v31.8h, v17.8b, v23.8b \n" "sadalp v4.4s, v28.8h \n" "smull v24.8h, v18.8b, v20.8b \n" "sadalp v5.4s, v29.8h \n" "smull v25.8h, v18.8b, v21.8b \n" "sadalp v6.4s, v30.8h \n" "smull v26.8h, v18.8b, v22.8b \n" "sadalp v7.4s, v31.8h \n" "smull v27.8h, v18.8b, v23.8b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v19.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull v29.8h, v19.8b, v21.8b \n" "sadalp v10.4s, v26.8h \n" "smull v30.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smull v31.8h, v19.8b, v23.8b \n" "sadalp v12.4s, v28.8h \n" "sadalp v13.4s, v29.8h \n" "sadalp v14.4s, v30.8h \n" "sadalp v15.4s, v31.8h \n" "2: \n" "addp v0.4s, v0.4s, v1.4s \n" "addp v2.4s, v2.4s, v3.4s \n" "addp v4.4s, v4.4s, v5.4s \n" "addp v6.4s, v6.4s, v7.4s \n" "addp v8.4s, v8.4s, v9.4s \n" "addp v10.4s, v10.4s, v11.4s \n" "addp v12.4s, v12.4s, v13.4s \n" "addp v14.4s, v14.4s, v15.4s \n" "addp v0.4s, v0.4s, v2.4s \n" "addp v1.4s, v4.4s, v6.4s \n" "addp v2.4s, v8.4s, v10.4s \n" "addp v3.4s, v12.4s, v14.4s \n" "3: \n" "lsr w4, %w5, #2 \n" // w4 = nn1 >> 2 "cmp w4, #0 \n" "beq 5f \n" "4: \n" "ld1 {v8.16b}, [%6], #16 \n" "ld1 {v9.16b}, [%7], #16 \n" "sshll v4.8h, v8.8b, #0 \n" "sshll2 v5.8h, v8.16b, #0 \n" "sshll v6.8h, v9.8b, #0 \n" "sshll2 v7.8h, v9.16b, #0 \n" "smlal v0.4s, v6.4h, v4.h[0] \n" "smlal v1.4s, v6.4h, v4.h[1] \n" "smlal v2.4s, v6.4h, v4.h[2] \n" "smlal v3.4s, v6.4h, v4.h[3] \n" "smlal2 v0.4s, v6.8h, v4.h[4] \n" "smlal2 v1.4s, v6.8h, v4.h[5] \n" "smlal2 v2.4s, v6.8h, v4.h[6] \n" "smlal2 v3.4s, v6.8h, v4.h[7] \n" "smlal v0.4s, v7.4h, v5.h[0] \n" "smlal v1.4s, v7.4h, v5.h[1] \n" "smlal v2.4s, v7.4h, v5.h[2] \n" "smlal v3.4s, v7.4h, v5.h[3] \n" "smlal2 v0.4s, v7.8h, v5.h[4] \n" "smlal2 v1.4s, v7.8h, v5.h[5] \n" "smlal2 v2.4s, v7.8h, v5.h[6] \n" "smlal2 v3.4s, v7.8h, v5.h[7] \n" "subs w4, w4, #1 \n" "bne 4b \n" "5: \n" "and w4, %w5, #3 \n" // w4 = nn1 & 3 "cmp w4, #0 \n" // w4 > 0 "beq 7f \n" "6: \n" "ld1 {v4.8b}, [%6] \n" "ld1 {v6.8b}, [%7] \n" "sshll v4.8h, v4.8b, #0 \n" "sshll v6.8h, v6.8b, #0 \n" "smlal v0.4s, v6.4h, 
v4.h[0] \n" "smlal v1.4s, v6.4h, v4.h[1] \n" "smlal v2.4s, v6.4h, v4.h[2] \n" "smlal v3.4s, v6.4h, v4.h[3] \n" "add %6, %6, #4 \n" "add %7, %7, #4 \n" "subs w4, w4, #1 \n" "bne 6b \n" "7: \n" // transpose 4 x4 "trn1 v4.4s, v0.4s, v1.4s \n" "trn2 v5.4s, v0.4s, v1.4s \n" "trn1 v6.4s, v2.4s, v3.4s \n" "trn2 v7.4s, v2.4s, v3.4s \n" "trn1 v0.2d, v4.2d, v6.2d \n" "trn2 v2.2d, v4.2d, v6.2d \n" "trn1 v1.2d, v5.2d, v7.2d \n" "trn2 v3.2d, v5.2d, v7.2d \n" "st1 {v0.4s}, [%0], #16 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v2.4s}, [%2], #16 \n" "st1 {v3.4s}, [%3], #16 \n" :"=r"(outptr0), "=r"(outptr1), "=r"(outptr2), "=r"(outptr3), "=r"(nn), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) :"0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(nn), "5"(nn1), "6"(tmpptr), "7"(kptr0) :"memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #endif /* // __ARM_FEATURE_DOTPROD */ } #endif /* // __aarch64__ */ for (; i + 1 < size; i += 2) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else const signed char *tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else const signed char *tmpptr = tmp.channel(i / 2); #endif const signed char *kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; #if __aarch64__ int32x4_t _sum00 = vdupq_n_s32(0); int32x4_t _sum10 = vdupq_n_s32(0); #if __ARM_FEATURE_DOTPROD for (int j = 0; j < nn; j++) { int8x16_t _val01_l_h = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum00 = vdotq_laneq_s32(_sum00, _w0123_l, _val01_l_h, 0); _sum10 = vdotq_laneq_s32(_sum10, _w0123_l, _val01_l_h, 1); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum00 = vdotq_laneq_s32(_sum00, _w0123_h, _val01_l_h, 2); _sum10 = vdotq_laneq_s32(_sum10, _w0123_h, _val01_l_h, 3); tmpptr += 16; kptr0 += 32; } #else /* // __ARM_FEATURE_DOTPROD */ if (nn > 0) { int32x4_t _sum01 = vdupq_n_s32(0); int32x4_t _sum02 = vdupq_n_s32(0); int32x4_t _sum03 = vdupq_n_s32(0); int32x4_t _sum11 = vdupq_n_s32(0); int32x4_t _sum12 = vdupq_n_s32(0); int32x4_t _sum13 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv00 = vmlal_s8(_wv00, vget_low_s8(_val1), vget_low_s8(_w45)); _wv01 = vmlal_s8(_wv01, vget_low_s8(_val1), vget_high_s8(_w45)); _wv02 = vmlal_s8(_wv02, vget_low_s8(_val1), vget_low_s8(_w67)); _wv03 = vmlal_s8(_wv03, vget_low_s8(_val1), vget_high_s8(_w67)); _wv10 = vmlal_s8(_wv10, vget_high_s8(_val1), vget_low_s8(_w45)); _wv11 = vmlal_s8(_wv11, vget_high_s8(_val1), vget_high_s8(_w45)); _wv12 = vmlal_s8(_wv12, vget_high_s8(_val1), vget_low_s8(_w67)); _wv13 = 
vmlal_s8(_wv13, vget_high_s8(_val1), vget_high_s8(_w67)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 32; kptr0 += 64; } for (; j < nn; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w23)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 16; kptr0 += 32; } int32x4_t _s001 = vpaddq_s32(_sum00, _sum01); int32x4_t _s023 = vpaddq_s32(_sum02, _sum03); int32x4_t _s101 = vpaddq_s32(_sum10, _sum11); int32x4_t _s123 = vpaddq_s32(_sum12, _sum13); _sum00 = vpaddq_s32(_s001, _s023); _sum10 = vpaddq_s32(_s101, _s123); } #endif /* // __ARM_FEATURE_DOTPROD */ int j = 0; for (; j + 3 < nn1; j += 4) { int16x8_t _val01234567 = vmovl_s8(vld1_s8(tmpptr)); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _w01234567 = vmovl_s8(vget_low_s8(_w)); int16x8_t _w89abcdef = vmovl_s8(vget_high_s8(_w)); int16x4_t _w0123 = vget_low_s16(_w01234567); int16x4_t _w4567 = vget_high_s16(_w01234567); int16x4_t _w89ab = vget_low_s16(_w89abcdef); int16x4_t _wcdef = vget_high_s16(_w89abcdef); _sum00 = vmlal_laneq_s16(_sum00, _w0123, _val01234567, 0); _sum10 = vmlal_laneq_s16(_sum10, _w0123, _val01234567, 1); _sum00 = vmlal_laneq_s16(_sum00, _w4567, _val01234567, 2); _sum10 = vmlal_laneq_s16(_sum10, _w4567, _val01234567, 3); _sum00 = vmlal_laneq_s16(_sum00, _w89ab, _val01234567, 4); _sum10 = vmlal_laneq_s16(_sum10, _w89ab, _val01234567, 5); _sum00 = vmlal_laneq_s16(_sum00, _wcdef, _val01234567, 6); _sum10 = vmlal_laneq_s16(_sum10, _wcdef, _val01234567, 7); tmpptr += 8; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val0 = vdup_n_s16(tmpptr[0]); int16x4_t _val1 = vdup_n_s16(tmpptr[1]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum00 = vmlal_s16(_sum00, _val0, _w0123); _sum10 = vmlal_s16(_sum10, _val1, _w0123); tmpptr += 2; kptr0 += 4; } vst1q_lane_s32(outptr0, _sum00, 0); vst1q_lane_s32(outptr1, _sum00, 1); vst1q_lane_s32(outptr2, _sum00, 2); vst1q_lane_s32(outptr3, _sum00, 3); vst1q_lane_s32(outptr0 + 1, _sum10, 0); vst1q_lane_s32(outptr1 + 1, _sum10, 1); vst1q_lane_s32(outptr2 + 1, _sum10, 2); vst1q_lane_s32(outptr3 + 1, _sum10, 3); outptr0 += 2; outptr1 += 2; outptr2 += 2; outptr3 += 2; #else /* // __aarch64__ */ asm volatile ( "veor q0, q0 \n" "veor q1, q1 \n" "veor q2, q2 \n" "veor q3, q3 \n" "veor q4, q4 \n" "veor q5, q5 \n" "veor q6, q6 \n" "veor q7, q7 \n" "cmp %4, #0 \n" "beq 3f 
\n" "pld [%6, #256] \n" "lsr r4, %4, #1 \n" // r4 = nn = size >> 1 "cmp r4, #0 \n" "beq 1f \n" "add r5, %7, #16 \n" "pld [%7, #128] \n" "mov r6, #32 \n" "pld [%7, #384] \n" "vld1.s8 {d20-d21}, [%7 :128], r6 \n" // _w01 "vld1.s8 {d16-d19}, [%6 :128]! \n" // _val0 _val1 "vld1.s8 {d22-d23}, [%7 :128], r6 \n" // _w45 "0: \n" "vmull.s8 q12, d16, d20 \n" "pld [%6, #256] \n" "vmull.s8 q13, d16, d21 \n" "pld [%7, #384] \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d20-d21}, [r5 :128], r6 \n" // _w23 "vmlal.s8 q12, d18, d22 \n" "vmlal.s8 q13, d18, d23 \n" "subs r4, r4, #1 \n" "vmlal.s8 q14, d19, d22 \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d22-d23}, [r5 :128], r6 \n" // _w67 "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d20 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d21 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d20 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d16-d17}, [%6 :128]! \n" // _val0 "vmlal.s8 q12, d18, d22 \n" "vld1.s8 {d20-d21}, [%7 :128], r6 \n" // _w01 "vmlal.s8 q13, d18, d23 \n" "pld [r5, #128] \n" "vmlal.s8 q14, d19, d22 \n" "pld [r5, #384] \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d18-d19}, [%6 :128]! \n" // _val1 "vpadal.s16 q2, q12 \n" "vld1.s8 {d22-d23}, [%7 :128], r6 \n" // _w45 "vpadal.s16 q3, q13 \n" "pld [%6, #128] \n" "vpadal.s16 q6, q14 \n" "pld [%7, #128] \n" "vpadal.s16 q7, q15 \n" "bne 0b \n" "sub %6, %6, #32 \n" "sub %7, %7, #64 \n" "1: \n" "and r4, %4, #1 \n" // r4 = remain = size & 1 "cmp r4, #0 \n" // r4 > 0 "beq 2f \n" "vld1.s8 {d16-d17}, [%6 :128]! \n" // _val "vld1.s8 {d20-d21}, [%7 :128]! \n" // _w01 "vmull.s8 q12, d16, d20 \n" "vld1.s8 {d22-d23}, [%7 :128]! \n" // _w23 "vmull.s8 q13, d16, d21 \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d22 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d23 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d22 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d23 \n" "vpadal.s16 q2, q12 \n" "vpadal.s16 q3, q13 \n" "vpadal.s16 q6, q14 \n" "vpadal.s16 q7, q15 \n" "2: \n" "vpadd.s32 d16, d0, d1 \n" "vpadd.s32 d17, d2, d3 \n" "vpadd.s32 d18, d4, d5 \n" "vpadd.s32 d19, d6, d7 \n" "vpadd.s32 d20, d8, d9 \n" "vpadd.s32 d21, d10, d11 \n" "vpadd.s32 d22, d12, d13 \n" "vpadd.s32 d23, d14, d15 \n" "vpadd.s32 d0, d16, d17 \n" "vpadd.s32 d1, d18, d19 \n" "vpadd.s32 d2, d20, d21 \n" "vpadd.s32 d3, d22, d23 \n" "3: \n" "lsr r4, %5, #2 \n" // r4 = nn1 >> 2 "cmp r4, #0 \n" "beq 5f \n" "4: \n" "vld1.s8 {d4}, [%6]! \n" "vmovl.s8 q2, d4 \n" "vld1.s8 {d10-d11}, [%7]! \n" "vmovl.s8 q3, d10 \n" "vmovl.s8 q4, d11 \n" "vmlal.s16 q0, d6, d4[0] \n" "vmlal.s16 q1, d6, d4[1] \n" "vmlal.s16 q0, d7, d4[2] \n" "vmlal.s16 q1, d7, d4[3] \n" "vmlal.s16 q0, d8, d5[0] \n" "vmlal.s16 q1, d8, d5[1] \n" "vmlal.s16 q0, d9, d5[2] \n" "vmlal.s16 q1, d9, d5[3] \n" "subs r4, r4, #1 \n" "bne 4b \n" "5: \n" "and r4, %5, #3 \n" // r4 = nn1 & 3 "cmp r4, #0 \n" // w4 > 0 "beq 7f \n" "6: \n" "vld1.s8 {d4[]}, [%6]! \n" "vld1.s8 {d6[]}, [%6]! \n" "vmovl.s8 q2, d4 \n" "vmovl.s8 q3, d6 \n" "vld1.s8 {d8}, [%7] \n" "vmovl.s8 q4, d8 \n" "vmlal.s16 q0, d4, d8 \n" "vmlal.s16 q1, d6, d8 \n" "add %7, %7, #4 \n" "subs r4, r4, #1 \n" "bne 6b \n" "7: \n" "vst1.s32 {d0[0]}, [%0]! \n" "vst1.s32 {d0[1]}, [%1]! \n" "vst1.s32 {d1[0]}, [%2]! \n" "vst1.s32 {d1[1]}, [%3]! \n" "vst1.s32 {d2[0]}, [%0]! \n" "vst1.s32 {d2[1]}, [%1]! \n" "vst1.s32 {d3[0]}, [%2]! \n" "vst1.s32 {d3[1]}, [%3]! 
\n" :"=r"(outptr0), "=r"(outptr1), "=r"(outptr2), "=r"(outptr3), "=r"(nn), "=r"(nn1), "=r"(tmpptr), "=r"(kptr0) :"0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(nn), "5"(nn1), "6"(tmpptr), "7"(kptr0) :"memory", "r4", "r5", "r6", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif /* // __aarch64__ */ } for (; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else const signed char *tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else const signed char *tmpptr = tmp.channel(i / 2 + i % 2); #endif const signed char *kptr0 = kernel.channel(p / 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); #if __ARM_FEATURE_DOTPROD for (int j = 0; j < nn; j++) { int8x8_t _val0_l_h = vld1_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _w0123_l, _val0_l_h, 0); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_lane_s32(_sum0, _w0123_h, _val0_l_h, 1); tmpptr += 8; kptr0 += 32; } #else /* // __ARM_FEATURE_DOTPROD */ if (nn > 0) { int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv0 = vmlal_s8(_wv0, vget_high_s8(_val), vget_low_s8(_w45)); _wv1 = vmlal_s8(_wv1, vget_high_s8(_val), vget_high_s8(_w45)); _wv2 = vmlal_s8(_wv2, vget_high_s8(_val), vget_low_s8(_w67)); _wv3 = vmlal_s8(_wv3, vget_high_s8(_val), vget_high_s8(_w67)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 16; kptr0 += 64; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(_val, vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(_val, vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(_val, vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(_val, vget_high_s8(_w23)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 8; kptr0 += 32; } #if __aarch64__ int32x4_t _s01 = vpaddq_s32(_sum0, _sum1); int32x4_t _s23 = vpaddq_s32(_sum2, _sum3); _sum0 = vpaddq_s32(_s01, _s23); #else int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum1), vget_high_s32(_sum1)); int32x2_t _s23_low = vpadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); int32x2_t _s23_high = vpadd_s32(vget_low_s32(_sum3), vget_high_s32(_sum3)); _sum0 = vcombine_s32(vpadd_s32(_s01_low, _s01_high), vpadd_s32(_s23_low, _s23_high)); #endif } #endif /* // __ARM_FEATURE_DOTPROD */ int32x4_t _sum1 = vdupq_n_s32(0); int j = 0; for (; j + 3 < nn1; j += 4) { int16x4_t _val0123 = vget_low_s16(vmovl_s8(vld1_s8(tmpptr))); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _w01234567 = vmovl_s8(vget_low_s8(_w)); int16x8_t _w89abcdef = vmovl_s8(vget_high_s8(_w)); 
int16x4_t _w0123 = vget_low_s16(_w01234567); int16x4_t _w4567 = vget_high_s16(_w01234567); int16x4_t _w89ab = vget_low_s16(_w89abcdef); int16x4_t _wcdef = vget_high_s16(_w89abcdef); _sum0 = vmlal_lane_s16(_sum0, _w0123, _val0123, 0); _sum1 = vmlal_lane_s16(_sum1, _w4567, _val0123, 1); _sum0 = vmlal_lane_s16(_sum0, _w89ab, _val0123, 2); _sum1 = vmlal_lane_s16(_sum1, _wcdef, _val0123, 3); tmpptr += 4; kptr0 += 16; } for (; j < nn1; j++) { int16x4_t _val = vdup_n_s16(tmpptr[0]); int16x4_t _w0123; _w0123 = vset_lane_s16(kptr0[0], _w0123, 0); _w0123 = vset_lane_s16(kptr0[1], _w0123, 1); _w0123 = vset_lane_s16(kptr0[2], _w0123, 2); _w0123 = vset_lane_s16(kptr0[3], _w0123, 3); _sum0 = vmlal_s16(_sum0, _val, _w0123); tmpptr += 1; kptr0 += 4; } _sum0 = vaddq_s32(_sum0, _sum1); vst1q_lane_s32(outptr0, _sum0, 0); vst1q_lane_s32(outptr1, _sum0, 1); vst1q_lane_s32(outptr2, _sum0, 2); vst1q_lane_s32(outptr3, _sum0, 3); outptr0 += 1; outptr1 += 1; outptr2 += 1; outptr3 += 1; } } remain_outch_start += nn_outch << 2; #endif /* // __ARM_NEON */ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { int *outptr0 = top_blob.channel(p); int i = 0; #if __ARM_NEON #if __aarch64__ #if __ARM_FEATURE_DOTPROD for (; i + 15 < size; i += 16) { const signed char *tmpptr = tmp.channel(i / 16); const signed char *kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _val89ab_l = vld1q_s8(tmpptr + 32); int8x16_t _valcdef_l = vld1q_s8(tmpptr + 48); int8x16_t _val0123_h = vld1q_s8(tmpptr + 64); int8x16_t _val4567_h = vld1q_s8(tmpptr + 80); int8x16_t _val89ab_h = vld1q_s8(tmpptr + 96); int8x16_t _valcdef_h = vld1q_s8(tmpptr + 112); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0); _sum1 = vdotq_lane_s32(_sum1, _val4567_l, _w_lh, 0); _sum2 = vdotq_lane_s32(_sum2, _val89ab_l, _w_lh, 0); _sum3 = vdotq_lane_s32(_sum3, _valcdef_l, _w_lh, 0); _sum0 = vdotq_lane_s32(_sum0, _val0123_h, _w_lh, 1); _sum1 = vdotq_lane_s32(_sum1, _val4567_h, _w_lh, 1); _sum2 = vdotq_lane_s32(_sum2, _val89ab_h, _w_lh, 1); _sum3 = vdotq_lane_s32(_sum3, _valcdef_h, _w_lh, 1); tmpptr += 128; kptr0 += 8; } int j = 0; for (; j < nn1; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x8_t _w = vld1_dup_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val), _w); int16x8_t _s1 = vmull_s8(vget_high_s8(_val), _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); tmpptr += 16; kptr0 += 1; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); vst1q_s32(outptr0 + 8, _sum2); vst1q_s32(outptr0 + 12, _sum3); outptr0 += 16; } for (; i + 7 < size; i += 8) { const signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8); const signed char *kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); if (nn > 0) { int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _val0123_h = vld1q_s8(tmpptr + 32); int8x16_t 
_val4567_h = vld1q_s8(tmpptr + 48); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0); _sum1 = vdotq_lane_s32(_sum1, _val4567_l, _w_lh, 0); _sum2 = vdotq_lane_s32(_sum2, _val0123_h, _w_lh, 1); _sum3 = vdotq_lane_s32(_sum3, _val4567_h, _w_lh, 1); tmpptr += 64; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum2); _sum1 = vaddq_s32(_sum1, _sum3); } int j = 0; for (; j < nn1; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x8_t _w = vld1_dup_s8(kptr0); int16x8_t _s = vmull_s8(_val, _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s)); tmpptr += 8; kptr0 += 1; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); outptr0 += 8; } #endif /* // __ARM_FEATURE_DOTPROD */ for (; i + 3 < size; i += 4) { #if __ARM_FEATURE_DOTPROD const signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else const signed char *tmpptr = tmp.channel(i / 4); #endif const signed char *kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x4_t _sum0 = vdupq_n_s32(0); if (nn > 0) { #if __ARM_FEATURE_DOTPROD int32x4_t _sum1 = vdupq_n_s32(0); int j = 0; for (; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val0123_h = vld1q_s8(tmpptr + 16); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0); _sum1 = vdotq_lane_s32(_sum1, _val0123_h, _w_lh, 1); tmpptr += 32; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum1); #else /* // __ARM_FEATURE_DOTPROD */ int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _val2 = vld1q_s8(tmpptr + 32); int8x16_t _val3 = vld1q_s8(tmpptr + 48); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w)); int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w)); int16x8_t _s2 = vmull_s8(vget_low_s8(_val1), vget_low_s8(_w)); int16x8_t _s3 = vmull_s8(vget_high_s8(_val1), vget_low_s8(_w)); _s0 = vmlal_s8(_s0, vget_low_s8(_val2), vget_high_s8(_w)); _s1 = vmlal_s8(_s1, vget_high_s8(_val2), vget_high_s8(_w)); _s2 = vmlal_s8(_s2, vget_low_s8(_val3), vget_high_s8(_w)); _s3 = vmlal_s8(_s3, vget_high_s8(_val3), vget_high_s8(_w)); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); _sum4 = vaddw_s16(_sum4, vget_low_s16(_s2)); _sum5 = vaddw_s16(_sum5, vget_high_s16(_s2)); _sum6 = vaddw_s16(_sum6, vget_low_s16(_s3)); _sum7 = vaddw_s16(_sum7, vget_high_s16(_s3)); tmpptr += 64; kptr0 += 16; } for (; j < nn; j++) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x8_t _w = vld1_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), _w); int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), _w); int16x8_t _s2 = vmull_s8(vget_low_s8(_val1), _w); int16x8_t _s3 = vmull_s8(vget_high_s8(_val1), _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); _sum4 = vaddw_s16(_sum4, vget_low_s16(_s2)); _sum5 = vaddw_s16(_sum5, vget_high_s16(_s2)); _sum6 = vaddw_s16(_sum6, vget_low_s16(_s3)); _sum7 = vaddw_s16(_sum7, 
vget_high_s16(_s3)); tmpptr += 32; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum1); _sum2 = vaddq_s32(_sum2, _sum3); _sum4 = vaddq_s32(_sum4, _sum5); _sum6 = vaddq_s32(_sum6, _sum7); int32x2_t _s0 = vadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s2 = vadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); int32x2_t _s4 = vadd_s32(vget_low_s32(_sum4), vget_high_s32(_sum4)); int32x2_t _s6 = vadd_s32(vget_low_s32(_sum6), vget_high_s32(_sum6)); int32x2_t _ss0 = vpadd_s32(_s0, _s2); int32x2_t _ss1 = vpadd_s32(_s4, _s6); _sum0 = vcombine_s32(_ss0, _ss1); #endif /* // __ARM_FEATURE_DOTPROD */ } int sum0123[4] = {0, 0, 0, 0}; int j = 0; for (; j < nn1; j++) { signed char val0 = tmpptr[0]; signed char val1 = tmpptr[1]; signed char val2 = tmpptr[2]; signed char val3 = tmpptr[3]; signed char w = kptr0[0]; sum0123[0] += val0 * w; sum0123[1] += val1 * w; sum0123[2] += val2 * w; sum0123[3] += val3 * w; tmpptr += 4; kptr0 += 1; } _sum0 = vaddq_s32(_sum0, vld1q_s32(sum0123)); vst1q_s32(outptr0, _sum0); outptr0 += 4; } #endif /* // __aarch64__ */ for (; i + 1 < size; i += 2) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else const signed char *tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else const signed char *tmpptr = tmp.channel(i / 2); #endif const signed char *kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int32x2_t _sum = vdup_n_s32(0); if (nn > 0) { #if __ARM_FEATURE_DOTPROD int32x2_t _sum0 = vdup_n_s32(0); int32x2_t _sum1 = vdup_n_s32(0); int j = 0; for (; j < nn; j++) { int8x16_t _val01_lh = vld1q_s8(tmpptr); int8x8_t _w_lh = vld1_s8(kptr0); _sum0 = vdot_lane_s32(_sum0, vget_low_s8(_val01_lh), _w_lh, 0); _sum1 = vdot_lane_s32(_sum1, vget_high_s8(_val01_lh), _w_lh, 1); tmpptr += 16; kptr0 += 8; } _sum = vadd_s32(_sum0, _sum1); #else /* // __ARM_FEATURE_DOTPROD */ int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w)); int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w)); _s0 = vmlal_s8(_s0, vget_low_s8(_val1), vget_high_s8(_w)); _s1 = vmlal_s8(_s1, vget_high_s8(_val1), vget_high_s8(_w)); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); tmpptr += 32; kptr0 += 16; } for (; j < nn; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x8_t _w = vld1_s8(kptr0); int16x8_t _s0 = vmull_s8(vget_low_s8(_val), _w); int16x8_t _s1 = vmull_s8(vget_high_s8(_val), _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s0)); _sum2 = vaddw_s16(_sum2, vget_low_s16(_s1)); _sum3 = vaddw_s16(_sum3, vget_high_s16(_s1)); tmpptr += 16; kptr0 += 8; } _sum0 = vaddq_s32(_sum0, _sum1); _sum2 = vaddq_s32(_sum2, _sum3); int32x2_t _s0 = vadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s2 = vadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); _sum = vpadd_s32(_s0, _s2); #endif /* // __ARM_FEATURE_DOTPROD */ } int sum01[2] = {0, 0}; int j = 0; for (; j < nn1; j++) { signed char val0 = tmpptr[0]; signed char val1 = tmpptr[1]; signed char w = kptr0[0]; sum01[0] += val0 * w; sum01[1] += val1 * w; tmpptr += 2; 
kptr0 += 1; } _sum = vadd_s32(_sum, vld1_s32(sum01)); vst1_s32(outptr0, _sum); outptr0 += 2; } for (; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char *tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else const signed char *tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else const signed char *tmpptr = tmp.channel(i / 2 + i % 2); #endif const signed char *kptr0 = kernel.channel(p / 4 + p % 4); int nn = (inch / 8) * maxk; int nn1 = (inch % 8) * maxk; int sum = 0; if (nn > 0) { #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x2_t _sum1 = vdup_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w = vld1q_s8(kptr0); _sum0 = vdotq_s32(_sum0, _val, _w); tmpptr += 16; kptr0 += 16; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x8_t _w = vld1_s8(kptr0); _sum1 = vdot_s32(_sum1, _val, _w); tmpptr += 8; kptr0 += 8; } sum = vaddvq_s32(_sum0) + vaddv_s32(_sum1); #else /* // __ARM_FEATURE_DOTPROD */ int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w = vld1q_s8(kptr0); int16x8_t _s8 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w)); _s8 = vmlal_s8(_s8, vget_high_s8(_val), vget_high_s8(_w)); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s8)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s8)); tmpptr += 16; kptr0 += 16; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x8_t _w = vld1_s8(kptr0); int16x8_t _s8 = vmull_s8(_val, _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s8)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s8)); tmpptr += 8; kptr0 += 8; } int32x4_t _sum = vaddq_s32(_sum0, _sum1); #if __aarch64__ sum = vaddvq_s32(_sum); //dot #else int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum)); _ss = vpadd_s32(_ss, _ss); sum = vget_lane_s32(_ss, 0); #endif #endif /* // __ARM_FEATURE_DOTPROD */ } int j = 0; for (; j < nn1; j++) { signed char val = tmpptr[0]; signed char w = kptr0[0]; sum += val * w; tmpptr += 1; kptr0 += 1; } outptr0[0] = sum; outptr0 += 1; } #else /* // __ARM_NEON */ for (; i < size; i++) { const signed char *tmpptr = tmp.channel(i); const signed char *kptr0 = kernel.channel(p); int nn1 = inch * maxk; int sum = 0; int j = 0; for (; j < nn1; j++) { signed char val = tmpptr[0]; signed char w = kptr0[0]; sum += val * w; tmpptr += 1; kptr0 += 1; } outptr0[0] = sum; outptr0 += 1; } #endif /* // __ARM_NEON */ } } static void convolution_im2col_sgemm_transform_kernel_int8_neon(const Mat & _kernel, Mat & kernel_tm, int inch, int outch, int kernel_w, int kernel_h) { const int maxk = kernel_w * kernel_h; #if __ARM_NEON //interleave // src = maxk - inch - outch // dst = 8 a - 4 b - maxk - inch / 8 a - outch / 4 b // dst = 4 a - 4 b - 2 - maxk - inch / 8 a - outch / 4 b(arm82) Mat kernel = _kernel.reshape(maxk, inch, outch); if (outch >= 4) { if (inch >= 8) kernel_tm.create(32 * maxk, inch / 8 + inch % 8, outch / 4 + outch % 4, 1u); else kernel_tm.create(4 * maxk, inch, outch / 4 + outch % 4, 1u); } else { if (inch >= 8) kernel_tm.create(8 * maxk, inch / 8 + inch % 8, outch, 1u); else kernel_tm.create(1 * maxk, inch, outch, 1u); } int q = 0; for (; q + 3 < outch; q += 4) { signed char *g00 = kernel_tm.channel(q / 4); int p = 0; for (; p + 7 < inch; p += 8) { for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { const signed char *k00 = kernel.channel(q + i).row < const signed char 
>(p + j); g00[0] = k00[k]; g00++; } } for (int i = 0; i < 4; i++) { for (int j = 4; j < 8; j++) { const signed char *k00 = kernel.channel(q + i).row < const signed char >(p + j); g00[0] = k00[k]; g00++; } } #else for (int i = 0; i < 4; i++) { for (int j = 0; j < 8; j++) { const signed char *k00 = kernel.channel(q + i).row < const signed char >(p + j); g00[0] = k00[k]; g00++; } } #endif } } //TODO unroll 4 for (; p < inch; p++) { for (int k = 0; k < maxk; k++) { for (int i = 0; i < 4; i++) { const signed char *k00 = kernel.channel(q + i).row < const signed char >(p); g00[0] = k00[k]; g00++; } } } } //TODO unroll 2 for (; q < outch; q++) { signed char *g00 = kernel_tm.channel(q / 4 + q % 4); int p = 0; for (; p + 7 < inch; p += 8) { for (int k = 0; k < maxk; k++) { for (int j = 0; j < 8; j++) { const signed char *k00 = kernel.channel(q).row < const signed char >(p + j); g00[0] = k00[k]; g00++; } } } //TODO unroll 4 for (; p < inch; p++) { for (int k = 0; k < maxk; k++) { const signed char *k00 = kernel.channel(q).row < const signed char >(p); g00[0] = k00[k]; g00++; } } } #else /* // __ARM_NEON */ kernel_tm = _kernel.reshape(maxk, inch, outch); #endif /* // __ARM_NEON */ } static void convolution_im2col_sgemm_int8_neon(const Mat & bottom_blob, Mat & top_blob, const Mat & kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option & opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * kernel_h; //im2col Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator); { const int gap = w * stride_h - outw * stride_w; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); signed char *ptr = bottom_im2col.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { const signed char *sptr = img.row < const signed char >(dilation_h * u) + dilation_w * v; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { ptr[0] = sptr[0]; ptr[1] = sptr[stride_w]; ptr[2] = sptr[stride_w * 2]; ptr[3] = sptr[stride_w * 3]; sptr += stride_w * 4; ptr += 4; } for (; j + 1 < outw; j += 2) { ptr[0] = sptr[0]; ptr[1] = sptr[stride_w]; sptr += stride_w * 2; ptr += 2; } for (; j < outw; j++) { ptr[0] = sptr[0]; sptr += stride_w; ptr += 1; } sptr += gap; } } } } } im2col_sgemm_int8_neon(bottom_im2col, top_blob, kernel, opt); }
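// ---------------------------------------------------------------------------
// Reference sketch (not part of ncnn; names are illustrative): every NEON,
// dotprod, and scalar path above computes the same quantity as this plain
// im2col-GEMM loop, i.e. for each output channel p and output pixel i,
// out[p][i] = sum over input channels q and kernel taps k of
// im2col[q][k][i] * kernel[p][q][k], with int8 operands widened into an
// int32 accumulator. Plain arrays stand in for ncnn::Mat, and the layouts
// assumed here match the untransformed tensors before interleaving.
static void im2col_gemm_int8_ref(const signed char* bottom_im2col, // [inch][maxk][size]
                                 const signed char* kernel,        // [outch][inch][maxk]
                                 int* top_blob,                    // [outch][size]
                                 int size, int maxk, int inch, int outch)
{
    for (int p = 0; p < outch; p++)
    {
        for (int i = 0; i < size; i++)
        {
            int sum = 0; // 32-bit accumulator, as in the vector paths

            for (int q = 0; q < inch; q++)
            {
                for (int k = 0; k < maxk; k++)
                {
                    // same product/accumulate as the scalar tail loops above
                    sum += (int)bottom_im2col[(q * maxk + k) * size + i]
                           * (int)kernel[(p * inch + q) * maxk + k];
                }
            }

            top_blob[p * size + i] = sum;
        }
    }
}
// The interleaved tmp/kernel_tm layouts built above only reorder these same
// operands so that each sdot/smull step reads them contiguously.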
GB_unop__sinh_fc64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__sinh_fc64_fc64 // op(A') function: GB_unop_tran__sinh_fc64_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = csinh (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = csinh (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = csinh (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SINH || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__sinh_fc64_fc64 ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = csinh (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__sinh_fc64_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__sinh_fc64_fc64 // op(A') function: GB_unop_tran__sinh_fc64_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = csinh (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = csinh (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = csinh (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SINH || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__sinh_fc64_fc64 ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = csinh (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__sinh_fc64_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__sinh_fc64_fc64 // op(A') function: GB_unop_tran__sinh_fc64_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = csinh (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = csinh (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = csinh (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SINH || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__sinh_fc64_fc64 ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = csinh (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__sinh_fc64_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
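The three columns above differ only in whether the elementwise apply loop carries the parallel-for pragma; every iteration writes one independent Cx[p], so the loop parallelizes with no other changes. A standalone sketch of that pattern, assuming plain C99 double complex in place of GxB_FC64_t (which GraphBLAS defines as a double-precision complex type):

#include <complex.h>
#include <stdint.h>

/* Elementwise z = csinh(a): iterations are independent, so a static
 * split across threads mirrors GB_unop_apply__sinh_fc64_fc64. */
void apply_csinh(double complex *Cx, const double complex *Ax,
                 int64_t anz, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++)
        Cx[p] = csinh(Ax[p]);
}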
GB_unaryop__lnot_uint64_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint64_uint8 // op(A') function: GB_tran__lnot_uint64_uint8 // C type: uint64_t // A type: uint8_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint64_uint8 ( uint64_t *restrict Cx, const uint8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint64_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint64_uint8 // op(A') function: GB_tran__lnot_uint64_uint8 // C type: uint64_t // A type: uint8_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint64_uint8 ( uint64_t *restrict Cx, const uint8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint64_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint64_uint8 // op(A') function: GB_tran__lnot_uint64_uint8 // C type: uint64_t // A type: uint8_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint64_uint8 ( uint64_t *restrict Cx, const uint8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint64_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
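Expanding GB_CAST_OP(p, p) by hand for this file makes the typecast chain concrete: GB_GETA reads a uint8_t, GB_CASTING widens it to uint64_t, and GB_OP applies the logical not, so the result is 1 exactly when the input is zero. A self-contained version of that one-element step (lnot_cast_one is an illustrative name, not a GraphBLAS function):

#include <stdint.h>

static inline void lnot_cast_one(uint64_t *Cx, const uint8_t *Ax, int64_t p)
{
    uint8_t aij = Ax[p];           /* GB_GETA: aij = Ax [pA]        */
    uint64_t x = (uint64_t) aij;   /* GB_CASTING: widen to uint64_t */
    Cx[p] = !(x != 0);             /* GB_OP: 1 iff aij == 0         */
}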
map.c
#include <stdio.h>
#include <omp.h>

int main()
{
    int i;
    #pragma omp parallel num_threads(2) // set the number of threads to 2
    {
        int tid = omp_get_thread_num(); // read the thread's identifier
        #pragma omp for ordered
        for(i = 1; i <= 3; i++)
        {
            #pragma omp ordered
            printf("[PRINT1] T%d = %d \n",tid,i);
            printf("[PRINT2] T%d = %d \n",tid,i);
        }
    }
}
#include <stdio.h>
#include <omp.h>

int main()
{
    int i;
    int tid = omp_get_thread_num(); // read the thread's identifier
    for(i = 1; i <= 3; i++)
    {
        printf("[PRINT1] T%d = %d \n",tid,i);
        printf("[PRINT2] T%d = %d \n",tid,i);
    }
}
#include <stdio.h>
#include <omp.h>

int main()
{
    int i;
    #pragma omp parallel num_threads(2) // set the number of threads to 2
    {
        int tid = omp_get_thread_num(); // read the thread's identifier
        #pragma omp for ordered
        for(i = 1; i <= 3; i++)
        {
            #pragma omp ordered
            printf("[PRINT1] T%d = %d \n",tid,i);
            printf("[PRINT2] T%d = %d \n",tid,i);
        }
    }
}
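In map.c the ordered pragma binds only to the statement that follows it, so PRINT1 is serialized in loop order (1, 2, 3) while PRINT2 runs outside the ordered region and may interleave between threads. Serializing both prints needs braces; a sketch of that variant, otherwise identical to map.c:

#include <stdio.h>
#include <omp.h>

int main()
{
    int i;
    #pragma omp parallel num_threads(2)
    {
        int tid = omp_get_thread_num();
        #pragma omp for ordered
        for (i = 1; i <= 3; i++)
        {
            #pragma omp ordered
            { /* braces extend the ordered region to both prints */
                printf("[PRINT1] T%d = %d \n", tid, i);
                printf("[PRINT2] T%d = %d \n", tid, i);
            }
        }
    }
}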
GRANSAC.h
#pragma once #include <iostream> #include <cmath> #include <string> #include <random> #include <memory> #include <algorithm> #include <vector> #include <omp.h> #include "AbstractModel.h" namespace GRANSAC { // T - AbstractModel template <class T, int t_NumParams> class RANSAC { private: std::vector<std::shared_ptr<AbstractParameter>> m_Data; // All the data std::vector<std::shared_ptr<T>> m_SampledModels; // Vector of all sampled models std::shared_ptr<T> m_BestModel; // Pointer to the best model, valid only after Estimate() is called std::vector<std::shared_ptr<AbstractParameter>> m_BestInliers; int m_MaxIterations; // Number of iterations before termination VPFloat m_Threshold; // The threshold for computing model consensus VPFloat m_BestModelScore; // The score of the best model int m_BestModelIdx; std::vector<std::mt19937> m_RandEngines; // Mersenne twister high quality RNG that support *OpenMP* multi-threading public: RANSAC(void) { int nThreads = std::max(1, omp_get_max_threads()); std::cout << "[ INFO ]: Maximum usable threads: " << nThreads << std::endl; for (int i = 0; i < nThreads; ++i) { std::random_device SeedDevice; m_RandEngines.push_back(std::mt19937(SeedDevice())); } Reset(); }; virtual ~RANSAC(void) {}; void Reset(void) { // Clear sampled models, etc. and prepare for next call. Reset RANSAC estimator state m_Data.clear(); m_SampledModels.clear(); m_BestModelIdx = -1; m_BestModelScore = 0.0; }; void Initialize(VPFloat Threshold, int MaxIterations = 1000) { m_Threshold = Threshold; m_MaxIterations = MaxIterations; }; std::shared_ptr<T> GetBestModel(void) { return m_BestModel; }; const std::vector<std::shared_ptr<AbstractParameter>>& GetBestInliers(void) { return m_BestInliers; }; bool Estimate(const std::vector<std::shared_ptr<AbstractParameter>> &Data) { if (Data.size() <= t_NumParams) { std::cerr << "[ WARN ]: RANSAC - Number of data points is too less. Not doing anything." << std::endl; return false; } m_Data = Data; int DataSize = m_Data.size(); std::uniform_int_distribution<int> UniDist(0, int(DataSize - 1)); // Both inclusive std::vector<VPFloat> InlierFractionAccum(m_MaxIterations); std::vector<std::vector<std::shared_ptr<AbstractParameter>>> InliersAccum(m_MaxIterations); m_SampledModels.resize(m_MaxIterations); int nThreads = std::max(1, omp_get_max_threads()); omp_set_dynamic(0); // Explicitly disable dynamic teams omp_set_num_threads(nThreads); #pragma omp parallel for for (int i = 0; i < m_MaxIterations; ++i) { // Select t_NumParams random samples std::vector<std::shared_ptr<AbstractParameter>> RandomSamples(t_NumParams); std::vector<std::shared_ptr<AbstractParameter>> RemainderSamples = m_Data; // Without the chosen random samples std::shuffle(RemainderSamples.begin(), RemainderSamples.end(), m_RandEngines[omp_get_thread_num()]); // To avoid picking the same element more than once std::copy(RemainderSamples.begin(), RemainderSamples.begin() + t_NumParams, RandomSamples.begin()); RemainderSamples.erase(RemainderSamples.begin(), RemainderSamples.begin() + t_NumParams); std::shared_ptr<T> RandomModel = std::make_shared<T>(RandomSamples); // Check if the sampled model is the best so far std::pair<VPFloat, std::vector<std::shared_ptr<AbstractParameter>>> EvalPair = RandomModel->Evaluate(RemainderSamples, m_Threshold); InlierFractionAccum[i] = EvalPair.first; InliersAccum[i] = EvalPair.second; // Push back into history. 
Could be removed later m_SampledModels[i] = RandomModel; } for (int i = 0; i < m_MaxIterations; ++i) { if (InlierFractionAccum[i] > m_BestModelScore) { m_BestModelScore = InlierFractionAccum[i]; m_BestModelIdx = m_SampledModels.size() - 1; m_BestModel = m_SampledModels[i]; m_BestInliers = InliersAccum[i]; } } // std::cerr << "BestInlierFraction: " << m_BestModelScore << std::endl; Reset(); return true; }; }; } // namespace GRANSAC
#pragma once

#include <iostream>
#include <cmath>
#include <string>
#include <random>
#include <memory>
#include <algorithm>
#include <vector>
#include <omp.h>

#include "AbstractModel.h"

namespace GRANSAC
{
// T - AbstractModel
template <class T, int t_NumParams>
class RANSAC
{
private:
    std::vector<std::shared_ptr<AbstractParameter>> m_Data; // All the data
    std::vector<std::shared_ptr<T>> m_SampledModels; // Vector of all sampled models
    std::shared_ptr<T> m_BestModel; // Pointer to the best model, valid only after Estimate() is called
    std::vector<std::shared_ptr<AbstractParameter>> m_BestInliers;
    int m_MaxIterations; // Number of iterations before termination
    VPFloat m_Threshold; // The threshold for computing model consensus
    VPFloat m_BestModelScore; // The score of the best model
    int m_BestModelIdx;
    std::vector<std::mt19937> m_RandEngines; // Mersenne twister high quality RNG that support *OpenMP* multi-threading

public:
    RANSAC(void)
    {
        int nThreads = std::max(1, omp_get_max_threads());
        std::cout << "[ INFO ]: Maximum usable threads: " << nThreads << std::endl;
        for (int i = 0; i < nThreads; ++i)
        {
            std::random_device SeedDevice;
            m_RandEngines.push_back(std::mt19937(SeedDevice()));
        }
        Reset();
    };

    virtual ~RANSAC(void) {};

    void Reset(void)
    {
        // Clear sampled models, etc. and prepare for next call. Reset RANSAC estimator state
        m_Data.clear();
        m_SampledModels.clear();
        m_BestModelIdx = -1;
        m_BestModelScore = 0.0;
    };

    void Initialize(VPFloat Threshold, int MaxIterations = 1000)
    {
        m_Threshold = Threshold;
        m_MaxIterations = MaxIterations;
    };

    std::shared_ptr<T> GetBestModel(void) { return m_BestModel; };
    const std::vector<std::shared_ptr<AbstractParameter>>& GetBestInliers(void) { return m_BestInliers; };

    bool Estimate(const std::vector<std::shared_ptr<AbstractParameter>> &Data)
    {
        if (Data.size() <= t_NumParams)
        {
            std::cerr << "[ WARN ]: RANSAC - Number of data points is too less. Not doing anything." << std::endl;
            return false;
        }

        m_Data = Data;
        int DataSize = m_Data.size();
        std::uniform_int_distribution<int> UniDist(0, int(DataSize - 1)); // Both inclusive
        std::vector<VPFloat> InlierFractionAccum(m_MaxIterations);
        std::vector<std::vector<std::shared_ptr<AbstractParameter>>> InliersAccum(m_MaxIterations);
        m_SampledModels.resize(m_MaxIterations);

        int nThreads = std::max(1, omp_get_max_threads());
        omp_set_dynamic(0); // Explicitly disable dynamic teams
        omp_set_num_threads(nThreads);
        for (int i = 0; i < m_MaxIterations; ++i)
        {
            // Select t_NumParams random samples
            std::vector<std::shared_ptr<AbstractParameter>> RandomSamples(t_NumParams);
            std::vector<std::shared_ptr<AbstractParameter>> RemainderSamples = m_Data; // Without the chosen random samples
            std::shuffle(RemainderSamples.begin(), RemainderSamples.end(), m_RandEngines[omp_get_thread_num()]); // To avoid picking the same element more than once
            std::copy(RemainderSamples.begin(), RemainderSamples.begin() + t_NumParams, RandomSamples.begin());
            RemainderSamples.erase(RemainderSamples.begin(), RemainderSamples.begin() + t_NumParams);

            std::shared_ptr<T> RandomModel = std::make_shared<T>(RandomSamples);

            // Check if the sampled model is the best so far
            std::pair<VPFloat, std::vector<std::shared_ptr<AbstractParameter>>> EvalPair = RandomModel->Evaluate(RemainderSamples, m_Threshold);
            InlierFractionAccum[i] = EvalPair.first;
            InliersAccum[i] = EvalPair.second;

            // Push back into history. Could be removed later
            m_SampledModels[i] = RandomModel;
        }

        for (int i = 0; i < m_MaxIterations; ++i)
        {
            if (InlierFractionAccum[i] > m_BestModelScore)
            {
                m_BestModelScore = InlierFractionAccum[i];
                m_BestModelIdx = m_SampledModels.size() - 1;
                m_BestModel = m_SampledModels[i];
                m_BestInliers = InliersAccum[i];
            }
        }

        // std::cerr << "BestInlierFraction: " << m_BestModelScore << std::endl;
        Reset();
        return true;
    };
};
} // namespace GRANSAC
#pragma once

#include <iostream>
#include <cmath>
#include <string>
#include <random>
#include <memory>
#include <algorithm>
#include <vector>
#include <omp.h>

#include "AbstractModel.h"

namespace GRANSAC
{
// T - AbstractModel
template <class T, int t_NumParams>
class RANSAC
{
private:
    std::vector<std::shared_ptr<AbstractParameter>> m_Data; // All the data
    std::vector<std::shared_ptr<T>> m_SampledModels; // Vector of all sampled models
    std::shared_ptr<T> m_BestModel; // Pointer to the best model, valid only after Estimate() is called
    std::vector<std::shared_ptr<AbstractParameter>> m_BestInliers;
    int m_MaxIterations; // Number of iterations before termination
    VPFloat m_Threshold; // The threshold for computing model consensus
    VPFloat m_BestModelScore; // The score of the best model
    int m_BestModelIdx;
    std::vector<std::mt19937> m_RandEngines; // Mersenne twister high quality RNG that support *OpenMP* multi-threading

public:
    RANSAC(void)
    {
        int nThreads = std::max(1, omp_get_max_threads());
        std::cout << "[ INFO ]: Maximum usable threads: " << nThreads << std::endl;
        for (int i = 0; i < nThreads; ++i)
        {
            std::random_device SeedDevice;
            m_RandEngines.push_back(std::mt19937(SeedDevice()));
        }
        Reset();
    };

    virtual ~RANSAC(void) {};

    void Reset(void)
    {
        // Clear sampled models, etc. and prepare for next call. Reset RANSAC estimator state
        m_Data.clear();
        m_SampledModels.clear();
        m_BestModelIdx = -1;
        m_BestModelScore = 0.0;
    };

    void Initialize(VPFloat Threshold, int MaxIterations = 1000)
    {
        m_Threshold = Threshold;
        m_MaxIterations = MaxIterations;
    };

    std::shared_ptr<T> GetBestModel(void) { return m_BestModel; };
    const std::vector<std::shared_ptr<AbstractParameter>>& GetBestInliers(void) { return m_BestInliers; };

    bool Estimate(const std::vector<std::shared_ptr<AbstractParameter>> &Data)
    {
        if (Data.size() <= t_NumParams)
        {
            std::cerr << "[ WARN ]: RANSAC - Number of data points is too less. Not doing anything." << std::endl;
            return false;
        }

        m_Data = Data;
        int DataSize = m_Data.size();
        std::uniform_int_distribution<int> UniDist(0, int(DataSize - 1)); // Both inclusive
        std::vector<VPFloat> InlierFractionAccum(m_MaxIterations);
        std::vector<std::vector<std::shared_ptr<AbstractParameter>>> InliersAccum(m_MaxIterations);
        m_SampledModels.resize(m_MaxIterations);

        int nThreads = std::max(1, omp_get_max_threads());
        omp_set_dynamic(0); // Explicitly disable dynamic teams
        omp_set_num_threads(nThreads);
        #pragma omp parallel for
        for (int i = 0; i < m_MaxIterations; ++i)
        {
            // Select t_NumParams random samples
            std::vector<std::shared_ptr<AbstractParameter>> RandomSamples(t_NumParams);
            std::vector<std::shared_ptr<AbstractParameter>> RemainderSamples = m_Data; // Without the chosen random samples
            std::shuffle(RemainderSamples.begin(), RemainderSamples.end(), m_RandEngines[omp_get_thread_num()]); // To avoid picking the same element more than once
            std::copy(RemainderSamples.begin(), RemainderSamples.begin() + t_NumParams, RandomSamples.begin());
            RemainderSamples.erase(RemainderSamples.begin(), RemainderSamples.begin() + t_NumParams);

            std::shared_ptr<T> RandomModel = std::make_shared<T>(RandomSamples);

            // Check if the sampled model is the best so far
            std::pair<VPFloat, std::vector<std::shared_ptr<AbstractParameter>>> EvalPair = RandomModel->Evaluate(RemainderSamples, m_Threshold);
            InlierFractionAccum[i] = EvalPair.first;
            InliersAccum[i] = EvalPair.second;

            // Push back into history. Could be removed later
            m_SampledModels[i] = RandomModel;
        }

        for (int i = 0; i < m_MaxIterations; ++i)
        {
            if (InlierFractionAccum[i] > m_BestModelScore)
            {
                m_BestModelScore = InlierFractionAccum[i];
                m_BestModelIdx = m_SampledModels.size() - 1;
                m_BestModel = m_SampledModels[i];
                m_BestInliers = InliersAccum[i];
            }
        }

        // std::cerr << "BestInlierFraction: " << m_BestModelScore << std::endl;
        Reset();
        return true;
    };
};
} // namespace GRANSAC
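The one thread-safety device in RANSAC::Estimate is m_RandEngines: one mt19937 per OpenMP thread, indexed by omp_get_thread_num(), so the shuffles inside the parallel for never share mutable RNG state. The same pattern in plain C with POSIX rand_r (parallel_draws and the seed base are illustrative, not part of GRANSAC):

#include <stdlib.h>
#include <omp.h>

void parallel_draws(unsigned *out, int n)
{
    #pragma omp parallel
    {
        /* per-thread RNG state: no race on a shared generator */
        unsigned seed = 42u + (unsigned) omp_get_thread_num();
        #pragma omp for
        for (int i = 0; i < n; i++)
            out[i] = (unsigned) rand_r(&seed);
    }
}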
pr58551.c
/* PR middle-end/58551 */ /* { dg-do compile } */ /* { dg-options "-O0 -fopenmp" } */ void foo (int *a) { int i; for (i = 0; i < 8; i++) #pragma omp task if (a[i]) __builtin_abort (); } void bar (int, int); void baz (int *a) { int i; for (i = 0; i < 8; i++) #pragma omp task if (a[i]) { int j, k; for (j = 0; j < 10; j++) for (k = 0; k < 8; k++) bar (j, k); for (k = 0; k < 12; k++) bar (-1, k); __builtin_abort (); } }
/* PR middle-end/58551 */ /* { dg-do compile } */ /* { dg-options "-O0 -fopenmp" } */ void foo(int *a) { int i; for (i = 0; i < 8; i++) if (a[i]) __builtin_abort(); } void bar(int, int); void baz(int *a) { int i; for (i = 0; i < 8; i++) if (a[i]) { int j, k; for (j = 0; j < 10; j++) for (k = 0; k < 8; k++) bar(j, k); for (k = 0; k < 12; k++) bar(-1, k); __builtin_abort(); } }
/* PR middle-end/58551 */ /* { dg-do compile } */ /* { dg-options "-O0 -fopenmp" } */ void foo(int *a) { int i; for (i = 0; i < 8; i++) #pragma omp task if (a[i]) __builtin_abort(); } void bar(int, int); void baz(int *a) { int i; for (i = 0; i < 8; i++) #pragma omp task if (a[i]) { int j, k; for (j = 0; j < 10; j++) for (k = 0; k < 8; k++) bar(j, k); for (k = 0; k < 12; k++) bar(-1, k); __builtin_abort(); } }
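pr58551.c is easy to misread: the if (a[i]) after each task pragma is the OpenMP task clause (choosing a deferred or undeferred task), not a C statement, so under -fopenmp the task body __builtin_abort() is reached for every i. Stripping the pragma, as the middle column does, re-parses the same tokens as an ordinary if statement that aborts only when a[i] is nonzero. The two readings side by side (function names are illustrative):

void with_openmp(int *a)
{
    int i;
    for (i = 0; i < 8; i++)
        #pragma omp task if (a[i])   /* clause: controls task deferral */
        __builtin_abort();           /* task body, runs for every i    */
}

void without_openmp(int *a)
{
    int i;
    for (i = 0; i < 8; i++)
        if (a[i])                    /* plain C: aborts only if nonzero */
            __builtin_abort();
}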
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 24; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-8,12),ceild(4*t2-Nz-11,24));t3<=min(min(floord(4*Nt+Ny-9,24),floord(2*t1+Ny-3,24)),floord(4*t2+Ny-9,24));t3++) { for (t4=max(max(ceild(t1-124,128),ceild(4*t2-Nz-243,256)),ceild(24*t3-Ny-243,256));t4<=min(min(min(floord(4*Nt+Nx-9,256),floord(2*t1+Nx-3,256)),floord(4*t2+Nx-9,256)),floord(24*t3+Nx+11,256));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(256*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) { lbv=max(256*t4,4*t5+4); ubv=min(256*t4+255,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ 
(-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 24; tile_size[3] = 256; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library. 
* * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= 2 * Nt - 2; t1++) { lbp = ceild(t1 + 2, 2); ubp = min(floord(4 * Nt + Nz - 9, 4), floord(2 * t1 + Nz - 4, 4)); for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(ceild(t1 - 8, 12), ceild(4 * t2 - Nz - 11, 24)); t3 <= min(min(floord(4 * Nt + Ny - 9, 24), floord(2 * t1 + Ny - 3, 24)), floord(4 * t2 + Ny - 9, 24)); t3++) { for (t4 = max(max(ceild(t1 - 124, 128), ceild(4 * t2 - Nz - 243, 256)), ceild(24 * t3 - Ny - 243, 256)); t4 <= min(min(min(floord(4 * Nt + Nx - 9, 256), floord(2 * t1 + Nx - 3, 256)), floord(4 * t2 + Nx - 9, 256)), floord(24 * t3 + Nx + 11, 256)); t4++) { for (t5 = max(max(max(ceild(t1, 2), ceild(4 * t2 - Nz + 5, 4)), ceild(24 * t3 - Ny + 5, 4)), ceild(256 * t4 - Nx + 5, 4)); t5 <= floord(t1 + 1, 2); t5++) { for (t6 = max(4 * t2, -4 * t1 + 4 * t2 + 8 * t5 - 3); t6 <= min(min(4 * t2 + 3, -4 * t1 + 4 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = max(24 * t3, 4 * t5 + 4); t7 <= min(24 * t3 + 23, 4 * t5 + Ny - 5); t7++) { lbv = max(256 * t4, 4 * t5 + 4); ubv = min(256 * t4 + 255, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((((((((((((coef[0][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (coef[1][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]))) + (coef[3][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * 
(A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 1] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef[4][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[5][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]))) + (coef[6][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef[7][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[8][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]))) + (coef[9][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef[10][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[11][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]))) + (coef[12][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 24; tile_size[3] = 256; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library. 
* * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= 2 * Nt - 2; t1++) { lbp = ceild(t1 + 2, 2); ubp = min(floord(4 * Nt + Nz - 9, 4), floord(2 * t1 + Nz - 4, 4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(ceild(t1 - 8, 12), ceild(4 * t2 - Nz - 11, 24)); t3 <= min(min(floord(4 * Nt + Ny - 9, 24), floord(2 * t1 + Ny - 3, 24)), floord(4 * t2 + Ny - 9, 24)); t3++) { for (t4 = max(max(ceild(t1 - 124, 128), ceild(4 * t2 - Nz - 243, 256)), ceild(24 * t3 - Ny - 243, 256)); t4 <= min(min(min(floord(4 * Nt + Nx - 9, 256), floord(2 * t1 + Nx - 3, 256)), floord(4 * t2 + Nx - 9, 256)), floord(24 * t3 + Nx + 11, 256)); t4++) { for (t5 = max(max(max(ceild(t1, 2), ceild(4 * t2 - Nz + 5, 4)), ceild(24 * t3 - Ny + 5, 4)), ceild(256 * t4 - Nx + 5, 4)); t5 <= floord(t1 + 1, 2); t5++) { for (t6 = max(4 * t2, -4 * t1 + 4 * t2 + 8 * t5 - 3); t6 <= min(min(4 * t2 + 3, -4 * t1 + 4 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = max(24 * t3, 4 * t5 + 4); t7 <= min(24 * t3 + 23, 4 * t5 + Ny - 5); t7++) { lbv = max(256 * t4, 4 * t5 + 4); ubv = min(256 * t4 + 255, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((((((((((((coef[0][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (coef[1][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]))) + 
(coef[3][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 1] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef[4][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[5][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]))) + (coef[6][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef[7][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[8][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]))) + (coef[9][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef[10][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]))) + (coef[11][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]))) + (coef[12][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4] + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
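In the 3d25pt rows the main OpenMP change is a single pragma: parallel for with private(lbv,ubv,t3,...,t8) on the t2 tile loop, privatizing the inner counters because they are declared at function scope and would otherwise be shared across threads. The same idiom on a plain untiled sweep (a minimal 7-point Jacobi sketch, not the tiled 25-point kernel above):

#include <stddef.h>

void sweep(double *out, const double *in, int Nz, int Ny, int Nx)
{
    int i, j, k; /* function-scope counters: j and k must be private */
    #pragma omp parallel for private(j, k)
    for (i = 1; i < Nz - 1; i++) /* the parallel-for index is privatized implicitly */
        for (j = 1; j < Ny - 1; j++)
            for (k = 1; k < Nx - 1; k++)
                out[((size_t)i * Ny + j) * Nx + k] =
                    (in[((size_t)(i - 1) * Ny + j) * Nx + k] +
                     in[((size_t)(i + 1) * Ny + j) * Nx + k] +
                     in[((size_t)i * Ny + (j - 1)) * Nx + k] +
                     in[((size_t)i * Ny + (j + 1)) * Nx + k] +
                     in[((size_t)i * Ny + j) * Nx + (k - 1)] +
                     in[((size_t)i * Ny + j) * Nx + (k + 1)]) / 6.0;
}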
GB_binop__pair_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pair_uint32 // A.*B function (eWiseMult): GB_AemultB__pair_uint32 // A*D function (colscale): GB_AxD__pair_uint32 // D*A function (rowscale): GB_DxB__pair_uint32 // C+=B function (dense accum): GB_Cdense_accumB__pair_uint32 // C+=b function (dense accum): GB_Cdense_accumb__pair_uint32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pair_uint32 // C=scalar+B (none) // C=scalar+B' (none) // C=A+scalar (none) // C=A'+scalar (none) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = 1 #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = 1 ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_UINT32 || GxB_NO_PAIR_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__pair_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__pair_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__pair_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__pair_uint32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__pair_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__pair_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__pair_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pair_uint32 // A.*B function (eWiseMult): GB_AemultB__pair_uint32 // A*D function (colscale): GB_AxD__pair_uint32 // D*A function (rowscale): GB_DxB__pair_uint32 // C+=B function (dense accum): GB_Cdense_accumB__pair_uint32 // C+=b function (dense accum): GB_Cdense_accumb__pair_uint32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pair_uint32 // C=scalar+B (none) // C=scalar+B' (none) // C=A+scalar (none) // C=A'+scalar (none) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = 1 #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = 1 ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_UINT32 || GxB_NO_PAIR_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__pair_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__pair_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__pair_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__pair_uint32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__pair_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__pair_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__pair_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pair_uint32 // A.*B function (eWiseMult): GB_AemultB__pair_uint32 // A*D function (colscale): GB_AxD__pair_uint32 // D*A function (rowscale): GB_DxB__pair_uint32 // C+=B function (dense accum): GB_Cdense_accumB__pair_uint32 // C+=b function (dense accum): GB_Cdense_accumb__pair_uint32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pair_uint32 // C=scalar+B (none) // C=scalar+B' (none) // C=A+scalar (none) // C=A'+scalar (none) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = 1 #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = 1 ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_UINT32 || GxB_NO_PAIR_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__pair_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__pair_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__pair_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__pair_uint32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__pair_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__pair_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__pair_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
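/* Because GB_BINOP(z,x,y) is defined in this file as z = 1 for the PAIR operator, every numeric kernel above degenerates to a constant fill over the entries present in the pattern. The sketch below is a reconstruction from the row's own macros, not SuiteSparse source; the standalone function name pair_uint32_apply_expanded is invented for illustration. */
#include <stdint.h>
static void pair_uint32_apply_expanded (uint32_t *Cx, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        /* GB_GETA and GB_GETB expand to nothing, so only the store remains */
        Cx [p] = 1 ;
    }
}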
GB_unaryop__lnot_int16_bool.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int16_bool // op(A') function: GB_tran__lnot_int16_bool // C type: int16_t // A type: bool // cast: int16_t cij = (int16_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ bool #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ int16_t z = (int16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int16_bool ( int16_t *Cx, // Cx and Ax may be aliased bool *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int16_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int16_bool // op(A') function: GB_tran__lnot_int16_bool // C type: int16_t // A type: bool // cast: int16_t cij = (int16_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ bool #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ int16_t z = (int16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int16_bool ( int16_t *Cx, // Cx and Ax may be aliased bool *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int16_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_int16_bool // op(A') function: GB_tran__lnot_int16_bool // C type: int16_t // A type: bool // cast: int16_t cij = (int16_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ bool #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, aij) \ int16_t z = (int16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_int16_bool ( int16_t *Cx, // Cx and Ax may be aliased bool *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_int16_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
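/* Expanding GB_CAST_OP(p,p) with the macros defined in this file yields the loop body below: read the bool, typecast it to int16_t, then apply logical NOT. This is an illustrative reconstruction of the macro expansion; the standalone function name lnot_int16_bool_expanded is invented, and the serial loop stands in for the nthreads-parallel version above. */
#include <stdbool.h>
#include <stdint.h>
static void lnot_int16_bool_expanded (int16_t *Cx, const bool *Ax, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        bool aij = Ax [p] ;           /* GB_GETA (aij, Ax, p)  */
        int16_t z = (int16_t) aij ;   /* GB_CASTING (z, aij)   */
        Cx [p] = !(z != 0) ;          /* GB_OP (GB_CX (p), z)  */
    }
}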
struct.c
// RUN: %libomptarget-compile-generic -fopenmp-extensions // RUN: %libomptarget-run-generic | %fcheck-generic -strict-whitespace // Wrong results on amdgpu // XFAIL: amdgcn-amd-amdhsa // XFAIL: amdgcn-amd-amdhsa-oldDriver #include <omp.h> #include <stdio.h> #define CHECK_PRESENCE(Var1, Var2, Var3) \ printf(" presence of %s, %s, %s: %d, %d, %d\n", \ #Var1, #Var2, #Var3, \ omp_target_is_present(&(Var1), omp_get_default_device()), \ omp_target_is_present(&(Var2), omp_get_default_device()), \ omp_target_is_present(&(Var3), omp_get_default_device())) #define CHECK_VALUES(Var1, Var2) \ printf(" values of %s, %s: %d, %d\n", \ #Var1, #Var2, (Var1), (Var2)) int main() { struct S { int i; int j; } s; // CHECK: presence of s, s.i, s.j: 0, 0, 0 CHECK_PRESENCE(s, s.i, s.j); // ======================================================================= // Check that ompx_hold keeps entire struct present. // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on first member\n"); s.i = 20; s.j = 30; #pragma omp target data map(tofrom: s) map(ompx_hold,tofrom: s.i) \ map(tofrom: s.j) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(tofrom: s) { s.i = 21; s.j = 31; } #pragma omp target exit data map(delete: s, s.i) // ompx_hold on s.i applies to all of s. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on last member\n"); s.i = 20; s.j = 30; #pragma omp target data map(tofrom: s) map(tofrom: s.i) \ map(ompx_hold,tofrom: s.j) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(tofrom: s) { s.i = 21; s.j = 31; } #pragma omp target exit data map(delete: s, s.i) // ompx_hold on s.j applies to all of s. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on struct\n"); s.i = 20; s.j = 30; #pragma omp target data map(ompx_hold,tofrom: s) map(tofrom: s.i) \ map(tofrom: s.j) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(tofrom: s) { s.i = 21; s.j = 31; } #pragma omp target exit data map(delete: s, s.i) // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ======================================================================= // Check that transfer to/from host checks reference count correctly. 
// ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: parent DynRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; #pragma omp target data map(ompx_hold, tofrom: s) #pragma omp target data map(ompx_hold, tofrom: s) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(from: s.i, s.j) { s.i = 21; s.j = 31; } // No transfer here even though parent's DynRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); #pragma omp target map(to: s.i, s.j) { // No transfer here even though parent's DynRefCount=1. // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_VALUES(s.i, s.j); } } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: parent HoldRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; #pragma omp target data map(tofrom: s) #pragma omp target data map(tofrom: s) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(ompx_hold, from: s.i, s.j) { s.i = 21; s.j = 31; } // No transfer here even though parent's HoldRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); #pragma omp target map(ompx_hold, to: s.i, s.j) { // No transfer here even though parent's HoldRefCount=1. // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_VALUES(s.i, s.j); } } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} // // At the beginning of a region, if the parent's TotalRefCount=1, then the // transfer should happen. // // At the end of a region, it also must be true that the reference count being // decremented is the reference count that is 1. printf("check: parent TotalRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; #pragma omp target data map(ompx_hold, tofrom: s) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(ompx_hold, tofrom: s.i, s.j) { s.i = 21; s.j = 31; } #pragma omp target exit data map(from: s.i, s.j) // No transfer here even though parent's TotalRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); return 0; }
// RUN: %libomptarget-compile-generic -fopenmp-extensions // RUN: %libomptarget-run-generic | %fcheck-generic -strict-whitespace // Wrong results on amdgpu // XFAIL: amdgcn-amd-amdhsa // XFAIL: amdgcn-amd-amdhsa-oldDriver #include <omp.h> #include <stdio.h> #define CHECK_PRESENCE(Var1, Var2, Var3) \ printf(" presence of %s, %s, %s: %d, %d, %d\n", \ #Var1, #Var2, #Var3, \ omp_target_is_present(&(Var1), omp_get_default_device()), \ omp_target_is_present(&(Var2), omp_get_default_device()), \ omp_target_is_present(&(Var3), omp_get_default_device())) #define CHECK_VALUES(Var1, Var2) \ printf(" values of %s, %s: %d, %d\n", \ #Var1, #Var2, (Var1), (Var2)) int main() { struct S { int i; int j; } s; // CHECK: presence of s, s.i, s.j: 0, 0, 0 CHECK_PRESENCE(s, s.i, s.j); // ======================================================================= // Check that ompx_hold keeps entire struct present. // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on first member\n"); s.i = 20; s.j = 30; { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); s.i = 21; s.j = 31; // ompx_hold on s.i applies to all of s. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on last member\n"); s.i = 20; s.j = 30; { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); s.i = 21; s.j = 31; // ompx_hold on s.j applies to all of s. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on struct\n"); s.i = 20; s.j = 30; { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); s.i = 21; s.j = 31; // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ======================================================================= // Check that transfer to/from host checks reference count correctly. // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: parent DynRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(from: s.i, s.j) { s.i = 21; s.j = 31; } // No transfer here even though parent's DynRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // No transfer here even though parent's DynRefCount=1. 
// CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_VALUES(s.i, s.j); // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: parent HoldRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(ompx_hold, from: s.i, s.j) { s.i = 21; s.j = 31; } // No transfer here even though parent's HoldRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // No transfer here even though parent's HoldRefCount=1. // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_VALUES(s.i, s.j); // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} // // At the beginning of a region, if the parent's TotalRefCount=1, then the // transfer should happen. // // At the end of a region, it also must be true that the reference count being // decremented is the reference count that is 1. printf("check: parent TotalRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(ompx_hold, tofrom: s.i, s.j) { s.i = 21; s.j = 31; } // No transfer here even though parent's TotalRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); return 0; }
// RUN: %libomptarget-compile-generic -fopenmp-extensions // RUN: %libomptarget-run-generic | %fcheck-generic -strict-whitespace // Wrong results on amdgpu // XFAIL: amdgcn-amd-amdhsa // XFAIL: amdgcn-amd-amdhsa-oldDriver #include <omp.h> #include <stdio.h> #define CHECK_PRESENCE(Var1, Var2, Var3) \ printf(" presence of %s, %s, %s: %d, %d, %d\n", \ #Var1, #Var2, #Var3, \ omp_target_is_present(&(Var1), omp_get_default_device()), \ omp_target_is_present(&(Var2), omp_get_default_device()), \ omp_target_is_present(&(Var3), omp_get_default_device())) #define CHECK_VALUES(Var1, Var2) \ printf(" values of %s, %s: %d, %d\n", \ #Var1, #Var2, (Var1), (Var2)) int main() { struct S { int i; int j; } s; // CHECK: presence of s, s.i, s.j: 0, 0, 0 CHECK_PRESENCE(s, s.i, s.j); // ======================================================================= // Check that ompx_hold keeps entire struct present. // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on first member\n"); s.i = 20; s.j = 30; #pragma omp target data map(tofrom: s) map(ompx_hold,tofrom: s.i) \ map(tofrom: s.j) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(tofrom: s) { s.i = 21; s.j = 31; } #pragma omp target exit data map(delete: s, s.i) // ompx_hold on s.i applies to all of s. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on last member\n"); s.i = 20; s.j = 30; #pragma omp target data map(tofrom: s) map(tofrom: s.i) \ map(ompx_hold,tofrom: s.j) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(tofrom: s) { s.i = 21; s.j = 31; } #pragma omp target exit data map(delete: s, s.i) // ompx_hold on s.j applies to all of s. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: ompx_hold only on struct\n"); s.i = 20; s.j = 30; #pragma omp target data map(ompx_hold,tofrom: s) map(tofrom: s.i) \ map(tofrom: s.j) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(tofrom: s) { s.i = 21; s.j = 31; } #pragma omp target exit data map(delete: s, s.i) // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ======================================================================= // Check that transfer to/from host checks reference count correctly. 
// ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: parent DynRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; #pragma omp target data map(ompx_hold, tofrom: s) #pragma omp target data map(ompx_hold, tofrom: s) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(from: s.i, s.j) { s.i = 21; s.j = 31; } // No transfer here even though parent's DynRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); #pragma omp target map(to: s.i, s.j) { // No transfer here even though parent's DynRefCount=1. // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_VALUES(s.i, s.j); } } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} printf("check: parent HoldRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; #pragma omp target data map(tofrom: s) #pragma omp target data map(tofrom: s) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(ompx_hold, from: s.i, s.j) { s.i = 21; s.j = 31; } // No transfer here even though parent's HoldRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); #pragma omp target map(ompx_hold, to: s.i, s.j) { // No transfer here even though parent's HoldRefCount=1. // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_VALUES(s.i, s.j); } } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); // ----------------------------------------------------------------------- // CHECK-LABEL: check:{{.*}} // // At the beginning of a region, if the parent's TotalRefCount=1, then the // transfer should happen. // // At the end of a region, it also must be true that the reference count being // decremented is the reference count that is 1. printf("check: parent TotalRefCount=1 is not sufficient for transfer\n"); s.i = 20; s.j = 30; #pragma omp target data map(ompx_hold, tofrom: s) { // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 CHECK_PRESENCE(s, s.i, s.j); #pragma omp target map(ompx_hold, tofrom: s.i, s.j) { s.i = 21; s.j = 31; } #pragma omp target exit data map(from: s.i, s.j) // No transfer here even though parent's TotalRefCount=1. // CHECK-NEXT: presence of s, s.i, s.j: 1, 1, 1 // CHECK-NEXT: values of s.i, s.j: 20, 30 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); } // CHECK-NEXT: presence of s, s.i, s.j: 0, 0, 0 // CHECK-NEXT: values of s.i, s.j: 21, 31 CHECK_PRESENCE(s, s.i, s.j); CHECK_VALUES(s.i, s.j); return 0; }
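/* The checks above all exercise one rule: a device-to-host transfer and deallocation happen only when the ending mapping removes the last remaining reference to the data, so a parent struct whose DynRefCount, HoldRefCount, or even TotalRefCount is 1 does not by itself trigger a transfer for its members. The sketch below is a deliberately simplified conceptual model of that bookkeeping, not libomptarget's actual data structures; the type and function names are invented. */
#include <stdbool.h>
typedef struct
{
  int DynRefCount;  /* references from ordinary map clauses  */
  int HoldRefCount; /* references from ompx_hold map clauses */
} MapEntry;

/* On region exit, drop the counter that matches the ending mapping; the
   data may be copied back and released only when no reference of either
   kind remains. */
static bool release_and_copy_back(MapEntry *e, bool ending_hold_ref)
{
  if (ending_hold_ref)
    e->HoldRefCount--;
  else
    e->DynRefCount--;
  return e->DynRefCount + e->HoldRefCount == 0;
}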
main.c
#include "header.h" #include "FILE/nrutil.h" #include "FILE/stat.c" #include "cost.c" int main(int argc, const char * argv[]) { // Declare Variables FILE *inp, *JIN, *HUR, *OUT, *PRT, *JYW, *ASA, *IMS, *JJW; char buf[255], frname[255]; int stime; long ltime; int ind, ite, a, b, i, j, k, l, v, accept, gcount, mcount, mutmp, *count, show1, show2; double num, den, un, ratio; double old_like_beta, new_like_beta, old_like_theta, new_like_theta; double update_like_samp, update_like_item, tmp_oldmu, tmp_newmu; double post_a, post_b, school_a, school_b; double *old_samp_distance, *new_samp_distance, *sample_samp_like; double *old_item_distance, *new_item_distance, *sample_item_like; double **sum_mu, **mu_dist, **sum_mu_dist; double **sample_tau, *sum_tau, *var_tau; double **sample_sigma, *sum_sigma, *var_sigma; double **sample_delta, *sum_delta, *var_delta; double **sample_gamma, *sum_gamma, *var_gamma; double **sample_varphi, *sum_varphi, *var_varphi; double var_fix, avg_fix, *var_ran, *avg_ran, avg_beta, var_beta; MM = atoi(argv[1]); // Set Random Seed ltime = time(NULL); stime = (unsigned int)ltime/2; srand(stime); printf("nseed = %d\n", stime); // Input Number of Thread /*# pragma omp parallel { #if defined (_OPENMP) k = omp_get_num_threads(); printf("k = %d\n", k); srand(((unsigned int)time(NULL))^k); #endif }*/ // Input Parameters inp = fopen("DATA/parameter.txt", "r"); if(inp == NULL) {printf("Can't open data file\n"); return 0;} fscanf(inp, "%d", &niter); fscanf(inp, "%d", &nburn); fscanf(inp, "%d", &thin); fscanf(inp, "%d", &print); fscanf(inp, "%d", &repeat); fscanf(inp, "%lf", &jump_beta); fscanf(inp, "%lf", &jump_theta); fscanf(inp, "%lf", &jump_mu); fscanf(inp, "%lf", &jump_W); fclose(inp); // The Number of Respondents by Schools ncount = ivector(1, nSCHOOL); inp = fopen("DATA/count.txt", "r"); for(i = 1; i <= nSCHOOL; i++) fscanf(inp, "%d", &ncount[i]); fclose(inp); jump_Z = dvector(1, 10); inp = fopen("DATA/jumprule.txt", "r"); for(i = 1; i <= 10; i++) fscanf(inp, "%lf", &jump_Z[i]); fclose(inp); jump_index = imatrix(1, nSCHOOL, 1, nITEM); inp = fopen("DATA/jumpitem.txt", "r"); for(i = 1; i <= nSCHOOL; i++) for(j = 1; j <= nITEM; j++) fscanf(inp, "%d", &jump_index[i][j]); fclose(inp); // Declare typedef structure and set array of variables in typedef structure totalsize = sizeof(SCHOOL) + sizeof(int) * (nMAX+1)*(nITEM+1); totalsize += sizeof(int) * (nMAX+1) + sizeof(int) * (nITEM+1); totalsize += sizeof(int) * (nITEM+1)*(nMAX+1)*(nMAX+1); totalsize += sizeof(int) * (nMAX+1)*(nITEM+1)*(nITEM+1); totalsize += sizeof(double) * ((nITEM+1)*2 + (nMAX+1)*2) + sizeof(double) * ((nITEM+1)*(nITEM+1)*2); totalsize += sizeof(double) * ((nMAX+1)*(nDIM+1)*4 + (nITEM+1)*(nDIM+1)*2); totalsize += sizeof(double) * (((niter-nburn)/thin+1)*(nITEM+1) + (nITEM+1)*3); totalsize += sizeof(double) * (((niter-nburn)/thin+1)*(nMAX+1) + (nMAX+1)*3); totalsize += sizeof(double) * (((niter-nburn)/thin+1)*((nMAX+1)*(nDIM+1) + (nITEM+1)*(nDIM+1))); totalsize += sizeof(double) * (((niter-nburn)/thin+1) + (nDIM+1)); totalsize += sizeof(double) * ((nMAX+1)*(nDIM+1)*2 + (nITEM+1)*(nDIM+1)*2 + (nMAX+1) + (nITEM+1)); totalsize += sizeof(double) * ((nITEM+1)*(nITEM+1)*3); SCHOOL = (YEWON *)malloc(totalsize * (nSCHOOL+1)); for(k = 0; k <= nSCHOOL; k++){ SCHOOL[k].cbsize = totalsize; SCHOOL[k].dataset = (int**)malloc(sizeof(int*)*(nMAX+1)); SCHOOL[k].count_samp = (int*)malloc(sizeof(int*)*(nMAX+1)); SCHOOL[k].count_item = (int*)malloc(sizeof(int*)*(nITEM+1)); SCHOOL[k].Y = (int***)malloc(sizeof(int**)*(nITEM+1)); 
SCHOOL[k].U = (int***)malloc(sizeof(int**)*(nMAX+1)); SCHOOL[k].oldbeta = (double*)malloc(sizeof(double)*(nITEM+1)); SCHOOL[k].newbeta = (double*)malloc(sizeof(double)*(nITEM+1)); SCHOOL[k].oldtheta = (double*)malloc(sizeof(double)*(nMAX+1)); SCHOOL[k].newtheta = (double*)malloc(sizeof(double)*(nMAX+1)); SCHOOL[k].old_Zsamp = (double**)malloc(sizeof(double*)*(nMAX+1)); SCHOOL[k].new_Zsamp = (double**)malloc(sizeof(double*)*(nMAX+1)); SCHOOL[k].old_Zmean = (double**)malloc(sizeof(double*)*(nMAX+1)); SCHOOL[k].new_Zmean = (double**)malloc(sizeof(double*)*(nMAX+1)); SCHOOL[k].old_Zitem = (double**)malloc(sizeof(double*)*(nITEM+1)); SCHOOL[k].new_Zitem = (double**)malloc(sizeof(double*)*(nITEM+1)); SCHOOL[k].mean_Z = (double*)malloc(sizeof(double)*(nDIM+1)); SCHOOL[k].sample_beta = (double**)malloc(sizeof(double*)*((niter-nburn)/thin+1)); SCHOOL[k].sample_theta = (double**)malloc(sizeof(double*)*((niter-nburn)/thin+1)); SCHOOL[k].sample_sigma = (double*)malloc(sizeof(double)*((niter-nburn)/thin+1)); SCHOOL[k].sum_beta = (double*)malloc(sizeof(double)*(nITEM+1)); SCHOOL[k].var_beta = (double*)malloc(sizeof(double)*(nITEM+1)); SCHOOL[k].acc_beta = (double*)malloc(sizeof(double)*(nITEM+1)); SCHOOL[k].sum_theta = (double*)malloc(sizeof(double)*(nMAX+1)); SCHOOL[k].var_theta = (double*)malloc(sizeof(double)*(nMAX+1)); SCHOOL[k].acc_theta = (double*)malloc(sizeof(double)*(nMAX+1)); SCHOOL[k].sample_Zsamp = (double***)malloc(sizeof(double**)*((niter-nburn)/thin+1)); SCHOOL[k].sample_Zitem = (double***)malloc(sizeof(double**)*((niter-nburn)/thin+1)); SCHOOL[k].sample_item_mat = (double**)malloc(sizeof(double*)*(nITEM+1)); SCHOOL[k].sum_Zsamp = (double**)malloc(sizeof(double*)*(nMAX+1)); SCHOOL[k].var_Zsamp = (double**)malloc(sizeof(double*)*(nMAX+1)); SCHOOL[k].acc_Zsamp = (double*)malloc(sizeof(double)*(nMAX+1)); SCHOOL[k].sum_Zitem = (double**)malloc(sizeof(double*)*(nITEM+1)); SCHOOL[k].var_Zitem = (double**)malloc(sizeof(double*)*(nITEM+1)); SCHOOL[k].acc_Zitem = (double*)malloc(sizeof(double)*(nITEM+1)); SCHOOL[k].old_item_mat = (double**)malloc(sizeof(double*)*(nITEM+1)); SCHOOL[k].new_item_mat = (double**)malloc(sizeof(double*)*(nITEM+1)); SCHOOL[k].sum_item_mat = (double**)malloc(sizeof(double*)*(nITEM+1)); SCHOOL[k].var_item_mat = (double**)malloc(sizeof(double*)*(nITEM+1)); for(i = 0; i <= nMAX; i++) SCHOOL[k].dataset[i] = (int*)malloc(sizeof(int)*(nITEM+1)); for(i = 0; i <= nITEM; i++){ SCHOOL[k].Y[i] = (int**)malloc(sizeof(int*)*(nMAX+1)); for(a = 0; a <= nMAX; a++) SCHOOL[k].Y[i][a] = (int*)malloc(sizeof(int)*(nMAX+1)); } for(i = 0; i <= nMAX; i++){ SCHOOL[k].U[i] = (int**)malloc(sizeof(int*)*(nITEM+1)); for(a = 0; a <= nITEM; a++) SCHOOL[k].U[i][a] = (int*)malloc(sizeof(int)*(nITEM+1)); } for(i = 0; i <= nMAX; i++){ SCHOOL[k].old_Zsamp[i] = (double*)malloc(sizeof(double)*(nDIM+1)); SCHOOL[k].new_Zsamp[i] = (double*)malloc(sizeof(double)*(nDIM+1)); SCHOOL[k].old_Zmean[i] = (double*)malloc(sizeof(double)*(nDIM+1)); SCHOOL[k].new_Zmean[i] = (double*)malloc(sizeof(double)*(nDIM+1)); } for(i = 0; i <= nITEM; i++){ SCHOOL[k].old_Zitem[i] = (double*)malloc(sizeof(double)*(nDIM+1)); SCHOOL[k].new_Zitem[i] = (double*)malloc(sizeof(double)*(nDIM+1)); } for(i = 0; i <= (niter-nburn)/thin; i++){ SCHOOL[k].sample_beta[i] = (double*)malloc(sizeof(double)*(nITEM+1)); SCHOOL[k].sample_theta[i] = (double*)malloc(sizeof(double)*(nMAX+1)); SCHOOL[k].sample_Zsamp[i] = (double**)malloc(sizeof(double*)*(nMAX+1)); SCHOOL[k].sample_Zitem[i] = (double**)malloc(sizeof(double*)*(nITEM+1)); for(j = 0; j <= nMAX; 
j++) SCHOOL[k].sample_Zsamp[i][j] = (double*)malloc(sizeof(double)*(nDIM+1)); for(j = 0; j <= nITEM; j++) SCHOOL[k].sample_Zitem[i][j] = (double*)malloc(sizeof(double)*(nDIM+1)); } for(i = 0; i <= nMAX; i++){ SCHOOL[k].sum_Zsamp[i] = (double*)malloc(sizeof(double)*(nDIM+1)); SCHOOL[k].var_Zsamp[i] = (double*)malloc(sizeof(double)*(nDIM+1)); } for(i = 0; i <= nITEM; i++){ SCHOOL[k].sum_Zitem[i] = (double*)malloc(sizeof(double)*(nDIM+1)); SCHOOL[k].var_Zitem[i] = (double*)malloc(sizeof(double)*(nDIM+1)); } for(i = 0; i <= nITEM; i++){ SCHOOL[k].sample_item_mat[i] = (double*)malloc(sizeof(double)*(nITEM+1)); SCHOOL[k].old_item_mat[i] = (double*)malloc(sizeof(double)*(nITEM+1)); SCHOOL[k].new_item_mat[i] = (double*)malloc(sizeof(double)*(nITEM+1)); SCHOOL[k].sum_item_mat[i] = (double*)malloc(sizeof(double)*(nITEM+1)); SCHOOL[k].var_item_mat[i] = (double*)malloc(sizeof(double)*(nITEM+1)); } printf("MEMORY SETTING: %.2d\n", k); } count = ivector(1, nSCHOOL); oldmu = dmatrix(1, nITEM * (nITEM - 1) / 2, 0, nSCHOOL); olddelta = dvector(1, nITEM * (nITEM - 1) / 2); oldsigma = dvector(1, nSCHOOL); oldtau = dvector(1, nITEM * (nITEM - 1) / 2); oldgamma = dvector(1, nITEM); oldvarphi = dvector(1, nITEM); sample_sigma = dmatrix(1, (niter-nburn) / thin, 1, nSCHOOL); sample_delta = dmatrix(1, (niter-nburn) / thin, 1, nITEM * (nITEM - 1) / 2); sample_tau = dmatrix(1, (niter-nburn) / thin, 1, nITEM * (nITEM - 1) / 2); sample_gamma = dmatrix(1, (niter-nburn) / thin, 1, nITEM); sample_varphi = dmatrix(1, (niter-nburn) / thin, 1, nITEM); sum_mu = dmatrix(1, nITEM * (nITEM - 1) / 2, 0, nSCHOOL); sum_tau = dvector(1, nITEM * (nITEM - 1) / 2); var_tau = dvector(1, nITEM * (nITEM - 1) / 2); sum_sigma = dvector(1, nSCHOOL); var_sigma = dvector(1, nSCHOOL); sum_delta = dvector(1, nITEM * (nITEM - 1) / 2); var_delta = dvector(1, nITEM * (nITEM - 1) / 2); sum_gamma = dvector(1, nITEM); var_gamma = dvector(1, nITEM); sum_varphi = dvector(1, nITEM); var_varphi = dvector(1, nITEM); mu_dist = dmatrix(1, nSCHOOL, 1, nSCHOOL); sum_mu_dist = dmatrix(1, nSCHOOL, 1, nSCHOOL); avg_ran = dvector(1, nSCHOOL); var_ran = dvector(1, nSCHOOL); frname[0] = 'D'; frname[1] = 'A'; frname[2] = 'T'; frname[3] = 'A'; frname[4] = '/'; frname[5] = 'i'; frname[6] = 't'; frname[7] = 'e'; frname[8] = 'm'; frname[11] = '.'; frname[12] = 't'; frname[13] = 'x'; frname[14] = 't'; frname[15] = '\0'; for(k = 0; k <= nSCHOOL; k++){ for(i = 0; i <= nMAX; i++) SCHOOL[k].count_samp[i] = 0; for(i = 0; i <= nITEM; i++) SCHOOL[k].count_item[i] = 0; for(i = 0; i <= nMAX; i++) for(j = 0; j <= nITEM; j++) SCHOOL[k].dataset[i][j] = 0; for(i = 0; i <= nITEM; i++) SCHOOL[k].oldbeta[i] = SCHOOL[k].newbeta[i] = 0.0; for(i = 0; i <= nMAX; i++) SCHOOL[k].oldtheta[i] = SCHOOL[k].newtheta[i] = 0.0; for(i = 0; i <= nITEM; i++) for(j = 0; j <= nITEM; j++) SCHOOL[k].old_item_mat[i][j] = SCHOOL[k].new_item_mat[i][j] = 0.0; for(i = 0; i <= nITEM; i++) for(a = 0; a <= nMAX; a++) for(b = 0; b <= nMAX; b++) SCHOOL[k].Y[i][a][b] = 0; for(i = 0; i <= nMAX; i++) for(a = 0; a <= nITEM; a++) for(b = 0; b <= nITEM; b++) SCHOOL[k].U[i][a][b] = 0; for(i = 0; i <= nMAX; i++) for(j = 0; j <= nDIM; j++) SCHOOL[k].old_Zsamp[i][j] = SCHOOL[k].new_Zsamp[i][j] = SCHOOL[k].old_Zmean[i][j] = SCHOOL[k].new_Zmean[i][j] = 0.0; for(i = 0; i <= nITEM; i++) for(j = 0; j <= nDIM; j++) SCHOOL[k].old_Zitem[i][j] = SCHOOL[k].new_Zitem[i][j] = 0.0; for(i = 0; i <= (niter-nburn)/thin; i++){ SCHOOL[k].sample_sigma[i] = 0.0; for(j = 0; j <= nITEM; j++) SCHOOL[k].sample_beta[i][j] = 0.0; for(j = 0; j <= nMAX; 
j++) SCHOOL[k].sample_theta[i][j] = 0.0; for(a = 0; a <= nMAX; a++) for(b = 0; b <= nDIM; b++) SCHOOL[k].sample_Zsamp[i][a][b] = 0.0; for(a = 0; a <= nITEM; a++) for(b = 0; b <= nDIM; b++) SCHOOL[k].sample_Zitem[i][a][b] = 0.0; } SCHOOL[k].oldsigma = 0.0; SCHOOL[k].sum_sigma = SCHOOL[k].var_sigma = 0.0; for(i = 0; i <= nDIM; i++) SCHOOL[k].mean_Z[i] = 0.0; for(i = 0; i <= nITEM; i++) SCHOOL[k].var_beta[i] = SCHOOL[k].sum_beta[i] = SCHOOL[k].acc_beta[i] = 0.0; for(i = 0; i <= nMAX; i++) SCHOOL[k].var_theta[i] = SCHOOL[k].sum_theta[i] = SCHOOL[k].acc_theta[i] = 0.0; for(i = 0; i <= nMAX; i++) for(j = 0; j <= nDIM; j++) SCHOOL[k].sum_Zsamp[i][j] = SCHOOL[k].var_Zsamp[i][j] = 0.0; for(i = 0; i <= nITEM; i++) for(j = 0; j <= nDIM; j++) SCHOOL[k].sum_Zitem[i][j] = SCHOOL[k].var_Zitem[i][j] = 0.0; for(i = 0; i <= nITEM; i++) for(j = 0; j <= nITEM; j++) SCHOOL[k].sample_item_mat[i][j] = SCHOOL[k].sum_item_mat[i][j] = SCHOOL[k].var_item_mat[i][j] = 0.0; for(i = 0; i <= nMAX; i++) SCHOOL[k].acc_Zsamp[i] = 0.0; for(i = 0; i <= nITEM; i++) SCHOOL[k].acc_Zitem[i] = 0.0; if(k != 0) count[k] = 0; if(k != 0){ if(k < 10){frname[9] = (char)(48); frname[10] = (char)(k + 48);} else{frname[9] = (char)(k/10 + 48); frname[10] = (char)(k%10 + 48);} inp = fopen(frname, "r"); printf("Currently Reading %s\n", frname); if(inp == NULL) {printf("Cannot open data file\n"); return 0;} for(i = 1; i <= ncount[k]; i++) for(j = 1; j <= nITEM; j++){ fscanf(inp, "%d", &SCHOOL[k].dataset[i][j]); SCHOOL[k].count_samp[i] += SCHOOL[k].dataset[i][j]; SCHOOL[k].count_item[j] += SCHOOL[k].dataset[i][j]; } fclose(inp); printf("%.2d\n", k); for(i = 1; i <= ncount[k]; i++){ for(j = 1; j <= nITEM; j++) printf("%d ", SCHOOL[k].dataset[i][j]); printf("\n"); } for(i = 1; i <= nITEM; i++) for(a = 2; a <= ncount[k]; a++) for(b = 1; b < a; b++){ SCHOOL[k].Y[i][a][b] = SCHOOL[k].dataset[a][i] * SCHOOL[k].dataset[b][i]; SCHOOL[k].Y[i][b][a] = SCHOOL[k].Y[i][a][b]; } for(a = 1; a <= ncount[k]; a++) for(i = 2; i <= nITEM; i++) for(j = 1; j < i; j++){ SCHOOL[k].U[a][i][j] = SCHOOL[k].dataset[a][i] * SCHOOL[k].dataset[a][j]; SCHOOL[k].U[a][j][i] = SCHOOL[k].U[a][i][j]; } } printf("INITIALIZATION AND DATA LOADING: %.2d\n", k); } for(i = 1; i <= nITEM * (nITEM - 1) / 2; i++){ oldtau[i]= olddelta[i] = 0.0; for(j = 1; j <= nSCHOOL; j++) oldmu[i][j] = 0.0; } for(i = 1; i <= nSCHOOL; i++) oldsigma[i] = 0.0; // Declare Additional Variables sample_samp_like = dvector(1, nMAX); old_samp_distance = dvector(1, nMAX); new_samp_distance = dvector(1, nMAX); sample_item_like = dvector(1, nITEM); old_item_distance = dvector(1, nITEM); new_item_distance = dvector(1, nITEM); pr_var_Z = sqrt(2.0); for(v = 0; v < repeat; v++){ // Initialize Variables for(k = 1; k <= nSCHOOL; k++){ for(i = 1; i <= nITEM; i++) SCHOOL[k].oldbeta[i] = SCHOOL[k].newbeta[i] = 0.0; for(i = 1; i <= ncount[k]; i++) SCHOOL[k].oldtheta[i] = SCHOOL[k].newtheta[i] = 0.0; for(i = 1; i <= ncount[k]; i++) for(j = 1; j <= nDIM; j++) SCHOOL[k].old_Zsamp[i][j] = SCHOOL[k].new_Zsamp[i][j] = SCHOOL[k].old_Zmean[i][j] = SCHOOL[k].new_Zmean[i][j] = 0.0; for(i = 1; i <= nITEM; i++) for(j = 1; j <= nDIM; j++) SCHOOL[k].old_Zitem[i][j] = SCHOOL[k].new_Zitem[i][j] = 0.0; for(i = 1; i <= (niter-nburn)/thin; i++){ SCHOOL[k].sample_sigma[i] = 0.0; for(j = 1; j <= nITEM; j++) SCHOOL[k].sample_beta[i][j] = 0.0; for(j = 1; j <= ncount[k]; j++) SCHOOL[k].sample_theta[i][j] = 0.0; for(a = 1; a <= ncount[k]; a++) for(b = 1; b <= nDIM; b++) SCHOOL[k].sample_Zsamp[i][a][b] = 0.0; for(a = 1; a <= nITEM; a++) for(b = 1; b <= 
nDIM; b++) SCHOOL[k].sample_Zitem[i][a][b] = 0.0; } for(i = 1; i <= nITEM; i++) SCHOOL[k].var_beta[i] = SCHOOL[k].sum_beta[i] = SCHOOL[k].acc_beta[i] = 0.0; for(i = 1; i <= ncount[k]; i++) SCHOOL[k].var_theta[i] = SCHOOL[k].sum_theta[i] = SCHOOL[k].acc_theta[i] = 0.0; for(i = 1; i <= ncount[k]; i++) for(j = 1; j <= nDIM; j++) SCHOOL[k].sum_Zsamp[i][j] = SCHOOL[k].var_Zsamp[i][j] = 0.0; for(i = 1; i <= nITEM; i++) for(j = 1; j <= nDIM; j++) SCHOOL[k].sum_Zitem[i][j] = SCHOOL[k].var_Zitem[i][j] = 0.0; for(i = 1; i <= nITEM; i++) SCHOOL[k].acc_Zitem[i] = 0.0; for(i = 1; i <= nMAX; i++) SCHOOL[k].acc_Zsamp[i] = 0.0; for(i = 1; i <= nITEM; i++) for(j = 1; j <= nITEM; j++){ SCHOOL[k].sample_item_mat[i][j] = 0.0; SCHOOL[k].old_item_mat[i][j] = SCHOOL[k].new_item_mat[i][j] = 0.0; SCHOOL[k].sum_item_mat[i][j] = SCHOOL[k].var_item_mat[i][j] = 0.0; } for(i = 0; i <= nDIM; i++) SCHOOL[k].mean_Z[i] = 0.0; SCHOOL[k].oldsigma = SCHOOL[k].sum_sigma = SCHOOL[k].var_sigma = 0.0; count[k] = 0; } for(i = 1; i <= nITEM * (nITEM - 1) / 2; i++){ olddelta[i] = oldtau[i] = 0.0; sum_delta[i] = var_delta[i] = 0.0; sum_tau[i] = var_tau[i] = 0.0; for(j = 1; j <= (niter-nburn)/thin; j++) sample_tau[j][i] = sample_delta[j][i] = 0.0; } for(i = 1; i <= nSCHOOL; i++){ oldsigma[i] = 0.0; sum_sigma[i] = var_sigma[i] = 0.0; for(j = 1; j <= (niter-nburn)/thin; j++) sample_sigma[j][i] = 0.0; } for(k = 1; k <= nSCHOOL; k++) for(i = 1; i <= nITEM * (nITEM - 1) / 2; i++) oldmu[i][k] = sum_mu[i][k] = 0.0; for(i = 1; i <= nITEM; i++){ oldgamma[i] = oldvarphi[i] = 0.0; sum_gamma[i] = var_gamma[i] = 0.0; sum_varphi[i] = var_varphi[i] = 0.0; for(j = 1; j <= (niter-nburn)/thin; j++) sample_gamma[j][i] = sample_varphi[j][i] = 0.0; } for(i = 1; i <= nSCHOOL; i++) for(j = 1; j <= nSCHOOL; j++) sum_mu_dist[i][j] = 0.0; // Generate Initial Values for beta, Z, sigma for(i = 1; i <= nITEM * (nITEM - 1) / 2; i++){ olddelta[i] = -1.5 + 3.0 * rand() / RAND_MAX; oldtau[i] = 100.0; for(j = 1; j <= nSCHOOL; j++) oldmu[i][j] = -1.5 + 3.0 * rand() / RAND_MAX; } for(i = 1; i <= nSCHOOL; i++) oldsigma[i] = 100.0; for(i = 1; i <= nITEM; i++){ oldgamma[i] = -1.5 + 3.0 * rand() / RAND_MAX; oldvarphi[i] = 100.0; } for(k = 1; k <= nSCHOOL; k++){ SCHOOL[k].oldsigma = 0.05 * 0.05; for(i = 1; i <= nITEM; i++) SCHOOL[k].oldbeta[i] = -1.5 + 3.0 * rand() / RAND_MAX; for(i = 1; i <= ncount[k]; i++) SCHOOL[k].oldtheta[i] = -1.5 + 3.0 * rand() / RAND_MAX; for(i = 1; i <= nITEM; i++) for(j = 1; j <= nDIM; j++) SCHOOL[k].old_Zitem[i][j] = SCHOOL[k].new_Zitem[i][j] = -1.5 + 3.0 * rand() / RAND_MAX; for(i = 1; i <= nITEM; i++) for(j = 1; j <= nDIM; j++) for(a = 1; a <= ncount[k]; a++) if(SCHOOL[k].dataset[a][i] == 1) SCHOOL[k].old_Zmean[a][j] += SCHOOL[k].old_Zitem[i][j] / (SCHOOL[k].count_samp[a] * 1.0); for(i = 1; i <= ncount[k]; i++) for(j = 1; j <= nDIM; j++) SCHOOL[k].new_Zmean[i][j] = SCHOOL[k].old_Zmean[i][j]; for(i = 1; i <= ncount[k]; i++) for(j = 1; j <= nDIM; j++) SCHOOL[k].new_Zsamp[i][j] = SCHOOL[k].old_Zsamp[i][j] = SCHOOL[k].old_Zmean[i][j] + sqrt(SCHOOL[k].oldsigma) * gasdev(); for(i = 2; i <= nITEM; i++) for(j = 1; j < i; j++){ for(l = 1; l <= nDIM; l++) SCHOOL[k].old_item_mat[i][j] += pow((SCHOOL[k].old_Zitem[i][l] - SCHOOL[k].old_Zitem[j][l]), 2.0); SCHOOL[k].old_item_mat[i][j] = sqrt(SCHOOL[k].old_item_mat[i][j]); SCHOOL[k].old_item_mat[j][i] = SCHOOL[k].old_item_mat[i][j]; } for(i = 1; i <= nITEM; i++) for(j = 1; j <= nITEM; j++) SCHOOL[k].new_item_mat[i][j] = SCHOOL[k].old_item_mat[i][j]; } // MCMC Implementation for Parameter Estimation frname[0] = 
'R'; frname[1] = 'E'; frname[2] = 'S'; frname[3] = 'U'; frname[4] = 'L'; frname[5] = 'T'; frname[6] = '/'; frname[7] = 's'; frname[8] = 'i'; frname[9] = 'm'; frname[10] = '_'; frname[12] = (char)(48+MM); frname[13] = '.'; frname[14] = 'l'; frname[15] = 'o'; frname[16] = 'g'; frname[17] = '\0'; frname[11] = 's'; HUR = fopen(frname, "a"); frname[11] = 'l'; JYW = fopen(frname, "a"); frname[11] = 'u'; OUT = fopen(frname, "a"); frname[11] = 'g'; JIN = fopen(frname, "a"); frname[11] = 'p'; PRT = fopen(frname, "a"); frname[11] = 'a'; JJW = fopen(frname, "a"); gcount = mcount = 0; for(iter = 1; iter <= niter; iter++){ for(a = 1; a <= nSCHOOL; a++){ for(i = 1; i <= nITEM; i++){ //#pragma omp parallel for private(j, k) default(shared) for(j = 1; j <= nDIM; j++){ SCHOOL[a].new_Zitem[i][j] = SCHOOL[a].old_Zitem[i][j] + jump_Z[jump_index[a][i]] * gasdev(); for(k = 1; k <= ncount[a]; k++) if(SCHOOL[a].dataset[k][i] == 1){ SCHOOL[a].new_Zmean[k][j] -= SCHOOL[a].old_Zitem[i][j] / (SCHOOL[a].count_samp[k] * 1.0); SCHOOL[a].new_Zmean[k][j] += SCHOOL[a].new_Zitem[i][j] / (SCHOOL[a].count_samp[k] * 1.0); } } for(ind = 1; ind <= nITEM; ind++) sample_item_like[ind] = old_item_distance[ind] = new_item_distance[ind] = 0.0; //#pragma omp parallel for private(ind, k, l) default(shared) for(ind = 1; ind <= nITEM; ind++) if(ind != i){ for(l = 1; l <= nDIM; l++){ old_item_distance[ind] += pow((SCHOOL[a].old_Zitem[ind][l] - SCHOOL[a].old_Zitem[i][l]), 2.0); new_item_distance[ind] += pow((SCHOOL[a].new_Zitem[ind][l] - SCHOOL[a].new_Zitem[i][l]), 2.0); } old_item_distance[ind] = sqrt(old_item_distance[ind]); new_item_distance[ind] = sqrt(new_item_distance[ind]); SCHOOL[a].new_item_mat[ind][i] = new_item_distance[ind]; SCHOOL[a].new_item_mat[i][ind] = SCHOOL[a].new_item_mat[ind][i]; SCHOOL[a].old_item_mat[ind][i] = old_item_distance[ind]; SCHOOL[a].old_item_mat[i][ind] = SCHOOL[a].old_item_mat[ind][i]; for(k = 1; k <= ncount[a]; k++){ if(SCHOOL[a].U[k][ind][i] == 1){ sample_item_like[ind] -= -log(1.0 + exp(-(SCHOOL[a].oldtheta[k] - old_item_distance[ind]))); sample_item_like[ind] += -log(1.0 + exp(-(SCHOOL[a].oldtheta[k] - new_item_distance[ind]))); } else{ sample_item_like[ind] -= -log(1.0 + exp(SCHOOL[a].oldtheta[k] - old_item_distance[ind])); sample_item_like[ind] += -log(1.0 + exp(SCHOOL[a].oldtheta[k] - new_item_distance[ind])); } } } update_like_item = 0.0; for(ind = 1; ind <= nITEM; ind++) update_like_item += sample_item_like[ind]; num = den = 0.0; for(j = 2; j <= nITEM; j++) for(k = 1; k < j; k++){ if(SCHOOL[a].new_item_mat[j][k] > 0.0001) num += dlognorm(log(SCHOOL[a].new_item_mat[j][k]), olddelta[((j-1)*(j-2)/2+k)], sqrt(oldtau[((j-1)*(j-2)/2+k)])); else num += dlognorm(log(0.0001), olddelta[((j-1)*(j-2)/2+k)], sqrt(oldtau[((j-1)*(j-2)/2+k)])); if(SCHOOL[a].old_item_mat[j][k] > 0.0001) den += dlognorm(log(SCHOOL[a].old_item_mat[j][k]), olddelta[((j-1)*(j-2)/2+k)], sqrt(oldtau[((j-1)*(j-2)/2+k)])); else den += dlognorm(log(0.0001), olddelta[((j-1)*(j-2)/2+k)], sqrt(oldtau[((j-1)*(j-2)/2+k)])); //printf("%d %d-%.3f %.3f %.3f %.3f %.3f\n", j, k, num, den, oldmu[((j-1)*(j-2)/2+k)][a], log(SCHOOL[a].new_item_mat[j][k]), log(SCHOOL[a].old_item_mat[j][k])); } ratio = update_like_item + (num - den); //printf("SCHOOL-%.2d, ITEM-%.2d: Num-%.3f, Den-%.3f\n", a, i, num, den); if(ratio > 0.0) accept = 1; else{ un = rand() * 1.0 / RAND_MAX; if(log(un) < ratio) accept = 1; else accept = 0; } if(accept == 1){ for(j = 1; j <= nDIM; j++){ SCHOOL[a].old_Zitem[i][j] = SCHOOL[a].new_Zitem[i][j]; for(k = 1; k <= ncount[a]; k++) 
if(SCHOOL[a].dataset[k][i] == 1) SCHOOL[a].old_Zmean[k][j] = SCHOOL[a].new_Zmean[k][j]; } SCHOOL[a].acc_Zitem[i] += 1.0 / niter; for(j = 1; j <= nITEM; j++) for(k = 1; k <= nITEM; k++) SCHOOL[a].old_item_mat[j][k] = SCHOOL[a].new_item_mat[j][k]; } else{ for(j = 1; j <= nDIM; j++){ SCHOOL[a].new_Zitem[i][j] = SCHOOL[a].old_Zitem[i][j]; for(k = 1; k <= ncount[a]; k++) if(SCHOOL[a].dataset[k][i] == 1) SCHOOL[a].new_Zmean[k][j] = SCHOOL[a].old_Zmean[k][j]; } for(j = 1; j <= nITEM; j++) for(k = 1; k <= nITEM; k++) SCHOOL[a].new_item_mat[j][k] = SCHOOL[a].old_item_mat[j][k]; } } for(i = 1; i <= ncount[a]; i++){ for(j = 1; j <= nDIM; j++) SCHOOL[a].new_Zsamp[i][j] = SCHOOL[a].old_Zsamp[i][j] + jump_W * gasdev(); for(ind = 1; ind <= ncount[a]; ind++) sample_samp_like[ind] = old_samp_distance[ind] = new_samp_distance[ind] = 0.0; //#pragma omp parallel for private(ind, k, l) default(shared) for(ind = 1; ind <= ncount[a]; ind++) if(ind != i){ for(l = 1; l <= nDIM; l++){ old_samp_distance[ind] += pow((SCHOOL[a].old_Zsamp[ind][l] - SCHOOL[a].old_Zsamp[i][l]), 2.0); new_samp_distance[ind] += pow((SCHOOL[a].old_Zsamp[ind][l] - SCHOOL[a].new_Zsamp[i][l]), 2.0); } old_samp_distance[ind] = sqrt(old_samp_distance[ind]); new_samp_distance[ind] = sqrt(new_samp_distance[ind]); for(k = 1; k <= nITEM; k++){ if(SCHOOL[a].Y[k][ind][i] == 1){ sample_samp_like[ind] -= -log(1.0 + exp(-(SCHOOL[a].oldbeta[k] - old_samp_distance[ind]))); sample_samp_like[ind] += -log(1.0 + exp(-(SCHOOL[a].oldbeta[k] - new_samp_distance[ind]))); } else{ sample_samp_like[ind] -= -log(1.0 + exp(SCHOOL[a].oldbeta[k] - old_samp_distance[ind])); sample_samp_like[ind] += -log(1.0 + exp(SCHOOL[a].oldbeta[k] - new_samp_distance[ind])); } } } update_like_samp = 0.0; for(ind = 1; ind <= ncount[a]; ind++) update_like_samp += sample_samp_like[ind]; //printf("SCHOOL-%.2d, PERSON-%.2d: LIKELIHOOD_PERSON-%.3f\n", a, i, update_like_samp); num = den = 0.0; //printf("SCHOOL-%.2d, PERSON-%.2d: Num-%.3f, Den-%.3f\n", a, i, num, den); for(j = 1; j <= nDIM; j++){ num += dlognorm(SCHOOL[a].new_Zsamp[i][j], SCHOOL[a].old_Zmean[i][j], sqrt(SCHOOL[a].oldsigma)); den += dlognorm(SCHOOL[a].old_Zsamp[i][j], SCHOOL[a].old_Zmean[i][j], sqrt(SCHOOL[a].oldsigma)); } ratio = update_like_samp + (num - den); //printf("SCHOOL-%.2d, PERSON-%.2d: Num-%.3f, Den-%.3f\n", a, i, num, den); if(ratio > 0.0) accept = 1; else{ un = rand() * 1.0 / RAND_MAX; if(log(un) < ratio) accept = 1; else accept = 0; } if(accept == 1){ for(j = 1; j <= nDIM; j++) SCHOOL[a].old_Zsamp[i][j] = SCHOOL[a].new_Zsamp[i][j]; SCHOOL[a].acc_Zsamp[i] += 1.0 / niter; } else{ for(j = 1; j <= nDIM; j++) SCHOOL[a].new_Zsamp[i][j] = SCHOOL[a].old_Zsamp[i][j]; } } SCHOOL[a].post_a = prior_a; SCHOOL[a].post_b = prior_b; for(i = 1; i <= ncount[a]; i++) for(j = 1; j <= nDIM; j++){ SCHOOL[a].post_a += 0.5; SCHOOL[a].post_b += 0.5 * (SCHOOL[a].old_Zsamp[i][j] - SCHOOL[a].old_Zmean[i][j]) * (SCHOOL[a].old_Zsamp[i][j] - SCHOOL[a].old_Zmean[i][j]); } SCHOOL[a].oldsigma = 1.0 / Rgamma(SCHOOL[a].post_a, SCHOOL[a].post_b); // 2. 
Update $\beta_i$ from the proposal distribution $\phi_2(\cdot)$ //#pragma omp parallel for private(i, j, k, old_like_beta, new_like_beta, num, den, accept, ratio, un) default(shared) for(i = 1; i <= nITEM; i++){ old_like_beta = cost_beta(i, SCHOOL[a].oldbeta[i], a); SCHOOL[a].newbeta[i] = SCHOOL[a].oldbeta[i] + jump_beta * gasdev(); if(fabs(SCHOOL[a].newbeta[i]) < 7.0){ new_like_beta = cost_beta(i, SCHOOL[a].newbeta[i], a); num = new_like_beta; den = old_like_beta; num += dlognorm(SCHOOL[a].oldbeta[i], oldgamma[i], sqrt(oldvarphi[i])); den += dlognorm(SCHOOL[a].newbeta[i], oldgamma[i], sqrt(oldvarphi[i])); ratio = num - den; if(ratio > 0.0) accept = 1; else{ un = rand() * 1.0 / RAND_MAX; if(log(un) < ratio) accept = 1; else accept = 0; } } else accept = 0; if(accept == 1){ SCHOOL[a].oldbeta[i] = SCHOOL[a].newbeta[i]; SCHOOL[a].acc_beta[i] += 1.0 / niter; } else SCHOOL[a].newbeta[i] = SCHOOL[a].oldbeta[i]; } //#pragma omp parallel for private(i, old_like_theta, new_like_theta, num, den, accept, ratio, un) default(shared) for(i = 1; i <= ncount[a]; i++){ old_like_theta = cost_theta(i, SCHOOL[a].oldtheta[i], a); SCHOOL[a].newtheta[i] = SCHOOL[a].oldtheta[i] + jump_theta * gasdev(); new_like_theta = cost_theta(i, SCHOOL[a].newtheta[i], a); num = dlognorm(SCHOOL[a].newtheta[i], pr_mean_theta, pr_var_theta) + new_like_theta; den = dlognorm(SCHOOL[a].oldtheta[i], pr_mean_theta, pr_var_theta) + old_like_theta; ratio = num - den; if(ratio > 0.0) accept = 1; else{ un = rand() * 1.0 / RAND_MAX; if(log(un) < ratio) accept = 1; else accept = 0; } if(accept == 1){ SCHOOL[a].oldtheta[i] = SCHOOL[a].newtheta[i]; SCHOOL[a].acc_theta[i] += 1.0 / niter; } else SCHOOL[a].newtheta[i] = SCHOOL[a].oldtheta[i]; } // Save MCMC Results to Files and Repository Variables if(iter > nburn && iter % thin == 0){ count[a]++; for(i = 1; i <= ncount[a]; i++) for(j = 1; j <= nDIM; j++) SCHOOL[a].sample_Zsamp[count[a]][i][j] = SCHOOL[a].old_Zsamp[i][j]; for(i = 1; i <= nITEM; i++) for(j = 1; j <= nDIM; j++) SCHOOL[a].sample_Zitem[count[a]][i][j] = SCHOOL[a].old_Zitem[i][j]; for(i = 1; i <= nITEM; i++) SCHOOL[a].sample_beta[count[a]][i] = SCHOOL[a].oldbeta[i]; for(i = 1; i <= ncount[a]; i++) SCHOOL[a].sample_theta[count[a]][i] = SCHOOL[a].oldtheta[i]; SCHOOL[a].sample_sigma[count[a]] = SCHOOL[a].oldsigma; } // Print MCMC Results to Screen if(iter % print == 0){ printf("%.5d-BETA%.2d ", iter, a); for(i = 1; i <= nITEM; i++) printf("% .4f ", SCHOOL[a].oldbeta[i]); printf("%.4f\n", SCHOOL[a].oldsigma); } } //#pragma omp parallel for private(i, j, school_a, school_b, avg_beta, var_beta) default(shared) for(i = 1; i <= nITEM; i++){ school_a = prior_a; school_b = prior_b; for(j = 1; j <= nSCHOOL; j++){ school_a += 0.5; school_b += 0.5 * (SCHOOL[j].oldbeta[i] - oldgamma[i]) * (SCHOOL[j].oldbeta[i] - oldgamma[i]); } oldvarphi[i] = 1.0 / Rgamma(school_a, school_b); var_beta = 1.0 / (1.0 / pr_var_gamma + nSCHOOL / oldvarphi[i]); avg_beta = 0.0; for(j = 1; j <= nSCHOOL; j++) avg_beta += SCHOOL[j].oldbeta[i] / nSCHOOL; avg_beta *= var_beta * (nSCHOOL / oldvarphi[i]); oldgamma[i] = avg_beta + sqrt(var_beta) * gasdev(); } for(i = 2; i <= nITEM; i++) for(j = 1; j < i; j++){ post_a = prior_a; post_b = prior_b; for(k = 1; k <= nSCHOOL; k++){ post_a += 0.5; if(SCHOOL[k].old_item_mat[i][j] > 0.0001) post_b += 0.5 * (log(SCHOOL[k].old_item_mat[i][j]) - olddelta[((i-1)*(i-2)/2+j)]) * (log(SCHOOL[k].old_item_mat[i][j]) - olddelta[((i-1)*(i-2)/2+j)]); else post_b += 0.5 * (log(0.0001) - olddelta[((i-1)*(i-2)/2+j)]) * (log(0.0001) - 
olddelta[((i-1)*(i-2)/2+j)]); } oldtau[((i-1)*(i-2)/2+j)] = 1.0 / Rgamma(post_a, post_b); var_fix = 1.0 / (1.0 / pr_var_delta + nSCHOOL / oldtau[((i-1)*(i-2)/2+j)]); avg_fix = 0.0; for(k = 1; k <= nSCHOOL; k++){ if(SCHOOL[k].old_item_mat[i][j] > 0.0001) avg_fix += (1.0 / oldtau[((i-1)*(i-2)/2+j)]) * log(SCHOOL[k].old_item_mat[i][j]); else avg_fix += (1.0 / oldtau[((i-1)*(i-2)/2+j)]) * log(0.0001); } avg_fix *= var_fix; olddelta[((i-1)*(i-2)/2+j)] = avg_fix + sqrt(var_fix) * gasdev(); } if(iter % print == 0) for(i = 1; i <= nITEM; i++){ printf("%.5d-GAMMA, VARPHI, ITEM%.2d: ", iter, i); printf("% .4f %.4f\n", oldgamma[i], oldvarphi[i]); } if(iter > nburn && iter % thin == 0){ gcount++; for(i = 1; i <= nITEM * (nITEM - 1) / 2; i++){ sample_tau[gcount][i] = sqrt(oldtau[i]); sample_delta[gcount][i] = olddelta[i]; fprintf(JYW, "% .4f ", sample_delta[gcount][i]); fprintf(OUT, "%.4f ", sample_tau[gcount][i]); } for(i = 1; i <= nSCHOOL; i++){ sample_sigma[gcount][i] = sqrt(oldsigma[i]); fprintf(HUR, "%.4f ", sample_sigma[gcount][i]); } for(k = 1; k <= nSCHOOL; k++) for(i = 1; i <= nITEM * (nITEM - 1) / 2; i++) sum_mu[i][k] += oldmu[i][k] / ((niter-nburn)/thin); for(i = 1; i <= nITEM; i++){ sample_gamma[gcount][i] = oldgamma[i]; sample_varphi[gcount][i] = sqrt(oldvarphi[i]); fprintf(JIN, "% .4f ", sample_gamma[gcount][i]); fprintf(PRT, "%.4f ", sample_varphi[gcount][i]); } for(i = 1; i <= nSCHOOL; i++) for(j = 1; j <= nSCHOOL; j++) mu_dist[i][j] = 0.0; for(k = 1; k <= nITEM * (nITEM - 1) / 2; k++) for(i = 2; i <= nSCHOOL; i++) for(j = 1; j < i; j++) mu_dist[i][j] += (oldmu[k][i] - oldmu[k][j]) * (oldmu[k][i] - oldmu[k][j]); for(i = 2; i <= nSCHOOL; i++) for(j = 1; j < i; j++) mu_dist[j][i] = mu_dist[i][j]; for(i = 1; i <= nSCHOOL; i++) for(j = 1; j <= nSCHOOL; j++) sum_mu_dist[i][j] += sqrt(mu_dist[i][j]) / ((niter-nburn)/thin); for(i = 2; i <= nSCHOOL; i++) for(j = 1; j < i; j++) fprintf(JJW, "%.4f ", sqrt(mu_dist[i][j])); fprintf(HUR, "\n"); fprintf(OUT, "\n"); fprintf(JYW, "\n"); fprintf(JIN, "\n"); fprintf(PRT, "\n"); fprintf(JJW, "\n"); } } fclose(HUR); fclose(JYW); fclose(OUT); fclose(JIN); fclose(PRT); fclose(JJW); frname[0] = 'R'; frname[1] = 'E'; frname[2] = 'S'; frname[3] = 'U'; frname[4] = 'L'; frname[5] = 'T'; frname[6] = '/'; frname[7] = 's'; frname[8] = 'i'; frname[9] = 'm'; frname[12] = '_'; frname[14] = (char)(48+MM); frname[15] = '.'; frname[16] = 'l'; frname[17] = 'o'; frname[18] = 'g'; frname[19] = '\0'; for(a = 1; a <= nSCHOOL; a++){ if(a < 10){frname[10] = (char)(48); frname[11] = (char)(a + 48);} else{frname[10] = (char)(a/10 + 48); frname[11] = (char)(a%10 + 48);} frname[13] = 'z'; JIN = fopen(frname, "a"); frname[13] = 'b'; HUR = fopen(frname, "a"); frname[13] = 't'; OUT = fopen(frname, "a"); frname[13] = 'i'; JYW = fopen(frname, "a"); frname[13] = 'h'; ASA = fopen(frname, "a"); for(k = 1; k <= count[a]; k++){ for(i = 1; i <= ncount[a]; i++) for(j = 1; j <= nDIM; j++) fprintf(JIN, "% .4f ", SCHOOL[a].sample_Zsamp[k][i][j]); fprintf(JIN, "\n"); for(i = 1; i <= nITEM; i++) for(j = 1; j <= nDIM; j++) fprintf(JYW, "% .4f ", SCHOOL[a].sample_Zitem[k][i][j]); fprintf(JYW, "\n"); for(i = 1; i <= nITEM; i++) fprintf(HUR, "% .4f ", SCHOOL[a].sample_beta[k][i]); fprintf(HUR, "\n"); for(i = 1; i <= ncount[a]; i++) fprintf(OUT, "% .4f ", SCHOOL[a].sample_theta[k][i]); fprintf(OUT, "\n"); fprintf(ASA, "%.4f\n", SCHOOL[a].sample_sigma[k]); } fclose(JIN); fclose(HUR); fclose(OUT); fclose(JYW); fclose(ASA); } // Calculate Mean and Variance of MCMC Estimators for(a = 1; a <= nSCHOOL; a++){ 
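// The accumulations in this block form posterior means and unbiased sample
// variances from the stored draws via sum(x)/n and sum(x^2)/(n-1), with
// n*mean^2/(n-1) subtracted afterwards, i.e. s^2 = (sum(x^2) - n*xbar^2)/(n-1).
// That is exact algebra but can cancel badly when |mean| is large relative to
// the spread; a one-pass alternative (sketch only, x and n are hypothetical,
// block disabled) is Welford's update:
#if 0
double mean = 0.0, m2 = 0.0; /* running mean and sum of squared deviations */
for (int t = 1; t <= n; t++) {
  double delta = x[t] - mean;
  mean += delta / t;
  m2 += delta * (x[t] - mean); /* second factor uses the updated mean */
}
/* unbiased sample variance = m2 / (n - 1) */
#endif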
for(i = 1; i <= count[a]; i++){ SCHOOL[a].sum_sigma += SCHOOL[a].sample_sigma[i] / count[a]; SCHOOL[a].var_sigma += SCHOOL[a].sample_sigma[i] * SCHOOL[a].sample_sigma[i] / (count[a] - 1); for(j = 1; j <= nITEM; j++){ SCHOOL[a].sum_beta[j] += SCHOOL[a].sample_beta[i][j] / count[a]; SCHOOL[a].var_beta[j] += SCHOOL[a].sample_beta[i][j] * SCHOOL[a].sample_beta[i][j] / (count[a] - 1); } for(j = 1; j <= ncount[a]; j++){ SCHOOL[a].sum_theta[j] += SCHOOL[a].sample_theta[i][j] / count[a]; SCHOOL[a].var_theta[j] += SCHOOL[a].sample_theta[i][j] * SCHOOL[a].sample_theta[i][j] / (count[a] - 1); } for(j = 1; j <= ncount[a]; j++) for(k = 1; k <= nDIM; k++){ SCHOOL[a].sum_Zsamp[j][k] += SCHOOL[a].sample_Zsamp[i][j][k] / count[a]; SCHOOL[a].var_Zsamp[j][k] += SCHOOL[a].sample_Zsamp[i][j][k] * SCHOOL[a].sample_Zsamp[i][j][k] / (count[a] - 1); } for(j = 1; j <= nITEM; j++) for(k = 1; k <= nDIM; k++){ SCHOOL[a].sum_Zitem[j][k] += SCHOOL[a].sample_Zitem[i][j][k] / count[a]; SCHOOL[a].var_Zitem[j][k] += SCHOOL[a].sample_Zitem[i][j][k] * SCHOOL[a].sample_Zitem[i][j][k] / (count[a] - 1); } for(j = 1; j <= nITEM; j++) for(k = 1; k <= nITEM; k++) SCHOOL[a].sample_item_mat[j][k] = 0.0; for(j = 2; j <= nITEM; j++) for(k = 1; k < j; k++) for(l = 1; l <= nDIM; l++) SCHOOL[a].sample_item_mat[j][k] += pow((SCHOOL[a].sample_Zitem[i][j][l] - SCHOOL[a].sample_Zitem[i][k][l]), 2.0); for(j = 2; j <= nITEM; j++) for(k = 1; k < j; k++) SCHOOL[a].sample_item_mat[k][j] = SCHOOL[a].sample_item_mat[j][k]; for(j = 1; j <= nITEM; j++) for(k = 1; k <= nITEM; k++){ SCHOOL[a].sum_item_mat[j][k] += SCHOOL[a].sample_item_mat[j][k] / count[a]; SCHOOL[a].var_item_mat[j][k] += SCHOOL[a].sample_item_mat[j][k] * SCHOOL[a].sample_item_mat[j][k] / (count[a] - 1); } } SCHOOL[a].var_sigma -= SCHOOL[a].sum_sigma * SCHOOL[a].sum_sigma * count[a] / (count[a] - 1); for(i = 1; i <= nITEM; i++) SCHOOL[a].var_beta[i] -= SCHOOL[a].sum_beta[i] * SCHOOL[a].sum_beta[i] * count[a] / (count[a] - 1); for(i = 1; i <= ncount[a]; i++) SCHOOL[a].var_theta[i] -= SCHOOL[a].sum_theta[i] * SCHOOL[a].sum_theta[i] * count[a] / (count[a] - 1); for(i = 1; i <= ncount[a]; i++) for(j = 1; j <= nDIM; j++) SCHOOL[a].var_Zsamp[i][j] -= SCHOOL[a].sum_Zsamp[i][j] * SCHOOL[a].sum_Zsamp[i][j] * count[a] / (count[a] - 1); for(i = 1; i <= nITEM; i++) for(j = 1; j <= nDIM; j++) SCHOOL[a].var_Zitem[i][j] -= SCHOOL[a].sum_Zitem[i][j] * SCHOOL[a].sum_Zitem[i][j] * count[a] / (count[a] - 1); for(i = 1; i <= nITEM; i++) for(j = 1; j <= nITEM; j++) SCHOOL[a].var_item_mat[i][j] -= SCHOOL[a].sum_item_mat[i][j] * SCHOOL[a].sum_item_mat[i][j] * count[a] / (count[a] - 1); } for(i = 1; i <= gcount; i++){ for(j = 1; j <= nITEM * (nITEM - 1) / 2; j++){ sum_tau[j] += sample_tau[i][j] / gcount; sum_delta[j] += sample_delta[i][j] / gcount; var_tau[j] += sample_tau[i][j] * sample_tau[i][j] / (gcount - 1); var_delta[j] += sample_delta[i][j] * sample_delta[i][j] / (gcount - 1); } for(j = 1; j <= nSCHOOL; j++){ sum_sigma[j] += sample_sigma[i][j] / gcount; var_sigma[j] += sample_sigma[i][j] * sample_sigma[i][j] / (gcount - 1); } for(j = 1; j <= nITEM; j++){ sum_gamma[j] += sample_gamma[i][j] / gcount; sum_varphi[j] += sample_varphi[i][j] / gcount; var_gamma[j] += sample_gamma[i][j] * sample_gamma[i][j] / (gcount - 1); var_varphi[j] += sample_varphi[i][j] * sample_varphi[i][j] / (gcount - 1); } } for(i = 1; i <= nITEM * (nITEM - 1) / 2; i++){ var_tau[i] -= sum_tau[i] * sum_tau[i] * gcount / (gcount - 1); var_delta[i] -= sum_delta[i] * sum_delta[i] * gcount / (gcount - 1); } for(i = 1; i <= nSCHOOL; i++) 
var_sigma[i] -= sum_sigma[i] * sum_sigma[i] * gcount / (gcount - 1); for(i = 1; i <= nITEM; i++){ var_gamma[i] -= sum_gamma[i] * sum_gamma[i] * gcount / (gcount - 1); var_varphi[i] -= sum_varphi[i] * sum_varphi[i] * gcount / (gcount - 1); } // Save Parameter Estimates frname[0] = 'R'; frname[1] = 'E'; frname[2] = 'S'; frname[3] = 'U'; frname[4] = 'L'; frname[5] = 'T'; frname[6] = '/'; frname[7] = 's'; frname[8] = 'u'; frname[9] = 'm'; frname[12] = '_'; frname[14] = (char)(48+MM); frname[15] = '.'; frname[16] = 'l'; frname[17] = 'o'; frname[18] = 'g'; frname[19] = '\0'; for(a = 1; a <= nSCHOOL; a++){ if(a < 10){frname[10] = (char)(48); frname[11] = (char)(a + 48);} else{frname[10] = (char)(a/10 + 48); frname[11] = (char)(a%10 + 48);} frname[13] = 'z'; JIN = fopen(frname, "a"); frname[13] = 'b'; HUR = fopen(frname, "a"); frname[13] = 't'; OUT = fopen(frname, "a"); frname[13] = 'i'; JYW = fopen(frname, "a"); frname[13] = 'd'; PRT = fopen(frname, "a"); for(i = 1; i <= nITEM; i++) fprintf(HUR, "%.4f ", SCHOOL[a].sum_beta[i]); fprintf(HUR, "\n"); for(i = 1; i <= nITEM; i++) fprintf(HUR, "%.4f ", SCHOOL[a].var_beta[i]); fprintf(HUR, "\n"); for(i = 1; i <= nITEM; i++) fprintf(HUR, "%.4f ", SCHOOL[a].acc_beta[i]); fprintf(HUR, "\n"); for(i = 1; i <= ncount[a]; i++) fprintf(OUT, "%.4f ", SCHOOL[a].sum_theta[i]); fprintf(OUT, "\n"); for(i = 1; i <= ncount[a]; i++) fprintf(OUT, "%.4f ", SCHOOL[a].var_theta[i]); fprintf(OUT, "\n"); for(i = 1; i <= ncount[a]; i++) fprintf(OUT, "%.4f ", SCHOOL[a].acc_theta[i]); fprintf(OUT, "\n"); for(i = 1; i <= ncount[a]; i++) for(j = 1; j <= nDIM; j++) fprintf(JIN, "%.4f ", SCHOOL[a].sum_Zsamp[i][j]); fprintf(JIN, "\n"); for(i = 1; i <= ncount[a]; i++) for(j = 1; j <= nDIM; j++) fprintf(JIN, "%.4f ", SCHOOL[a].var_Zsamp[i][j]); fprintf(JIN, "\n"); for(i = 1; i <= ncount[a]; i++) for(j = 1; j <= nDIM; j++) fprintf(JIN, "%.4f ", SCHOOL[a].acc_Zsamp[i]); fprintf(JIN, "\n"); for(i = 1; i <= nITEM; i++) for(j = 1; j <= nDIM; j++) fprintf(JYW, "%.4f ", SCHOOL[a].sum_Zitem[i][j]); fprintf(JYW, "\n"); for(i = 1; i <= nITEM; i++) for(j = 1; j <= nDIM; j++) fprintf(JYW, "%.4f ", SCHOOL[a].var_Zitem[i][j]); fprintf(JYW, "\n"); for(i = 1; i <= nITEM; i++) for(j = 1; j <= nDIM; j++) fprintf(JYW, "%.4f ", SCHOOL[a].acc_Zitem[i]); fprintf(JYW, "\n"); for(i = 2; i <= nITEM; i++) for(j = 1; j < i; j++) fprintf(PRT, "%.4f ", SCHOOL[a].sum_item_mat[i][j]); fprintf(PRT, "\n"); for(i = 2; i <= nITEM; i++) for(j = 1; j < i; j++) fprintf(PRT, "%.4f ", SCHOOL[a].var_item_mat[i][j]); fprintf(PRT, "\n"); fclose(JIN); fclose(HUR); fclose(OUT); fclose(JYW); fclose(PRT); } frname[0] = 'R'; frname[1] = 'E'; frname[2] = 'S'; frname[3] = 'U'; frname[4] = 'L'; frname[5] = 'T'; frname[6] = '/'; frname[7] = 's'; frname[8] = 'u'; frname[9] = 'm'; frname[10] = '_'; frname[12] = (char)(48+MM); frname[13] = '.'; frname[14] = 'l'; frname[15] = 'o'; frname[16] = 'g'; frname[17] = '\0'; frname[11] = 'm'; JIN = fopen(frname, "a"); frname[11] = 's'; HUR = fopen(frname, "a"); frname[11] = 'l'; JYW = fopen(frname, "a"); frname[11] = 'u'; OUT = fopen(frname, "a"); frname[11] = 'g'; ASA = fopen(frname, "a"); frname[11] = 'p'; PRT = fopen(frname, "a"); frname[11] = 'h'; IMS = fopen(frname, "a"); frname[11] = 'a'; JJW = fopen(frname, "a"); for(i = 1; i <= nSCHOOL; i++) fprintf(HUR, "% .4f ", sum_sigma[i]); fprintf(HUR, "\n"); for(i = 1; i <= nSCHOOL; i++) fprintf(HUR, "% .4f ", var_sigma[i]); fprintf(HUR, "\n"); for(i = 1; i <= nITEM * (nITEM - 1) / 2; i++) fprintf(OUT, "% .4f ", sum_tau[i]); fprintf(OUT, "\n"); 
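// Aside: everywhere an item pair (i, j) with i > j is written to or read from
// a length nITEM*(nITEM-1)/2 vector, the 1-based slot is (i-1)*(i-2)/2 + j,
// which walks the strict lower triangle of the item distance matrix row by
// row: (2,1)->1, (3,1)->2, (3,2)->3, and so on up to
// (nITEM, nITEM-1) -> nITEM*(nITEM-1)/2. A hypothetical helper making the
// convention explicit (disabled; not called by this program):
#if 0
static int pair_index(int i, int j) {
  /* requires i > j >= 1 */
  return (i - 1) * (i - 2) / 2 + j;
}
#endif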
for(i = 1; i <= nITEM * (nITEM - 1) / 2; i++) fprintf(OUT, "% .4f ", var_tau[i]); fprintf(OUT, "\n"); for(i = 1; i <= nITEM * (nITEM - 1) / 2; i++) fprintf(JYW, "% .4f ", sum_delta[i]); fprintf(JYW, "\n"); for(i = 1; i <= nITEM * (nITEM - 1) / 2; i++) fprintf(JYW, "% .4f ", var_delta[i]); fprintf(JYW, "\n"); for(k = 1; k <= nSCHOOL; k++){ for(i = 1; i <= nITEM * (nITEM - 1) / 2; i++) fprintf(JIN, "% .4f ", sum_mu[i][k]); fprintf(JIN, "\n"); } for(i = 1; i <= nITEM; i++) fprintf(ASA, "% .4f ", sum_gamma[i]); fprintf(ASA, "\n"); for(i = 1; i <= nITEM; i++) fprintf(ASA, "% .4f ", var_gamma[i]); fprintf(ASA, "\n"); for(i = 1; i <= nITEM; i++) fprintf(PRT, "%.4f ", sum_varphi[i]); fprintf(PRT, "\n"); for(i = 1; i <= nITEM; i++) fprintf(PRT, "%.4f ", var_varphi[i]); fprintf(PRT, "\n"); for(k = 1; k <= nSCHOOL; k++) fprintf(IMS, "%.4f ", SCHOOL[k].sum_sigma); fprintf(IMS, "\n"); for(k = 1; k <= nSCHOOL; k++) fprintf(IMS, "%.4f ", SCHOOL[k].var_sigma); fprintf(IMS, "\n"); for(i = 1; i <= nSCHOOL; i++){ for(j = 1; j <= nSCHOOL; j++) fprintf(JJW, "%.4f ", sum_mu_dist[i][j]); fprintf(JJW, "\n"); } fclose(JIN); fclose(HUR); fclose(JYW); fclose(OUT); fclose(ASA); fclose(PRT); fclose(IMS); fclose(JJW); } /* free_ivector(ncount, 1, nSCHOOL); free_dvector(jump_Z, 1, 10); free_imatrix(jump_index, 1, nSCHOOL, 1, nITEM); for(k = 0; k <= nSCHOOL; k++){ for(i = 0; i <= nMAX; i++) free(SCHOOL[k].dataset[i]); for(i = 0; i <= nITEM; i++){ for(a = 0; a <= nMAX; a++) free(SCHOOL[k].Y[i][a]); free(SCHOOL[k].Y[i]); } for(i = 0; i <= nMAX; i++){ for(a = 0; a <= nITEM; a++) free(SCHOOL[k].U[i][a]); free(SCHOOL[k].U[i]); } for(i = 0; i <= nMAX; i++){free(SCHOOL[k].old_Zsamp[i]); free(SCHOOL[k].new_Zsamp[i]); free(SCHOOL[k].old_Zmean[i]); free(SCHOOL[k].new_Zmean[i]);} for(i = 0; i <= nITEM; i++){free(SCHOOL[k].old_Zitem[i]); free(SCHOOL[k].new_Zitem[i]);} for(i = 0; i <= (niter-nburn)/thin; i++){ for(j = 0; j <= nMAX; j++) free(SCHOOL[k].sample_Zsamp[i][j]); for(j = 0; j <= nITEM; j++) free(SCHOOL[k].sample_Zitem[i][j]); free(SCHOOL[k].sample_beta[i]); free(SCHOOL[k].sample_theta[i]); free(SCHOOL[k].sample_Zsamp[i]); free(SCHOOL[k].sample_Zitem[i]); } for(i = 0; i <= nMAX; i++){free(SCHOOL[k].sum_Zsamp[i]); free(SCHOOL[k].var_Zsamp[i]);} for(i = 0; i <= nITEM; i++){free(SCHOOL[k].sum_Zitem[i]); free(SCHOOL[k].var_Zitem[i]);} for(i = 0; i <= nITEM; i++){free(SCHOOL[k].sum_item_mat[i]); free(SCHOOL[k].var_item_mat[i]);} for(i = 0; i <= nITEM; i++){free(SCHOOL[k].old_item_mat[i]); free(SCHOOL[k].new_item_mat[i]); free(SCHOOL[k].sample_item_mat[i]);} free(SCHOOL[k].old_item_mat); free(SCHOOL[k].new_item_mat); free(SCHOOL[k].oldbeta); free(SCHOOL[k].newbeta); free(SCHOOL[k].oldtheta); free(SCHOOL[k].newtheta); free(SCHOOL[k].count_item); free(SCHOOL[k].count_samp); free(SCHOOL[k].Y); free(SCHOOL[k].U); free(SCHOOL[k].dataset); free(SCHOOL[k].old_Zsamp); free(SCHOOL[k].new_Zsamp); free(SCHOOL[k].old_Zmean); free(SCHOOL[k].new_Zmean); free(SCHOOL[k].old_Zitem); free(SCHOOL[k].new_Zitem); free(SCHOOL[k].sample_beta); free(SCHOOL[k].sample_theta); free(SCHOOL[k].sum_beta); free(SCHOOL[k].var_beta); free(SCHOOL[k].acc_beta); free(SCHOOL[k].sum_theta); free(SCHOOL[k].var_theta); free(SCHOOL[k].acc_theta); free(SCHOOL[k].sample_Zsamp); free(SCHOOL[k].sample_Zitem); free(SCHOOL[k].sample_item_mat); free(SCHOOL[k].sum_Zsamp); free(SCHOOL[k].var_Zsamp); free(SCHOOL[k].acc_Zsamp); free(SCHOOL[k].sum_Zitem); free(SCHOOL[k].var_Zitem); free(SCHOOL[k].acc_Zitem); free(SCHOOL[k].sum_item_mat); free(SCHOOL[k].var_item_mat); free(SCHOOL[k].sample_sigma); free(SCHOOL[k].mean_Z); } free(SCHOOL); free_dmatrix(sample_sigma, 1, (niter - nburn) / thin, 1, nSCHOOL); free_dmatrix(sample_delta, 1, (niter - nburn) / thin, 1, nITEM * (nITEM - 1) / 2); free_dmatrix(sample_tau, 1, (niter - nburn) / thin, 1, nITEM * (nITEM - 1) / 2); free_dmatrix(sample_gamma, 1, (niter-nburn)/thin, 1, nITEM); free_dmatrix(sample_varphi, 1, (niter-nburn)/thin, 1, nITEM); free_dmatrix(sum_mu, 1, nITEM * (nITEM - 1) / 2, 0, nSCHOOL); free_dvector(sum_tau, 1, nITEM * (nITEM - 1) / 2); free_dvector(var_tau, 1, nITEM * (nITEM - 1) / 2); free_dvector(sum_sigma, 1, nSCHOOL); free_dvector(var_sigma, 1, nSCHOOL); free_dvector(sum_delta, 1, nITEM * (nITEM - 1) / 2); free_dvector(var_delta, 1, nITEM * (nITEM - 1) / 2); free_dvector(sum_gamma, 1, nITEM); free_dvector(var_gamma, 1, nITEM); free_dvector(sum_varphi, 1, nITEM); free_dvector(var_varphi, 1, nITEM); free_dvector(oldsigma, 1, nSCHOOL); free_dvector(olddelta, 1, nITEM * (nITEM - 1) / 2); free_dvector(oldtau, 1, nITEM * (nITEM - 1) / 2); free_dmatrix(oldmu, 1, nITEM * (nITEM - 1) / 2, 0, nSCHOOL); free_dvector(oldgamma, 1, nITEM); free_dvector(oldvarphi, 1, nITEM); free_dmatrix(mu_dist, 1, nSCHOOL, 1, nSCHOOL); free_dmatrix(sum_mu_dist, 1, nSCHOOL, 1, nSCHOOL); free_dvector(avg_ran, 1, nSCHOOL); free_dvector(var_ran, 1, nSCHOOL); free_ivector(count, 1, nSCHOOL); free_dvector(sample_samp_like, 1, nMAX); free_dvector(new_samp_distance, 1, nMAX); free_dvector(old_samp_distance, 1, nMAX); free_dvector(sample_item_like, 1, nITEM); free_dvector(new_item_distance, 1, nITEM); free_dvector(old_item_distance, 1, nITEM); */ return 0; }
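// ---------------------------------------------------------------------------
// Appendix (illustrative sketch, not invoked above): every update block in
// main.c repeats the same accept/reject pattern -- a Metropolis-Hastings step
// carried out on the log scale. With ratio = log posterior(new) - log
// posterior(old), the move is accepted outright when ratio > 0 and otherwise
// accepted with probability exp(ratio), implemented by comparing log(u) for
// u ~ Uniform(0,1). A self-contained version, assuming only the C standard
// library:
#include <stdlib.h>
#include <math.h>

static int mh_accept(double log_ratio) {
  if (log_ratio > 0.0)
    return 1; /* posterior ratio >= 1: always accept */
  double u = (rand() + 1.0) / ((double)RAND_MAX + 2.0); /* u strictly inside (0,1) */
  return log(u) < log_ratio; /* accept with probability exp(log_ratio) */
}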
#include "header.h" #include "FILE/nrutil.h" #include "FILE/stat.c" #include "cost.c" int main(int argc, const char *argv[]) { //Declare Variables FILE * inp, *JIN, *HUR, *OUT, *PRT, *JYW, *ASA, *IMS, *JJW; char buf[255], frname[255]; int stime; long ltime; int ind, ite, a, b, i, j, k, l, v, accept, gcount, mcount, mutmp, *count, show1, show2; double num, den, un, ratio; double old_like_beta, new_like_beta, old_like_theta, new_like_theta; double update_like_samp, update_like_item, tmp_oldmu, tmp_newmu; double post_a, post_b, school_a, school_b; double *old_samp_distance, *new_samp_distance, *sample_samp_like; double *old_item_distance, *new_item_distance, *sample_item_like; double **sum_mu, **mu_dist, **sum_mu_dist; double **sample_tau, *sum_tau, *var_tau; double **sample_sigma, *sum_sigma, *var_sigma; double **sample_delta, *sum_delta, *var_delta; double **sample_gamma, *sum_gamma, *var_gamma; double **sample_varphi, *sum_varphi, *var_varphi; double var_fix, avg_fix, *var_ran, *avg_ran, avg_beta, var_beta; MM = atoi(argv[1]); //Set Random Seed ltime = time(NULL); stime = (unsigned int)ltime / 2; srand(stime); printf("nseed = %d\n", stime); //Input Number of Thread /* */ //Input Parameters inp = fopen("DATA/parameter.txt", "r"); if (inp == NULL) { printf("Can't open data file\n"); return 0; } fscanf(inp, "%d", &niter); fscanf(inp, "%d", &nburn); fscanf(inp, "%d", &thin); fscanf(inp, "%d", &print); fscanf(inp, "%d", &repeat); fscanf(inp, "%lf", &jump_beta); fscanf(inp, "%lf", &jump_theta); fscanf(inp, "%lf", &jump_mu); fscanf(inp, "%lf", &jump_W); fclose(inp); //The Number of Respondents by Schools ncount = ivector(1, nSCHOOL); inp = fopen("DATA/count.txt", "r"); for (i = 1; i <= nSCHOOL; i++) fscanf(inp, "%d", &ncount[i]); fclose(inp); jump_Z = dvector(1, 10); inp = fopen("DATA/jumprule.txt", "r"); for (i = 1; i <= 10; i++) fscanf(inp, "%lf", &jump_Z[i]); fclose(inp); jump_index = imatrix(1, nSCHOOL, 1, nITEM); inp = fopen("DATA/jumpitem.txt", "r"); for (i = 1; i <= nSCHOOL; i++) for (j = 1; j <= nITEM; j++) fscanf(inp, "%d", &jump_index[i][j]); fclose(inp); //Declare typedef structure and set array of variables in typedef structure totalsize = sizeof(SCHOOL) + sizeof(int) * (nMAX + 1) * (nITEM + 1); totalsize += sizeof(int) * (nMAX + 1) + sizeof(int) * (nITEM + 1); totalsize += sizeof(int) * (nITEM + 1) * (nMAX + 1) * (nMAX + 1); totalsize += sizeof(int) * (nMAX + 1) * (nITEM + 1) * (nITEM + 1); totalsize += sizeof(double) * ((nITEM + 1) * 2 + (nMAX + 1) * 2) + sizeof(double) * ((nITEM + 1) * (nITEM + 1) * 2); totalsize += sizeof(double) * ((nMAX + 1) * (nDIM + 1) * 4 + (nITEM + 1) * (nDIM + 1) * 2); totalsize += sizeof(double) * (((niter - nburn) / thin + 1) * (nITEM + 1) + (nITEM + 1) * 3); totalsize += sizeof(double) * (((niter - nburn) / thin + 1) * (nMAX + 1) + (nMAX + 1) * 3); totalsize += sizeof(double) * (((niter - nburn) / thin + 1) * ((nMAX + 1) * (nDIM + 1) + (nITEM + 1) * (nDIM + 1))); totalsize += sizeof(double) * (((niter - nburn) / thin + 1) + (nDIM + 1)); totalsize += sizeof(double) * ((nMAX + 1) * (nDIM + 1) * 2 + (nITEM + 1) * (nDIM + 1) * 2 + (nMAX + 1) + (nITEM + 1)); totalsize += sizeof(double) * ((nITEM + 1) * (nITEM + 1) * 3); SCHOOL = (YEWON *) malloc(totalsize * (nSCHOOL + 1)); for (k = 0; k <= nSCHOOL; k++) { SCHOOL[k].cbsize = totalsize; SCHOOL[k].dataset = (int **)malloc(sizeof(int *) * (nMAX + 1)); SCHOOL[k].count_samp = (int *)malloc(sizeof(int *) * (nMAX + 1)); SCHOOL[k].count_item = (int *)malloc(sizeof(int *) * (nITEM + 1)); SCHOOL[k].Y = (int 
***)malloc(sizeof(int **) * (nITEM + 1)); SCHOOL[k].U = (int ***)malloc(sizeof(int **) * (nMAX + 1)); SCHOOL[k].oldbeta = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].newbeta = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].oldtheta = (double *)malloc(sizeof(double) * (nMAX + 1)); SCHOOL[k].newtheta = (double *)malloc(sizeof(double) * (nMAX + 1)); SCHOOL[k].old_Zsamp = (double **)malloc(sizeof(double *) * (nMAX + 1)); SCHOOL[k].new_Zsamp = (double **)malloc(sizeof(double *) * (nMAX + 1)); SCHOOL[k].old_Zmean = (double **)malloc(sizeof(double *) * (nMAX + 1)); SCHOOL[k].new_Zmean = (double **)malloc(sizeof(double *) * (nMAX + 1)); SCHOOL[k].old_Zitem = (double **)malloc(sizeof(double *) * (nITEM + 1)); SCHOOL[k].new_Zitem = (double **)malloc(sizeof(double *) * (nITEM + 1)); SCHOOL[k].mean_Z = (double *)malloc(sizeof(double) * (nDIM + 1)); SCHOOL[k].sample_beta = (double **)malloc(sizeof(double *) * ((niter - nburn) / thin + 1)); SCHOOL[k].sample_theta = (double **)malloc(sizeof(double *) * ((niter - nburn) / thin + 1)); SCHOOL[k].sample_sigma = (double *)malloc(sizeof(double) * ((niter - nburn) / thin + 1)); SCHOOL[k].sum_beta = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].var_beta = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].acc_beta = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].sum_theta = (double *)malloc(sizeof(double) * (nMAX + 1)); SCHOOL[k].var_theta = (double *)malloc(sizeof(double) * (nMAX + 1)); SCHOOL[k].acc_theta = (double *)malloc(sizeof(double) * (nMAX + 1)); SCHOOL[k].sample_Zsamp = (double ***)malloc(sizeof(double **) * ((niter - nburn) / thin + 1)); SCHOOL[k].sample_Zitem = (double ***)malloc(sizeof(double **) * ((niter - nburn) / thin + 1)); SCHOOL[k].sample_item_mat = (double **)malloc(sizeof(double *) * (nITEM + 1)); SCHOOL[k].sum_Zsamp = (double **)malloc(sizeof(double *) * (nMAX + 1)); SCHOOL[k].var_Zsamp = (double **)malloc(sizeof(double *) * (nMAX + 1)); SCHOOL[k].acc_Zsamp = (double *)malloc(sizeof(double) * (nMAX + 1)); SCHOOL[k].sum_Zitem = (double **)malloc(sizeof(double *) * (nITEM + 1)); SCHOOL[k].var_Zitem = (double **)malloc(sizeof(double *) * (nITEM + 1)); SCHOOL[k].acc_Zitem = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].old_item_mat = (double **)malloc(sizeof(double *) * (nITEM + 1)); SCHOOL[k].new_item_mat = (double **)malloc(sizeof(double *) * (nITEM + 1)); SCHOOL[k].sum_item_mat = (double **)malloc(sizeof(double *) * (nITEM + 1)); SCHOOL[k].var_item_mat = (double **)malloc(sizeof(double *) * (nITEM + 1)); for (i = 0; i <= nMAX; i++) SCHOOL[k].dataset[i] = (int *)malloc(sizeof(int) * (nITEM + 1)); for (i = 0; i <= nITEM; i++) { SCHOOL[k].Y[i] = (int **)malloc(sizeof(int *) * (nMAX + 1)); for (a = 0; a <= nMAX; a++) SCHOOL[k].Y[i][a] = (int *)malloc(sizeof(int) * (nMAX + 1)); } for (i = 0; i <= nMAX; i++) { SCHOOL[k].U[i] = (int **)malloc(sizeof(int *) * (nITEM + 1)); for (a = 0; a <= nITEM; a++) SCHOOL[k].U[i][a] = (int *)malloc(sizeof(int) * (nITEM + 1)); } for (i = 0; i <= nMAX; i++) { SCHOOL[k].old_Zsamp[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); SCHOOL[k].new_Zsamp[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); SCHOOL[k].old_Zmean[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); SCHOOL[k].new_Zmean[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); } for (i = 0; i <= nITEM; i++) { SCHOOL[k].old_Zitem[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); SCHOOL[k].new_Zitem[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); } for (i = 0; i <= (niter - 
nburn) / thin; i++) { SCHOOL[k].sample_beta[i] = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].sample_theta[i] = (double *)malloc(sizeof(double) * (nMAX + 1)); SCHOOL[k].sample_Zsamp[i] = (double **)malloc(sizeof(double *) * (nMAX + 1)); SCHOOL[k].sample_Zitem[i] = (double **)malloc(sizeof(double *) * (nITEM + 1)); for (j = 0; j <= nMAX; j++) SCHOOL[k].sample_Zsamp[i][j] = (double *)malloc(sizeof(double) * (nDIM + 1)); for (j = 0; j <= nITEM; j++) SCHOOL[k].sample_Zitem[i][j] = (double *)malloc(sizeof(double) * (nDIM + 1)); } for (i = 0; i <= nMAX; i++) { SCHOOL[k].sum_Zsamp[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); SCHOOL[k].var_Zsamp[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); } for (i = 0; i <= nITEM; i++) { SCHOOL[k].sum_Zitem[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); SCHOOL[k].var_Zitem[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); } for (i = 0; i <= nITEM; i++) { SCHOOL[k].sample_item_mat[i] = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].old_item_mat[i] = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].new_item_mat[i] = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].sum_item_mat[i] = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].var_item_mat[i] = (double *)malloc(sizeof(double) * (nITEM + 1)); } printf("MEMORY SETTING: %.2d\n", k); } count = ivector(1, nSCHOOL); oldmu = dmatrix(1, nITEM * (nITEM - 1) / 2, 0, nSCHOOL); olddelta = dvector(1, nITEM * (nITEM - 1) / 2); oldsigma = dvector(1, nSCHOOL); oldtau = dvector(1, nITEM * (nITEM - 1) / 2); oldgamma = dvector(1, nITEM); oldvarphi = dvector(1, nITEM); sample_sigma = dmatrix(1, (niter - nburn) / thin, 1, nSCHOOL); sample_delta = dmatrix(1, (niter - nburn) / thin, 1, nITEM * (nITEM - 1) / 2); sample_tau = dmatrix(1, (niter - nburn) / thin, 1, nITEM * (nITEM - 1) / 2); sample_gamma = dmatrix(1, (niter - nburn) / thin, 1, nITEM); sample_varphi = dmatrix(1, (niter - nburn) / thin, 1, nITEM); sum_mu = dmatrix(1, nITEM * (nITEM - 1) / 2, 0, nSCHOOL); sum_tau = dvector(1, nITEM * (nITEM - 1) / 2); var_tau = dvector(1, nITEM * (nITEM - 1) / 2); sum_sigma = dvector(1, nSCHOOL); var_sigma = dvector(1, nSCHOOL); sum_delta = dvector(1, nITEM * (nITEM - 1) / 2); var_delta = dvector(1, nITEM * (nITEM - 1) / 2); sum_gamma = dvector(1, nITEM); var_gamma = dvector(1, nITEM); sum_varphi = dvector(1, nITEM); var_varphi = dvector(1, nITEM); mu_dist = dmatrix(1, nSCHOOL, 1, nSCHOOL); sum_mu_dist = dmatrix(1, nSCHOOL, 1, nSCHOOL); avg_ran = dvector(1, nSCHOOL); var_ran = dvector(1, nSCHOOL); frname[0] = 'D'; frname[1] = 'A'; frname[2] = 'T'; frname[3] = 'A'; frname[4] = '/'; frname[5] = 'i'; frname[6] = 't'; frname[7] = 'e'; frname[8] = 'm'; frname[11] = '.'; frname[12] = 't'; frname[13] = 'x'; frname[14] = 't'; frname[15] = '\0'; for (k = 0; k <= nSCHOOL; k++) { for (i = 0; i <= nMAX; i++) SCHOOL[k].count_samp[i] = 0; for (i = 0; i <= nITEM; i++) SCHOOL[k].count_item[i] = 0; for (i = 0; i <= nMAX; i++) for (j = 0; j <= nITEM; j++) SCHOOL[k].dataset[i][j] = 0; for (i = 0; i <= nITEM; i++) SCHOOL[k].oldbeta[i] = SCHOOL[k].newbeta[i] = 0.0; for (i = 0; i <= nMAX; i++) SCHOOL[k].oldtheta[i] = SCHOOL[k].newtheta[i] = 0.0; for (i = 0; i <= nITEM; i++) for (j = 0; j <= nITEM; j++) SCHOOL[k].old_item_mat[i][j] = SCHOOL[k].new_item_mat[i][j] = 0.0; for (i = 0; i <= nITEM; i++) for (a = 0; a <= nMAX; a++) for (b = 0; b <= nMAX; b++) SCHOOL[k].Y[i][a][b] = 0; for (i = 0; i <= nMAX; i++) for (a = 0; a <= nITEM; a++) for (b = 0; b <= nITEM; b++) SCHOOL[k].U[i][a][b] = 0; for (i 
= 0; i <= nMAX; i++) for (j = 0; j <= nDIM; j++) SCHOOL[k].old_Zsamp[i][j] = SCHOOL[k].new_Zsamp[i][j] = SCHOOL[k].old_Zmean[i][j] = SCHOOL[k].new_Zmean[i][j] = 0.0; for (i = 0; i <= nITEM; i++) for (j = 0; j <= nDIM; j++) SCHOOL[k].old_Zitem[i][j] = SCHOOL[k].new_Zitem[i][j] = 0.0; for (i = 0; i <= (niter - nburn) / thin; i++) { SCHOOL[k].sample_sigma[i] = 0.0; for (j = 0; j <= nITEM; j++) SCHOOL[k].sample_beta[i][j] = 0.0; for (j = 0; j <= nMAX; j++) SCHOOL[k].sample_theta[i][j] = 0.0; for (a = 0; a <= nMAX; a++) for (b = 0; b <= nDIM; b++) SCHOOL[k].sample_Zsamp[i][a][b] = 0.0; for (a = 0; a <= nITEM; a++) for (b = 0; b <= nDIM; b++) SCHOOL[k].sample_Zitem[i][a][b] = 0.0; } SCHOOL[k].oldsigma = 0.0; SCHOOL[k].sum_sigma = SCHOOL[k].var_sigma = 0.0; for (i = 0; i <= nDIM; i++) SCHOOL[k].mean_Z[i] = 0.0; for (i = 0; i <= nITEM; i++) SCHOOL[k].var_beta[i] = SCHOOL[k].sum_beta[i] = SCHOOL[k].acc_beta[i] = 0.0; for (i = 0; i <= nMAX; i++) SCHOOL[k].var_theta[i] = SCHOOL[k].sum_theta[i] = SCHOOL[k].acc_theta[i] = 0.0; for (i = 0; i <= nMAX; i++) for (j = 0; j <= nDIM; j++) SCHOOL[k].sum_Zsamp[i][j] = SCHOOL[k].var_Zsamp[i][j] = 0.0; for (i = 0; i <= nITEM; i++) for (j = 0; j <= nDIM; j++) SCHOOL[k].sum_Zitem[i][j] = SCHOOL[k].var_Zitem[i][j] = 0.0; for (i = 0; i <= nITEM; i++) for (j = 0; j <= nITEM; j++) SCHOOL[k].sample_item_mat[i][j] = SCHOOL[k].sum_item_mat[i][j] = SCHOOL[k].var_item_mat[i][j] = 0.0; for (i = 0; i <= nMAX; i++) SCHOOL[k].acc_Zsamp[i] = 0.0; for (i = 0; i <= nITEM; i++) SCHOOL[k].acc_Zitem[i] = 0.0; if (k != 0) count[k] = 0; if (k != 0) { if (k < 10) { frname[9] = (char)(48); frname[10] = (char)(k + 48); } else { frname[9] = (char)(k / 10 + 48); frname[10] = (char)(k % 10 + 48); } inp = fopen(frname, "r"); printf("Currently Reading %s\n", frname); if (inp == NULL) { printf("Cannot open data file\n"); return 0; } for (i = 1; i <= ncount[k]; i++) for (j = 1; j <= nITEM; j++) { fscanf(inp, "%d", &SCHOOL[k].dataset[i][j]); SCHOOL[k].count_samp[i] += SCHOOL[k].dataset[i][j]; SCHOOL[k].count_item[j] += SCHOOL[k].dataset[i][j]; } fclose(inp); printf("%.2d\n", k); for (i = 1; i <= ncount[k]; i++) { for (j = 1; j <= nITEM; j++) printf("%d ", SCHOOL[k].dataset[i][j]); printf("\n"); } for (i = 1; i <= nITEM; i++) for (a = 2; a <= ncount[k]; a++) for (b = 1; b < a; b++) { SCHOOL[k].Y[i][a][b] = SCHOOL[k].dataset[a][i] * SCHOOL[k].dataset[b][i]; SCHOOL[k].Y[i][b][a] = SCHOOL[k].Y[i][a][b]; } for (a = 1; a <= ncount[k]; a++) for (i = 2; i <= nITEM; i++) for (j = 1; j < i; j++) { SCHOOL[k].U[a][i][j] = SCHOOL[k].dataset[a][i] * SCHOOL[k].dataset[a][j]; SCHOOL[k].U[a][j][i] = SCHOOL[k].U[a][i][j]; } } printf("INITIALIZATION AND DATA LOADING: %.2d\n", k); } for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) { oldtau[i] = olddelta[i] = 0.0; for (j = 1; j <= nSCHOOL; j++) oldmu[i][j] = 0.0; } for (i = 1; i <= nSCHOOL; i++) oldsigma[i] = 0.0; //Declare Additional Variables sample_samp_like = dvector(1, nMAX); old_samp_distance = dvector(1, nMAX); new_samp_distance = dvector(1, nMAX); sample_item_like = dvector(1, nITEM); old_item_distance = dvector(1, nITEM); new_item_distance = dvector(1, nITEM); pr_var_Z = sqrt(2.0); for (v = 0; v < repeat; v++) { //Initialize Variables for (k = 1; k <= nSCHOOL; k++) { for (i = 1; i <= nITEM; i++) SCHOOL[k].oldbeta[i] = SCHOOL[k].newbeta[i] = 0.0; for (i = 1; i <= ncount[k]; i++) SCHOOL[k].oldtheta[i] = SCHOOL[k].newtheta[i] = 0.0; for (i = 1; i <= ncount[k]; i++) for (j = 1; j <= nDIM; j++) SCHOOL[k].old_Zsamp[i][j] = SCHOOL[k].new_Zsamp[i][j] = 
SCHOOL[k].old_Zmean[i][j] = SCHOOL[k].new_Zmean[i][j] = 0.0; for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) SCHOOL[k].old_Zitem[i][j] = SCHOOL[k].new_Zitem[i][j] = 0.0; for (i = 1; i <= (niter - nburn) / thin; i++) { SCHOOL[k].sample_sigma[i] = 0.0; for (j = 1; j <= nITEM; j++) SCHOOL[k].sample_beta[i][j] = 0.0; for (j = 1; j <= ncount[k]; j++) SCHOOL[k].sample_theta[i][j] = 0.0; for (a = 1; a <= ncount[k]; a++) for (b = 1; b <= nDIM; b++) SCHOOL[k].sample_Zsamp[i][a][b] = 0.0; for (a = 1; a <= nITEM; a++) for (b = 1; b <= nDIM; b++) SCHOOL[k].sample_Zitem[i][a][b] = 0.0; } for (i = 1; i <= nITEM; i++) SCHOOL[k].var_beta[i] = SCHOOL[k].sum_beta[i] = SCHOOL[k].acc_beta[i] = 0.0; for (i = 1; i <= ncount[k]; i++) SCHOOL[k].var_theta[i] = SCHOOL[k].sum_theta[i] = SCHOOL[k].acc_theta[i] = 0.0; for (i = 1; i <= ncount[k]; i++) for (j = 1; j <= nDIM; j++) SCHOOL[k].sum_Zsamp[i][j] = SCHOOL[k].var_Zsamp[i][j] = 0.0; for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) SCHOOL[k].sum_Zitem[i][j] = SCHOOL[k].var_Zitem[i][j] = 0.0; for (i = 1; i <= nITEM; i++) SCHOOL[k].acc_Zitem[i] = 0.0; for (i = 1; i <= nMAX; i++) SCHOOL[k].acc_Zsamp[i] = 0.0; for (i = 1; i <= nITEM; i++) for (j = 1; j <= nITEM; j++) { SCHOOL[k].sample_item_mat[i][j] = 0.0; SCHOOL[k].old_item_mat[i][j] = SCHOOL[k].new_item_mat[i][j] = 0.0; SCHOOL[k].sum_item_mat[i][j] = SCHOOL[k].var_item_mat[i][j] = 0.0; } for (i = 0; i <= nDIM; i++) SCHOOL[k].mean_Z[i] = 0.0; SCHOOL[k].oldsigma = SCHOOL[k].sum_sigma = SCHOOL[k].var_sigma = 0.0; count[k] = 0; } for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) { olddelta[i] = oldtau[i] = 0.0; sum_delta[i] = var_delta[i] = 0.0; sum_tau[i] = var_tau[i] = 0.0; for (j = 1; j <= (niter - nburn) / thin; j++) sample_tau[j][i] = sample_delta[j][i] = 0.0; } for (i = 1; i <= nSCHOOL; i++) { oldsigma[i] = 0.0; sum_sigma[i] = var_sigma[i] = 0.0; for (j = 1; j <= (niter - nburn) / thin; j++) sample_sigma[j][i] = 0.0; } for (k = 1; k <= nSCHOOL; k++) for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) oldmu[i][k] = sum_mu[i][k] = 0.0; for (i = 1; i <= nITEM; i++) { oldgamma[i] = oldvarphi[i] = 0.0; sum_gamma[i] = var_gamma[i] = 0.0; sum_varphi[i] = var_varphi[i] = 0.0; for (j = 1; j <= (niter - nburn) / thin; j++) sample_gamma[j][i] = sample_varphi[j][i] = 0.0; } for (i = 1; i <= nSCHOOL; i++) for (j = 1; j <= nSCHOOL; j++) sum_mu_dist[i][j] = 0.0; //Generate Initial Values for beta ,Z, sigma for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) { olddelta[i] = -1.5 + 3.0 * rand() / RAND_MAX; oldtau[i] = 100.0; for (j = 1; j <= nSCHOOL; j++) oldmu[i][j] = -1.5 + 3.0 * rand() / RAND_MAX; } for (i = 1; i <= nSCHOOL; i++) oldsigma[i] = 100.0; for (i = 1; i <= nITEM; i++) { oldgamma[i] = -1.5 + 3.0 * rand() / RAND_MAX; oldvarphi[i] = 100.0; } for (k = 1; k <= nSCHOOL; k++) { SCHOOL[k].oldsigma = 0.05 * 0.05; for (i = 1; i <= nITEM; i++) SCHOOL[k].oldbeta[i] = -1.5 + 3.0 * rand() / RAND_MAX; for (i = 1; i <= ncount[k]; i++) SCHOOL[k].oldtheta[i] = -1.5 + 3.0 * rand() / RAND_MAX; for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) SCHOOL[k].old_Zitem[i][j] = SCHOOL[k].new_Zitem[i][j] = -1.5 + 3.0 * rand() / RAND_MAX; for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) for (a = 1; a <= ncount[k]; a++) if (SCHOOL[k].dataset[a][i] == 1) SCHOOL[k].old_Zmean[a][j] += SCHOOL[k].old_Zitem[i][j] / (SCHOOL[k].count_samp[a] * 1.0); for (i = 1; i <= ncount[k]; i++) for (j = 1; j <= nDIM; j++) SCHOOL[k].new_Zmean[i][j] = SCHOOL[k].old_Zmean[i][j]; for (i = 1; i <= ncount[k]; i++) for (j = 1; j <= nDIM; j++) 
SCHOOL[k].new_Zsamp[i][j] = SCHOOL[k].old_Zsamp[i][j] = SCHOOL[k].old_Zmean[i][j] + sqrt(SCHOOL[k].oldsigma) * gasdev(); for (i = 2; i <= nITEM; i++) for (j = 1; j < i; j++) { for (l = 1; l <= nDIM; l++) SCHOOL[k].old_item_mat[i][j] += pow((SCHOOL[k].old_Zitem[i][l] - SCHOOL[k].old_Zitem[j][l]), 2.0); SCHOOL[k].old_item_mat[i][j] = sqrt(SCHOOL[k].old_item_mat[i][j]); SCHOOL[k].old_item_mat[j][i] = SCHOOL[k].old_item_mat[i][j]; } for (i = 1; i <= nITEM; i++) for (j = 1; j <= nITEM; j++) SCHOOL[k].new_item_mat[i][j] = SCHOOL[k].old_item_mat[i][j]; } //MCMC Implementation for Parameter Estimation frname[0] = 'R'; frname[1] = 'E'; frname[2] = 'S'; frname[3] = 'U'; frname[4] = 'L'; frname[5] = 'T'; frname[6] = '/'; frname[7] = 's'; frname[8] = 'i'; frname[9] = 'm'; frname[10] = '_'; frname[12] = (char)(48 + MM); frname[13] = '.'; frname[14] = 'l'; frname[15] = 'o'; frname[16] = 'g'; frname[17] = '\0'; frname[11] = 's'; HUR = fopen(frname, "a"); frname[11] = 'l'; JYW = fopen(frname, "a"); frname[11] = 'u'; OUT = fopen(frname, "a"); frname[11] = 'g'; JIN = fopen(frname, "a"); frname[11] = 'p'; PRT = fopen(frname, "a"); frname[11] = 'a'; JJW = fopen(frname, "a"); gcount = mcount = 0; for (iter = 1; iter <= niter; iter++) { for (a = 1; a <= nSCHOOL; a++) { for (i = 1; i <= nITEM; i++) { // for (j = 1; j <= nDIM; j++) { SCHOOL[a].new_Zitem[i][j] = SCHOOL[a].old_Zitem[i][j] + jump_Z[jump_index[a][i]] * gasdev(); for (k = 1; k <= ncount[a]; k++) if (SCHOOL[a].dataset[k][i] == 1) { SCHOOL[a].new_Zmean[k][j] -= SCHOOL[a].old_Zitem[i][j] / (SCHOOL[a].count_samp[k] * 1.0); SCHOOL[a].new_Zmean[k][j] += SCHOOL[a].new_Zitem[i][j] / (SCHOOL[a].count_samp[k] * 1.0); } } for (ind = 1; ind <= nITEM; ind++) sample_item_like[ind] = old_item_distance[ind] = new_item_distance[ind] = 0.0; // for (ind = 1; ind <= nITEM; ind++) if (ind != i) { for (l = 1; l <= nDIM; l++) { old_item_distance[ind] += pow((SCHOOL[a].old_Zitem[ind][l] - SCHOOL[a].old_Zitem[i][l]), 2.0); new_item_distance[ind] += pow((SCHOOL[a].new_Zitem[ind][l] - SCHOOL[a].new_Zitem[i][l]), 2.0); } old_item_distance[ind] = sqrt(old_item_distance[ind]); new_item_distance[ind] = sqrt(new_item_distance[ind]); SCHOOL[a].new_item_mat[ind][i] = new_item_distance[ind]; SCHOOL[a].new_item_mat[i][ind] = SCHOOL[a].new_item_mat[ind][i]; SCHOOL[a].old_item_mat[ind][i] = old_item_distance[ind]; SCHOOL[a].old_item_mat[i][ind] = SCHOOL[a].old_item_mat[ind][i]; for (k = 1; k <= ncount[a]; k++) { if (SCHOOL[a].U[k][ind][i] == 1) { sample_item_like[ind] -= -log(1.0 + exp(-(SCHOOL[a].oldtheta[k] - old_item_distance[ind]))); sample_item_like[ind] += -log(1.0 + exp(-(SCHOOL[a].oldtheta[k] - new_item_distance[ind]))); } else { sample_item_like[ind] -= -log(1.0 + exp(SCHOOL[a].oldtheta[k] - old_item_distance[ind])); sample_item_like[ind] += -log(1.0 + exp(SCHOOL[a].oldtheta[k] - new_item_distance[ind])); } } } update_like_item = 0.0; for (ind = 1; ind <= nITEM; ind++) update_like_item += sample_item_like[ind]; num = den = 0.0; for (j = 2; j <= nITEM; j++) for (k = 1; k < j; k++) { if (SCHOOL[a].new_item_mat[j][k] > 0.0001) num += dlognorm(log(SCHOOL[a].new_item_mat[j][k]), olddelta[((j - 1) * (j - 2) / 2 + k)], sqrt(oldtau[((j - 1) * (j - 2) / 2 + k)])); else num += dlognorm(log(0.0001), olddelta[((j - 1) * (j - 2) / 2 + k)], sqrt(oldtau[((j - 1) * (j - 2) / 2 + k)])); if (SCHOOL[a].old_item_mat[j][k] > 0.0001) den += dlognorm(log(SCHOOL[a].old_item_mat[j][k]), olddelta[((j - 1) * (j - 2) / 2 + k)], sqrt(oldtau[((j - 1) * (j - 2) / 2 + k)])); else den += dlognorm(log(0.0001), 
olddelta[((j - 1) * (j - 2) / 2 + k)], sqrt(oldtau[((j - 1) * (j - 2) / 2 + k)])); //printf("%d %d-%.3f %.3f %.3f %.3f %.3f\n", j, k, num, den, oldmu[((j - 1) * (j - 2) / 2 + k)][a], log(SCHOOL[a].new_item_mat[j][k]), log(SCHOOL[a].old_item_mat[j][k])); } ratio = update_like_item + (num - den); //printf("SCHOOL-%.2d, ITEM-%.2d: Num-%.3f, Den-%.3f\n", a, i, num, den); if (ratio > 0.0) accept = 1; else { un = rand() * 1.0 / RAND_MAX; if (log(un) < ratio) accept = 1; else accept = 0; } if (accept == 1) { for (j = 1; j <= nDIM; j++) { SCHOOL[a].old_Zitem[i][j] = SCHOOL[a].new_Zitem[i][j]; for (k = 1; k <= ncount[a]; k++) if (SCHOOL[a].dataset[k][i] == 1) SCHOOL[a].old_Zmean[k][j] = SCHOOL[a].new_Zmean[k][j]; } SCHOOL[a].acc_Zitem[i] += 1.0 / niter; for (j = 1; j <= nITEM; j++) for (k = 1; k <= nITEM; k++) SCHOOL[a].old_item_mat[j][k] = SCHOOL[a].new_item_mat[j][k]; } else { for (j = 1; j <= nDIM; j++) { SCHOOL[a].new_Zitem[i][j] = SCHOOL[a].old_Zitem[i][j]; for (k = 1; k <= ncount[a]; k++) if (SCHOOL[a].dataset[k][i] == 1) SCHOOL[a].new_Zmean[k][j] = SCHOOL[a].old_Zmean[k][j]; } for (j = 1; j <= nITEM; j++) for (k = 1; k <= nITEM; k++) SCHOOL[a].new_item_mat[j][k] = SCHOOL[a].old_item_mat[j][k]; } } for (i = 1; i <= ncount[a]; i++) { for (j = 1; j <= nDIM; j++) SCHOOL[a].new_Zsamp[i][j] = SCHOOL[a].old_Zsamp[i][j] + jump_W * gasdev(); for (ind = 1; ind <= ncount[a]; ind++) sample_samp_like[ind] = old_samp_distance[ind] = new_samp_distance[ind] = 0.0; // for (ind = 1; ind <= ncount[a]; ind++) if (ind != i) { for (l = 1; l <= nDIM; l++) { old_samp_distance[ind] += pow((SCHOOL[a].old_Zsamp[ind][l] - SCHOOL[a].old_Zsamp[i][l]), 2.0); new_samp_distance[ind] += pow((SCHOOL[a].old_Zsamp[ind][l] - SCHOOL[a].new_Zsamp[i][l]), 2.0); } old_samp_distance[ind] = sqrt(old_samp_distance[ind]); new_samp_distance[ind] = sqrt(new_samp_distance[ind]); for (k = 1; k <= nITEM; k++) { if (SCHOOL[a].Y[k][ind][i] == 1) { sample_samp_like[ind] -= -log(1.0 + exp(-(SCHOOL[a].oldbeta[k] - old_samp_distance[ind]))); sample_samp_like[ind] += -log(1.0 + exp(-(SCHOOL[a].oldbeta[k] - new_samp_distance[ind]))); } else { sample_samp_like[ind] -= -log(1.0 + exp(SCHOOL[a].oldbeta[k] - old_samp_distance[ind])); sample_samp_like[ind] += -log(1.0 + exp(SCHOOL[a].oldbeta[k] - new_samp_distance[ind])); } } } update_like_samp = 0.0; for (ind = 1; ind <= ncount[a]; ind++) update_like_samp += sample_samp_like[ind]; //printf("SCHOOL-%.2d, PERSON-%.2d: LIKELIHOOD_PERSON-%.3f\n", a, i, update_like_samp); num = den = 0.0; //printf("SCHOOL-%.2d, PERSON-%.2d: Num-%.3f, Den-%.3f\n", a, i, num, den); for (j = 1; j <= nDIM; j++) { num += dlognorm(SCHOOL[a].new_Zsamp[i][j], SCHOOL[a].old_Zmean[i][j], sqrt(SCHOOL[a].oldsigma)); den += dlognorm(SCHOOL[a].old_Zsamp[i][j], SCHOOL[a].old_Zmean[i][j], sqrt(SCHOOL[a].oldsigma)); } ratio = update_like_samp + (num - den); //printf("SCHOOL-%.2d, PERSON-%.2d: Num-%.3f, Den-%.3f\n", a, i, num, den); if (ratio > 0.0) accept = 1; else { un = rand() * 1.0 / RAND_MAX; if (log(un) < ratio) accept = 1; else accept = 0; } if (accept == 1) { for (j = 1; j <= nDIM; j++) SCHOOL[a].old_Zsamp[i][j] = SCHOOL[a].new_Zsamp[i][j]; SCHOOL[a].acc_Zsamp[i] += 1.0 / niter; } else { for (j = 1; j <= nDIM; j++) SCHOOL[a].new_Zsamp[i][j] = SCHOOL[a].old_Zsamp[i][j]; } } SCHOOL[a].post_a = prior_a; SCHOOL[a].post_b = prior_b; for (i = 1; i <= ncount[a]; i++) for (j = 1; j <= nDIM; j++) { SCHOOL[a].post_a += 0.5; SCHOOL[a].post_b += 0.5 * (SCHOOL[a].old_Zsamp[i][j] - SCHOOL[a].old_Zmean[i][j]) * (SCHOOL[a].old_Zsamp[i][j] - 
SCHOOL[a].old_Zmean[i][j]); } SCHOOL[a].oldsigma = 1.0 / Rgamma(SCHOOL[a].post_a, SCHOOL[a].post_b); //2. Update $ \ beta_i$ from the proposal distribution $ \ phi_2(\cdot) $ // for (i = 1; i <= nITEM; i++) { old_like_beta = cost_beta(i, SCHOOL[a].oldbeta[i], a); SCHOOL[a].newbeta[i] = SCHOOL[a].oldbeta[i] + jump_beta * gasdev(); if (fabs(SCHOOL[a].newbeta[i]) < 7.0) { new_like_beta = cost_beta(i, SCHOOL[a].newbeta[i], a); num = new_like_beta; den = old_like_beta; num += dlognorm(SCHOOL[a].oldbeta[i], oldgamma[i], sqrt(oldvarphi[i])); den += dlognorm(SCHOOL[a].newbeta[i], oldgamma[i], sqrt(oldvarphi[i])); ratio = num - den; if (ratio > 0.0) accept = 1; else { un = rand() * 1.0 / RAND_MAX; if (log(un) < ratio) accept = 1; else accept = 0; } } else accept = 0; if (accept == 1) { SCHOOL[a].oldbeta[i] = SCHOOL[a].newbeta[i]; SCHOOL[a].acc_beta[i] += 1.0 / niter; } else SCHOOL[a].newbeta[i] = SCHOOL[a].oldbeta[i]; } // for (i = 1; i <= ncount[a]; i++) { old_like_theta = cost_theta(i, SCHOOL[a].oldtheta[i], a); SCHOOL[a].newtheta[i] = SCHOOL[a].oldtheta[i] + jump_theta * gasdev(); new_like_theta = cost_theta(i, SCHOOL[a].newtheta[i], a); num = dlognorm(SCHOOL[a].newtheta[i], pr_mean_theta, pr_var_theta) + new_like_theta; den = dlognorm(SCHOOL[a].oldtheta[i], pr_mean_theta, pr_var_theta) + old_like_theta; ratio = num - den; if (ratio > 0.0) accept = 1; else { un = rand() * 1.0 / RAND_MAX; if (log(un) < ratio) accept = 1; else accept = 0; } if (accept == 1) { SCHOOL[a].oldtheta[i] = SCHOOL[a].newtheta[i]; SCHOOL[a].acc_theta[i] += 1.0 / niter; } else SCHOOL[a].newtheta[i] = SCHOOL[a].oldtheta[i]; } //Save MCMC Results to Files and Repository Variables if (iter > nburn && iter % thin == 0) { count[a]++; for (i = 1; i <= ncount[a]; i++) for (j = 1; j <= nDIM; j++) SCHOOL[a].sample_Zsamp[count[a]][i][j] = SCHOOL[a].old_Zsamp[i][j]; for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) SCHOOL[a].sample_Zitem[count[a]][i][j] = SCHOOL[a].old_Zitem[i][j]; for (i = 1; i <= nITEM; i++) SCHOOL[a].sample_beta[count[a]][i] = SCHOOL[a].oldbeta[i]; for (i = 1; i <= ncount[a]; i++) SCHOOL[a].sample_theta[count[a]][i] = SCHOOL[a].oldtheta[i]; SCHOOL[a].sample_sigma[count[a]] = SCHOOL[a].oldsigma; } //Print MCMC Results to Screen if (iter % print == 0) { printf("%.5d-BETA%.2d ", iter, a); for (i = 1; i <= nITEM; i++) printf("% .4f ", SCHOOL[a].oldbeta[i]); printf("%.4f\n", SCHOOL[a].oldsigma); } } // for (i = 1; i <= nITEM; i++) { school_a = prior_a; school_b = prior_b; for (j = 1; j <= nSCHOOL; j++) { school_a += 0.5; school_b += 0.5 * (SCHOOL[j].oldbeta[i] - oldgamma[i]) * (SCHOOL[j].oldbeta[i] - oldgamma[i]); } oldvarphi[i] = 1.0 / Rgamma(school_a, school_b); var_beta = 1.0 / (1.0 / pr_var_gamma + nSCHOOL / oldvarphi[i]); avg_beta = 0.0; for (j = 1; j <= nSCHOOL; j++) avg_beta += SCHOOL[j].oldbeta[i] / nSCHOOL; avg_beta *= var_beta * (nSCHOOL / oldvarphi[i]); oldgamma[i] = avg_beta + sqrt(var_beta) * gasdev(); } for (i = 2; i <= nITEM; i++) for (j = 1; j < i; j++) { post_a = prior_a; post_b = prior_b; for (k = 1; k <= nSCHOOL; k++) { post_a += 0.5; if (SCHOOL[k].old_item_mat[i][j] > 0.0001) post_b += 0.5 * (log(SCHOOL[k].old_item_mat[i][j]) - olddelta[((i - 1) * (i - 2) / 2 + j)]) * (log(SCHOOL[k].old_item_mat[i][j]) - olddelta[((i - 1) * (i - 2) / 2 + j)]); else post_b += 0.5 * (log(0.0001) - olddelta[((i - 1) * (i - 2) / 2 + j)]) * (log(0.0001) - olddelta[((i - 1) * (i - 2) / 2 + j)]); } oldtau[((i - 1) * (i - 2) / 2 + j)] = 1.0 / Rgamma(post_a, post_b); var_fix = 1.0 / (1.0 / pr_var_delta + nSCHOOL / 
oldtau[((i - 1) * (i - 2) / 2 + j)]); avg_fix = 0.0; for (k = 1; k <= nSCHOOL; k++) { if (SCHOOL[k].old_item_mat[i][j] > 0.0001) avg_fix += (1.0 / oldtau[((i - 1) * (i - 2) / 2 + j)]) * log(SCHOOL[k].old_item_mat[i][j]); else avg_fix += (1.0 / oldtau[((i - 1) * (i - 2) / 2 + j)]) * log(0.0001); } avg_fix *= var_fix; olddelta[((i - 1) * (i - 2) / 2 + j)] = avg_fix + sqrt(var_fix) * gasdev(); } if (iter % print == 0) for (i = 1; i <= nITEM; i++) { printf("%.5d-GAMMA, VARPHI, ITEM%.2d: ", iter, i); printf("% .4f %.4f\n", oldgamma[i], oldvarphi[i]); } if (iter > nburn && iter % thin == 0) { gcount++; for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) { sample_tau[gcount][i] = sqrt(oldtau[i]); sample_delta[gcount][i] = olddelta[i]; fprintf(JYW, "% .4f ", sample_delta[gcount][i]); fprintf(OUT, "%.4f ", sample_tau[gcount][i]); } for (i = 1; i <= nSCHOOL; i++) { sample_sigma[gcount][i] = sqrt(oldsigma[i]); fprintf(HUR, "%.4f ", sample_sigma[gcount][i]); } for (k = 1; k <= nSCHOOL; k++) for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) sum_mu[i][k] += oldmu[i][k] / ((niter - nburn) / thin); for (i = 1; i <= nITEM; i++) { sample_gamma[gcount][i] = oldgamma[i]; sample_varphi[gcount][i] = sqrt(oldvarphi[i]); fprintf(JIN, "% .4f ", sample_gamma[gcount][i]); fprintf(PRT, "%.4f ", sample_varphi[gcount][i]); } for (i = 1; i <= nSCHOOL; i++) for (j = 1; j <= nSCHOOL; j++) mu_dist[i][j] = 0.0; for (k = 1; k <= nITEM * (nITEM - 1) / 2; k++) for (i = 2; i <= nSCHOOL; i++) for (j = 1; j < i; j++) mu_dist[i][j] += (oldmu[k][i] - oldmu[k][j]) * (oldmu[k][i] - oldmu[k][j]); for (i = 2; i <= nSCHOOL; i++) for (j = 1; j < i; j++) mu_dist[j][i] = mu_dist[i][j]; for (i = 1; i <= nSCHOOL; i++) for (j = 1; j <= nSCHOOL; j++) sum_mu_dist[i][j] += sqrt(mu_dist[i][j]) / ((niter - nburn) / thin); for (i = 2; i <= nSCHOOL; i++) for (j = 1; j < i; j++) fprintf(JJW, "%.4f ", sqrt(mu_dist[i][j])); fprintf(HUR, "\n"); fprintf(OUT, "\n"); fprintf(JYW, "\n"); fprintf(JIN, "\n"); fprintf(PRT, "\n"); fprintf(JJW, "\n"); } } fclose(HUR); fclose(JYW); fclose(OUT); fclose(JIN); fclose(PRT); fclose(JJW); frname[0] = 'R'; frname[1] = 'E'; frname[2] = 'S'; frname[3] = 'U'; frname[4] = 'L'; frname[5] = 'T'; frname[6] = '/'; frname[7] = 's'; frname[8] = 'i'; frname[9] = 'm'; frname[12] = '_'; frname[14] = (char)(48 + MM); frname[15] = '.'; frname[16] = 'l'; frname[17] = 'o'; frname[18] = 'g'; frname[19] = '\0'; for (a = 1; a <= nSCHOOL; a++) { if (a < 10) { frname[10] = (char)(48); frname[11] = (char)(a + 48); } else { frname[10] = (char)(a / 10 + 48); frname[11] = (char)(a % 10 + 48); } frname[13] = 'z'; JIN = fopen(frname, "a"); frname[13] = 'b'; HUR = fopen(frname, "a"); frname[13] = 't'; OUT = fopen(frname, "a"); frname[13] = 'i'; JYW = fopen(frname, "a"); frname[13] = 'h'; ASA = fopen(frname, "a"); for (k = 1; k <= count[a]; k++) { for (i = 1; i <= ncount[a]; i++) for (j = 1; j <= nDIM; j++) fprintf(JIN, "% .4f ", SCHOOL[a].sample_Zsamp[k][i][j]); fprintf(JIN, "\n"); for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) fprintf(JYW, "% .4f ", SCHOOL[a].sample_Zitem[k][i][j]); fprintf(JYW, "\n"); for (i = 1; i <= nITEM; i++) fprintf(HUR, "% .4f ", SCHOOL[a].sample_beta[k][i]); fprintf(HUR, "\n"); for (i = 1; i <= ncount[a]; i++) fprintf(OUT, "% .4f ", SCHOOL[a].sample_theta[k][i]); fprintf(OUT, "\n"); fprintf(ASA, "%.4f\n", SCHOOL[a].sample_sigma[k]); } fclose(JIN); fclose(HUR); fclose(OUT); fclose(JYW); fclose(ASA); } //Calculate Mean and Variance of MCMC Estimators for (a = 1; a <= nSCHOOL; a++) { for (i = 1; i <= count[a]; i++) { 
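/*
 * Running-moment bookkeeping (added note, not in the original source): each
 * stored draw x contributes x / count[a] to the posterior mean and
 * x * x / (count[a] - 1) to a raw second moment; after this loop the sample
 * variance is recovered by subtracting mean * mean * count[a] / (count[a] - 1),
 * i.e. var = (sum of x^2 - count[a] * mean^2) / (count[a] - 1).
 */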
SCHOOL[a].sum_sigma += SCHOOL[a].sample_sigma[i] / count[a]; SCHOOL[a].var_sigma += SCHOOL[a].sample_sigma[i] * SCHOOL[a].sample_sigma[i] / (count[a] - 1); for (j = 1; j <= nITEM; j++) { SCHOOL[a].sum_beta[j] += SCHOOL[a].sample_beta[i][j] / count[a]; SCHOOL[a].var_beta[j] += SCHOOL[a].sample_beta[i][j] * SCHOOL[a].sample_beta[i][j] / (count[a] - 1); } for (j = 1; j <= ncount[a]; j++) { SCHOOL[a].sum_theta[j] += SCHOOL[a].sample_theta[i][j] / count[a]; SCHOOL[a].var_theta[j] += SCHOOL[a].sample_theta[i][j] * SCHOOL[a].sample_theta[i][j] / (count[a] - 1); } for (j = 1; j <= ncount[a]; j++) for (k = 1; k <= nDIM; k++) { SCHOOL[a].sum_Zsamp[j][k] += SCHOOL[a].sample_Zsamp[i][j][k] / count[a]; SCHOOL[a].var_Zsamp[j][k] += SCHOOL[a].sample_Zsamp[i][j][k] * SCHOOL[a].sample_Zsamp[i][j][k] / (count[a] - 1); } for (j = 1; j <= nITEM; j++) for (k = 1; k <= nDIM; k++) { SCHOOL[a].sum_Zitem[j][k] += SCHOOL[a].sample_Zitem[i][j][k] / count[a]; SCHOOL[a].var_Zitem[j][k] += SCHOOL[a].sample_Zitem[i][j][k] * SCHOOL[a].sample_Zitem[i][j][k] / (count[a] - 1); } for (j = 1; j <= nITEM; j++) for (k = 1; k <= nITEM; k++) SCHOOL[a].sample_item_mat[j][k] = 0.0; for (j = 2; j <= nITEM; j++) for (k = 1; k < j; k++) for (l = 1; l <= nDIM; l++) SCHOOL[a].sample_item_mat[j][k] += pow((SCHOOL[a].sample_Zitem[i][j][l] - SCHOOL[a].sample_Zitem[i][k][l]), 2.0); for (j = 2; j <= nITEM; j++) for (k = 1; k < j; k++) SCHOOL[a].sample_item_mat[k][j] = SCHOOL[a].sample_item_mat[j][k]; for (j = 1; j <= nITEM; j++) for (k = 1; k <= nITEM; k++) { SCHOOL[a].sum_item_mat[j][k] += SCHOOL[a].sample_item_mat[j][k] / count[a]; SCHOOL[a].var_item_mat[j][k] += SCHOOL[a].sample_item_mat[j][k] * SCHOOL[a].sample_item_mat[j][k] / (count[a] - 1); } } SCHOOL[a].var_sigma -= SCHOOL[a].sum_sigma * SCHOOL[a].sum_sigma * count[a] / (count[a] - 1); for (i = 1; i <= nITEM; i++) SCHOOL[a].var_beta[i] -= SCHOOL[a].sum_beta[i] * SCHOOL[a].sum_beta[i] * count[a] / (count[a] - 1); for (i = 1; i <= ncount[a]; i++) SCHOOL[a].var_theta[i] -= SCHOOL[a].sum_theta[i] * SCHOOL[a].sum_theta[i] * count[a] / (count[a] - 1); for (i = 1; i <= ncount[a]; i++) for (j = 1; j <= nDIM; j++) SCHOOL[a].var_Zsamp[i][j] -= SCHOOL[a].sum_Zsamp[i][j] * SCHOOL[a].sum_Zsamp[i][j] * count[a] / (count[a] - 1); for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) SCHOOL[a].var_Zitem[i][j] -= SCHOOL[a].sum_Zitem[i][j] * SCHOOL[a].sum_Zitem[i][j] * count[a] / (count[a] - 1); for (i = 1; i <= nITEM; i++) for (j = 1; j <= nITEM; j++) SCHOOL[a].var_item_mat[i][j] -= SCHOOL[a].sum_item_mat[i][j] * SCHOOL[a].sum_item_mat[i][j] * count[a] / (count[a] - 1); } for (i = 1; i <= gcount; i++) { for (j = 1; j <= nITEM * (nITEM - 1) / 2; j++) { sum_tau[j] += sample_tau[i][j] / gcount; sum_delta[j] += sample_delta[i][j] / gcount; var_tau[j] += sample_tau[i][j] * sample_tau[i][j] / (gcount - 1); var_delta[j] += sample_delta[i][j] * sample_delta[i][j] / (gcount - 1); } for (j = 1; j <= nSCHOOL; j++) { sum_sigma[j] += sample_sigma[i][j] / gcount; var_sigma[j] += sample_sigma[i][j] * sample_sigma[i][j] / (gcount - 1); } for (j = 1; j <= nITEM; j++) { sum_gamma[j] += sample_gamma[i][j] / gcount; sum_varphi[j] += sample_varphi[i][j] / gcount; var_gamma[j] += sample_gamma[i][j] * sample_gamma[i][j] / (gcount - 1); var_varphi[j] += sample_varphi[i][j] * sample_varphi[i][j] / (gcount - 1); } } for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) { var_tau[i] -= sum_tau[i] * sum_tau[i] * gcount / (gcount - 1); var_delta[i] -= sum_delta[i] * sum_delta[i] * gcount / (gcount - 1); } for (i = 1; i <= nSCHOOL; 
i++) var_sigma[i] -= sum_sigma[i] * sum_sigma[i] * gcount / (gcount - 1); for (i = 1; i <= nITEM; i++) { var_gamma[i] -= sum_gamma[i] * sum_gamma[i] * gcount / (gcount - 1); var_varphi[i] -= sum_varphi[i] * sum_varphi[i] * gcount / (gcount - 1); } //Save Parameter Estimates frname[0] = 'R'; frname[1] = 'E'; frname[2] = 'S'; frname[3] = 'U'; frname[4] = 'L'; frname[5] = 'T'; frname[6] = '/'; frname[7] = 's'; frname[8] = 'u'; frname[9] = 'm'; frname[12] = '_'; frname[14] = (char)(48 + MM); frname[15] = '.'; frname[16] = 'l'; frname[17] = 'o'; frname[18] = 'g'; frname[19] = '\0'; for (a = 1; a <= nSCHOOL; a++) { if (a < 10) { frname[10] = (char)(48); frname[11] = (char)(a + 48); } else { frname[10] = (char)(a / 10 + 48); frname[11] = (char)(a % 10 + 48); } frname[13] = 'z'; JIN = fopen(frname, "a"); frname[13] = 'b'; HUR = fopen(frname, "a"); frname[13] = 't'; OUT = fopen(frname, "a"); frname[13] = 'i'; JYW = fopen(frname, "a"); frname[13] = 'd'; PRT = fopen(frname, "a"); for (i = 1; i <= nITEM; i++) fprintf(HUR, "%.4f ", SCHOOL[a].sum_beta[i]); fprintf(HUR, "\n"); for (i = 1; i <= nITEM; i++) fprintf(HUR, "%.4f ", SCHOOL[a].var_beta[i]); fprintf(HUR, "\n"); for (i = 1; i <= nITEM; i++) fprintf(HUR, "%.4f ", SCHOOL[a].acc_beta[i]); fprintf(HUR, "\n"); for (i = 1; i <= ncount[a]; i++) fprintf(OUT, "%.4f ", SCHOOL[a].sum_theta[i]); fprintf(OUT, "\n"); for (i = 1; i <= ncount[a]; i++) fprintf(OUT, "%.4f ", SCHOOL[a].var_theta[i]); fprintf(OUT, "\n"); for (i = 1; i <= ncount[a]; i++) fprintf(OUT, "%.4f ", SCHOOL[a].acc_theta[i]); fprintf(OUT, "\n"); for (i = 1; i <= ncount[a]; i++) for (j = 1; j <= nDIM; j++) fprintf(JIN, "%.4f ", SCHOOL[a].sum_Zsamp[i][j]); fprintf(JIN, "\n"); for (i = 1; i <= ncount[a]; i++) for (j = 1; j <= nDIM; j++) fprintf(JIN, "%.4f ", SCHOOL[a].var_Zsamp[i][j]); fprintf(JIN, "\n"); for (i = 1; i <= ncount[a]; i++) for (j = 1; j <= nDIM; j++) fprintf(JIN, "%.4f ", SCHOOL[a].acc_Zsamp[i]); fprintf(JIN, "\n"); for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) fprintf(JYW, "%.4f ", SCHOOL[a].sum_Zitem[i][j]); fprintf(JYW, "\n"); for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) fprintf(JYW, "%.4f ", SCHOOL[a].var_Zitem[i][j]); fprintf(JYW, "\n"); for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) fprintf(JYW, "%.4f ", SCHOOL[a].acc_Zitem[i]); fprintf(JYW, "\n"); for (i = 2; i <= nITEM; i++) for (j = 1; j < i; j++) fprintf(PRT, "%.4f ", SCHOOL[a].sum_item_mat[i][j]); fprintf(PRT, "\n"); for (i = 2; i <= nITEM; i++) for (j = 1; j < i; j++) fprintf(PRT, "%.4f ", SCHOOL[a].var_item_mat[i][j]); fprintf(PRT, "\n"); fclose(JIN); fclose(HUR); fclose(OUT); fclose(JYW); fclose(PRT); } frname[0] = 'R'; frname[1] = 'E'; frname[2] = 'S'; frname[3] = 'U'; frname[4] = 'L'; frname[5] = 'T'; frname[6] = '/'; frname[7] = 's'; frname[8] = 'u'; frname[9] = 'm'; frname[10] = '_'; frname[12] = (char)(48 + MM); frname[13] = '.'; frname[14] = 'l'; frname[15] = 'o'; frname[16] = 'g'; frname[17] = '\0'; frname[11] = 'm'; JIN = fopen(frname, "a"); frname[11] = 's'; HUR = fopen(frname, "a"); frname[11] = 'l'; JYW = fopen(frname, "a"); frname[11] = 'u'; OUT = fopen(frname, "a"); frname[11] = 'g'; ASA = fopen(frname, "a"); frname[11] = 'p'; PRT = fopen(frname, "a"); frname[11] = 'h'; IMS = fopen(frname, "a"); frname[11] = 'a'; JJW = fopen(frname, "a"); for (i = 1; i <= nSCHOOL; i++) fprintf(HUR, "% .4f ", sum_sigma[i]); fprintf(HUR, "\n"); for (i = 1; i <= nSCHOOL; i++) fprintf(HUR, "% .4f ", var_sigma[i]); fprintf(HUR, "\n"); for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) fprintf(OUT, 
"% .4f ", sum_tau[i]); fprintf(OUT, "\n"); for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) fprintf(OUT, "% .4f ", var_tau[i]); fprintf(OUT, "\n"); for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) fprintf(JYW, "% .4f ", sum_delta[i]); fprintf(JYW, "\n"); for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) fprintf(JYW, "% .4f ", var_delta[i]); fprintf(JYW, "\n"); for (k = 1; k <= nSCHOOL; k++) { for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) fprintf(JIN, "% .4f ", sum_mu[i][k]); fprintf(JIN, "\n"); } for (i = 1; i <= nITEM; i++) fprintf(ASA, "% .4f ", sum_gamma[i]); fprintf(ASA, "\n"); for (i = 1; i <= nITEM; i++) fprintf(ASA, "% .4f ", var_gamma[i]); fprintf(ASA, "\n"); for (i = 1; i <= nITEM; i++) fprintf(PRT, "%.4f ", sum_varphi[i]); fprintf(PRT, "\n"); for (i = 1; i <= nITEM; i++) fprintf(PRT, "%.4f ", var_varphi[i]); fprintf(PRT, "\n"); for (k = 1; k <= nSCHOOL; k++) fprintf(IMS, "%.4f ", SCHOOL[k].sum_sigma); fprintf(IMS, "\n"); for (k = 1; k <= nSCHOOL; k++) fprintf(IMS, "%.4f ", SCHOOL[k].var_sigma); fprintf(IMS, "\n"); for (i = 1; i <= nSCHOOL; i++) { for (j = 1; j <= nSCHOOL; j++) fprintf(JJW, "%.4f ", sum_mu_dist[i][j]); fprintf(JJW, "\n"); } fclose(JIN); fclose(HUR); fclose(JYW); fclose(OUT); fclose(ASA); fclose(PRT); fclose(IMS); fclose(JJW); } /* * free_ivector(ncount, 1, nSCHOOL); free_dvector(jump_Z, 0, nITEM); * * for(k = 0; k <= nSCHOOL; k++){ for(i = 0; i <= nMAX; i++) * free(SCHOOL[k].dataset[i]); for(i = 0; i <= nITEM; i++){ for(a = 0; * a <= nMAX; a++) free(SCHOOL[k].Y[i][a]); free(SCHOOL[k].Y[i]); } * (i = 0; i <= nMAX; i++){ for(a = 0; a <= nITEM; a++) * free(SCHOOL[k].U[i][a]); free(SCHOOL[k].U[i]); } for(i = 0; i <= * nMAX; i++){free(SCHOOL[k].old_Zsamp[i]); * free(SCHOOL[k].new_Zsamp[i]);} for(i = 0; i <= nITEM; * i++){free(SCHOOL[k].old_Zitem[i]); free(SCHOOL[k].new_Zitem[i]);} * i = 0; i <= (niter-nburn)/thin; i++){ for(j = 0; j <= nMAX; j++) * free(SCHOOL[k].sample_Zsamp[i][j]); for(j = 0; j <= nITEM; j++) * free(SCHOOL[k].sample_Zitem[i][j]); free(SCHOOL[k].sample_beta[i]); * free(SCHOOL[k].sample_theta[i]); free(SCHOOL[k].sample_Zsamp[i]); * free(SCHOOL[k].sample_Zitem[i]); } for(i = 0; i <= nMAX; * i++){free(SCHOOL[k].sum_Zsamp[i]); free(SCHOOL[k].var_Zsamp[i]);} * i = 0; i <= nITEM; i++){free(SCHOOL[k].sum_Zitem[i]); * free(SCHOOL[k].var_Zitem[i]);} for(i = 0; i <= nITEM; * i++){free(SCHOOL[k].sum_item_mat[i]); * free(SCHOOL[k].var_item_mat[i]);} for(i = 0; i <= nITEM; * i++){free(SCHOOL[k].old_item_mat[i]); free(SCHOOL[k].new_item_mat[i]); * free(SCHOOL[k].sample_item_mat[i]);} free(SCHOOL[k].old_item_mat); * free(SCHOOL[k].new_item_mat); free(SCHOOL[k].oldbeta); * free(SCHOOL[k].newbeta); free(SCHOOL[k].oldtheta); * free(SCHOOL[k].newtheta); free(SCHOOL[k].count_item); * free(SCHOOL[k].count_samp); free(SCHOOL[k].Y); free(SCHOOL[k].U); * free(SCHOOL[k].dataset); free(SCHOOL[k].old_Zsamp); * free(SCHOOL[k].new_Zsamp); free(SCHOOL[k].old_Zitem); * free(SCHOOL[k].new_Zitem); free(SCHOOL[k].sample_beta); * free(SCHOOL[k].sample_theta); free(SCHOOL[k].sum_beta); * free(SCHOOL[k].var_beta); free(SCHOOL[k].acc_beta); * _theta); free(SCHOOL[k].var_theta); free(SCHOOL[k].acc_theta); * HOOL[k].sample_Zsamp); free(SCHOOL[k].sample_Zitem); * free(SCHOOL[k].sample_item_mat); free(SCHOOL[k].sum_Zsamp); * free(SCHOOL[k].var_Zsamp); free(SCHOOL[k].acc_Zsamp); * um_Zitem); free(SCHOOL[k].var_Zitem); free(SCHOOL[k].sum_item_mat); * free(SCHOOL[k].var_item_mat); free(SCHOOL[k].sample_sigma); * free(SCHOOL[k].mean_Z); } free(SCHOOL); * * free_dmatrix(sample_sigma, 1, (niter - nburn) 
/ thin, 1, nSCHOOL); * matrix(sample_delta, 1, (niter - nburn) / thin, 1, nITEM * nDIM); * _dmatrix(sample_tau, 1, (niter - nburn) / thin, 1, nITEM * nDIM); * _dmatrix(sample_gamma, 1, (niter-nburn)/thin, 1, nITEM); * sample_varphi, 1, (niter-nburn)/thin, 1, nITEM); free_dmatrix(sum_mu, * 1, nITEM * nDIM, 0, nSCHOOL); free_dvector(sum_tau, 1, nITEM * nDIM); * ree_dvector(var_tau, 1, nITEM * nDIM); free_dvector(sum_sigma, 1, * nSCHOOL); free_dvector(var_sigma, 1, nSCHOOL); * , 1, nITEM * nDIM); free_dvector(var_delta, 1, nITEM * nDIM); * ector(sum_gamma, 1, nITEM); free_dvector(var_gamma, 1, nITEM); * vector(sum_varphi, 1, nITEM); free_dvector(var_varphi, 1, nITEM); * * free_dvector(oldsigma, 1, nSCHOOL); free_dvector(olddelta, 1, nITEM * * nDIM); free_dvector(oldtau, 1, nITEM * nDIM); free_dmatrix(oldmu, 1, * nITEM * nDIM, 0, nSCHOOL); free_dvector(oldgamma, 1, nITEM); * ctor(oldvarphi, 1, nITEM); * * free_dmatrix(mu_dist, 1, nSCHOOL, 1, nSCHOOL); free_dmatrix(sum_mu_dist, * 1, nSCHOOL, 1, nSCHOOL); free_dvector(avg_ran, 1, nSCHOOL); * tor(var_ran, 1, nSCHOOL); * * free_ivector(count, 1, nSCHOOL); * * free_dvector(sample_samp_like, 1, nMAX); free_dvector(new_samp_distance, * 1, nMAX); free_dvector(old_samp_distance, 1, nMAX); * * free_dvector(sample_item_like, 1, nMAX); free_dmatrix(new_item_distance, * 1, nITEM, 1, nITEM); free_dmatrix(old_item_distance, 1, nITEM, 1, * nITEM); */ return 0; }
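The accept/reject step repeated verbatim above for the item positions, person positions, beta, and theta updates is a standard Metropolis-Hastings rule on the log scale. The following is a minimal standalone sketch of that rule, not part of the original program; the helper name mh_accept is hypothetical, and rand() is used only to mirror the sampler's own random-number source.

#include <math.h>
#include <stdlib.h>

/* Accept a proposal with probability min(1, exp(log_ratio)), where
 * log_ratio = log posterior(proposed) - log posterior(current); this is
 * exactly the `ratio` the sampler computes before each accept/reject. */
static int mh_accept(double log_ratio)
{
    double un;

    if (log_ratio > 0.0) /* proposal strictly dominates: always accept */
        return 1;
    un = rand() * 1.0 / RAND_MAX; /* un ~ Uniform(0,1), as in the sampler */
    return (log(un) < log_ratio);
}

/* Usage, matching the item-position update above:
 *   if (mh_accept(update_like_item + (num - den))) { ...copy new state into old... }
 *   else                                           { ...restore new state from old... }
 */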
#include "header.h" #include "FILE/nrutil.h" #include "FILE/stat.c" #include "cost.c" int main(int argc, const char *argv[]) { //Declare Variables FILE * inp, *JIN, *HUR, *OUT, *PRT, *JYW, *ASA, *IMS, *JJW; char buf[255], frname[255]; int stime; long ltime; int ind, ite, a, b, i, j, k, l, v, accept, gcount, mcount, mutmp, *count, show1, show2; double num, den, un, ratio; double old_like_beta, new_like_beta, old_like_theta, new_like_theta; double update_like_samp, update_like_item, tmp_oldmu, tmp_newmu; double post_a, post_b, school_a, school_b; double *old_samp_distance, *new_samp_distance, *sample_samp_like; double *old_item_distance, *new_item_distance, *sample_item_like; double **sum_mu, **mu_dist, **sum_mu_dist; double **sample_tau, *sum_tau, *var_tau; double **sample_sigma, *sum_sigma, *var_sigma; double **sample_delta, *sum_delta, *var_delta; double **sample_gamma, *sum_gamma, *var_gamma; double **sample_varphi, *sum_varphi, *var_varphi; double var_fix, avg_fix, *var_ran, *avg_ran, avg_beta, var_beta; MM = atoi(argv[1]); //Set Random Seed ltime = time(NULL); stime = (unsigned int)ltime / 2; srand(stime); printf("nseed = %d\n", stime); //Input Number of Thread /* * # pragma omp parallel { #if defined (_OPENMP) k = * omp_get_num_threads(); printf("k = %d\n", k); srand(((unsigned * int)time(NULL))^k); #endif } */ //Input Parameters inp = fopen("DATA/parameter.txt", "r"); if (inp == NULL) { printf("Can't open data file\n"); return 0; } fscanf(inp, "%d", &niter); fscanf(inp, "%d", &nburn); fscanf(inp, "%d", &thin); fscanf(inp, "%d", &print); fscanf(inp, "%d", &repeat); fscanf(inp, "%lf", &jump_beta); fscanf(inp, "%lf", &jump_theta); fscanf(inp, "%lf", &jump_mu); fscanf(inp, "%lf", &jump_W); fclose(inp); //The Number of Respondents by Schools ncount = ivector(1, nSCHOOL); inp = fopen("DATA/count.txt", "r"); for (i = 1; i <= nSCHOOL; i++) fscanf(inp, "%d", &ncount[i]); fclose(inp); jump_Z = dvector(1, 10); inp = fopen("DATA/jumprule.txt", "r"); for (i = 1; i <= 10; i++) fscanf(inp, "%lf", &jump_Z[i]); fclose(inp); jump_index = imatrix(1, nSCHOOL, 1, nITEM); inp = fopen("DATA/jumpitem.txt", "r"); for (i = 1; i <= nSCHOOL; i++) for (j = 1; j <= nITEM; j++) fscanf(inp, "%d", &jump_index[i][j]); fclose(inp); //Declare typedef structure and set array of variables in typedef structure totalsize = sizeof(SCHOOL) + sizeof(int) * (nMAX + 1) * (nITEM + 1); totalsize += sizeof(int) * (nMAX + 1) + sizeof(int) * (nITEM + 1); totalsize += sizeof(int) * (nITEM + 1) * (nMAX + 1) * (nMAX + 1); totalsize += sizeof(int) * (nMAX + 1) * (nITEM + 1) * (nITEM + 1); totalsize += sizeof(double) * ((nITEM + 1) * 2 + (nMAX + 1) * 2) + sizeof(double) * ((nITEM + 1) * (nITEM + 1) * 2); totalsize += sizeof(double) * ((nMAX + 1) * (nDIM + 1) * 4 + (nITEM + 1) * (nDIM + 1) * 2); totalsize += sizeof(double) * (((niter - nburn) / thin + 1) * (nITEM + 1) + (nITEM + 1) * 3); totalsize += sizeof(double) * (((niter - nburn) / thin + 1) * (nMAX + 1) + (nMAX + 1) * 3); totalsize += sizeof(double) * (((niter - nburn) / thin + 1) * ((nMAX + 1) * (nDIM + 1) + (nITEM + 1) * (nDIM + 1))); totalsize += sizeof(double) * (((niter - nburn) / thin + 1) + (nDIM + 1)); totalsize += sizeof(double) * ((nMAX + 1) * (nDIM + 1) * 2 + (nITEM + 1) * (nDIM + 1) * 2 + (nMAX + 1) + (nITEM + 1)); totalsize += sizeof(double) * ((nITEM + 1) * (nITEM + 1) * 3); SCHOOL = (YEWON *) malloc(totalsize * (nSCHOOL + 1)); for (k = 0; k <= nSCHOOL; k++) { SCHOOL[k].cbsize = totalsize; SCHOOL[k].dataset = (int **)malloc(sizeof(int *) * (nMAX + 1)); 
SCHOOL[k].count_samp = (int *)malloc(sizeof(int *) * (nMAX + 1)); SCHOOL[k].count_item = (int *)malloc(sizeof(int *) * (nITEM + 1)); SCHOOL[k].Y = (int ***)malloc(sizeof(int **) * (nITEM + 1)); SCHOOL[k].U = (int ***)malloc(sizeof(int **) * (nMAX + 1)); SCHOOL[k].oldbeta = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].newbeta = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].oldtheta = (double *)malloc(sizeof(double) * (nMAX + 1)); SCHOOL[k].newtheta = (double *)malloc(sizeof(double) * (nMAX + 1)); SCHOOL[k].old_Zsamp = (double **)malloc(sizeof(double *) * (nMAX + 1)); SCHOOL[k].new_Zsamp = (double **)malloc(sizeof(double *) * (nMAX + 1)); SCHOOL[k].old_Zmean = (double **)malloc(sizeof(double *) * (nMAX + 1)); SCHOOL[k].new_Zmean = (double **)malloc(sizeof(double *) * (nMAX + 1)); SCHOOL[k].old_Zitem = (double **)malloc(sizeof(double *) * (nITEM + 1)); SCHOOL[k].new_Zitem = (double **)malloc(sizeof(double *) * (nITEM + 1)); SCHOOL[k].mean_Z = (double *)malloc(sizeof(double) * (nDIM + 1)); SCHOOL[k].sample_beta = (double **)malloc(sizeof(double *) * ((niter - nburn) / thin + 1)); SCHOOL[k].sample_theta = (double **)malloc(sizeof(double *) * ((niter - nburn) / thin + 1)); SCHOOL[k].sample_sigma = (double *)malloc(sizeof(double) * ((niter - nburn) / thin + 1)); SCHOOL[k].sum_beta = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].var_beta = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].acc_beta = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].sum_theta = (double *)malloc(sizeof(double) * (nMAX + 1)); SCHOOL[k].var_theta = (double *)malloc(sizeof(double) * (nMAX + 1)); SCHOOL[k].acc_theta = (double *)malloc(sizeof(double) * (nMAX + 1)); SCHOOL[k].sample_Zsamp = (double ***)malloc(sizeof(double **) * ((niter - nburn) / thin + 1)); SCHOOL[k].sample_Zitem = (double ***)malloc(sizeof(double **) * ((niter - nburn) / thin + 1)); SCHOOL[k].sample_item_mat = (double **)malloc(sizeof(double *) * (nITEM + 1)); SCHOOL[k].sum_Zsamp = (double **)malloc(sizeof(double *) * (nMAX + 1)); SCHOOL[k].var_Zsamp = (double **)malloc(sizeof(double *) * (nMAX + 1)); SCHOOL[k].acc_Zsamp = (double *)malloc(sizeof(double) * (nMAX + 1)); SCHOOL[k].sum_Zitem = (double **)malloc(sizeof(double *) * (nITEM + 1)); SCHOOL[k].var_Zitem = (double **)malloc(sizeof(double *) * (nITEM + 1)); SCHOOL[k].acc_Zitem = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].old_item_mat = (double **)malloc(sizeof(double *) * (nITEM + 1)); SCHOOL[k].new_item_mat = (double **)malloc(sizeof(double *) * (nITEM + 1)); SCHOOL[k].sum_item_mat = (double **)malloc(sizeof(double *) * (nITEM + 1)); SCHOOL[k].var_item_mat = (double **)malloc(sizeof(double *) * (nITEM + 1)); for (i = 0; i <= nMAX; i++) SCHOOL[k].dataset[i] = (int *)malloc(sizeof(int) * (nITEM + 1)); for (i = 0; i <= nITEM; i++) { SCHOOL[k].Y[i] = (int **)malloc(sizeof(int *) * (nMAX + 1)); for (a = 0; a <= nMAX; a++) SCHOOL[k].Y[i][a] = (int *)malloc(sizeof(int) * (nMAX + 1)); } for (i = 0; i <= nMAX; i++) { SCHOOL[k].U[i] = (int **)malloc(sizeof(int *) * (nITEM + 1)); for (a = 0; a <= nITEM; a++) SCHOOL[k].U[i][a] = (int *)malloc(sizeof(int) * (nITEM + 1)); } for (i = 0; i <= nMAX; i++) { SCHOOL[k].old_Zsamp[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); SCHOOL[k].new_Zsamp[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); SCHOOL[k].old_Zmean[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); SCHOOL[k].new_Zmean[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); } for (i = 0; i <= nITEM; i++) { SCHOOL[k].old_Zitem[i] = 
(double *)malloc(sizeof(double) * (nDIM + 1)); SCHOOL[k].new_Zitem[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); } for (i = 0; i <= (niter - nburn) / thin; i++) { SCHOOL[k].sample_beta[i] = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].sample_theta[i] = (double *)malloc(sizeof(double) * (nMAX + 1)); SCHOOL[k].sample_Zsamp[i] = (double **)malloc(sizeof(double *) * (nMAX + 1)); SCHOOL[k].sample_Zitem[i] = (double **)malloc(sizeof(double *) * (nITEM + 1)); for (j = 0; j <= nMAX; j++) SCHOOL[k].sample_Zsamp[i][j] = (double *)malloc(sizeof(double) * (nDIM + 1)); for (j = 0; j <= nITEM; j++) SCHOOL[k].sample_Zitem[i][j] = (double *)malloc(sizeof(double) * (nDIM + 1)); } for (i = 0; i <= nMAX; i++) { SCHOOL[k].sum_Zsamp[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); SCHOOL[k].var_Zsamp[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); } for (i = 0; i <= nITEM; i++) { SCHOOL[k].sum_Zitem[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); SCHOOL[k].var_Zitem[i] = (double *)malloc(sizeof(double) * (nDIM + 1)); } for (i = 0; i <= nITEM; i++) { SCHOOL[k].sample_item_mat[i] = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].old_item_mat[i] = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].new_item_mat[i] = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].sum_item_mat[i] = (double *)malloc(sizeof(double) * (nITEM + 1)); SCHOOL[k].var_item_mat[i] = (double *)malloc(sizeof(double) * (nITEM + 1)); } printf("MEMORY SETTING: %.2d\n", k); } count = ivector(1, nSCHOOL); oldmu = dmatrix(1, nITEM * (nITEM - 1) / 2, 0, nSCHOOL); olddelta = dvector(1, nITEM * (nITEM - 1) / 2); oldsigma = dvector(1, nSCHOOL); oldtau = dvector(1, nITEM * (nITEM - 1) / 2); oldgamma = dvector(1, nITEM); oldvarphi = dvector(1, nITEM); sample_sigma = dmatrix(1, (niter - nburn) / thin, 1, nSCHOOL); sample_delta = dmatrix(1, (niter - nburn) / thin, 1, nITEM * (nITEM - 1) / 2); sample_tau = dmatrix(1, (niter - nburn) / thin, 1, nITEM * (nITEM - 1) / 2); sample_gamma = dmatrix(1, (niter - nburn) / thin, 1, nITEM); sample_varphi = dmatrix(1, (niter - nburn) / thin, 1, nITEM); sum_mu = dmatrix(1, nITEM * (nITEM - 1) / 2, 0, nSCHOOL); sum_tau = dvector(1, nITEM * (nITEM - 1) / 2); var_tau = dvector(1, nITEM * (nITEM - 1) / 2); sum_sigma = dvector(1, nSCHOOL); var_sigma = dvector(1, nSCHOOL); sum_delta = dvector(1, nITEM * (nITEM - 1) / 2); var_delta = dvector(1, nITEM * (nITEM - 1) / 2); sum_gamma = dvector(1, nITEM); var_gamma = dvector(1, nITEM); sum_varphi = dvector(1, nITEM); var_varphi = dvector(1, nITEM); mu_dist = dmatrix(1, nSCHOOL, 1, nSCHOOL); sum_mu_dist = dmatrix(1, nSCHOOL, 1, nSCHOOL); avg_ran = dvector(1, nSCHOOL); var_ran = dvector(1, nSCHOOL); frname[0] = 'D'; frname[1] = 'A'; frname[2] = 'T'; frname[3] = 'A'; frname[4] = '/'; frname[5] = 'i'; frname[6] = 't'; frname[7] = 'e'; frname[8] = 'm'; frname[11] = '.'; frname[12] = 't'; frname[13] = 'x'; frname[14] = 't'; frname[15] = '\0'; for (k = 0; k <= nSCHOOL; k++) { for (i = 0; i <= nMAX; i++) SCHOOL[k].count_samp[i] = 0; for (i = 0; i <= nITEM; i++) SCHOOL[k].count_item[i] = 0; for (i = 0; i <= nMAX; i++) for (j = 0; j <= nITEM; j++) SCHOOL[k].dataset[i][j] = 0; for (i = 0; i <= nITEM; i++) SCHOOL[k].oldbeta[i] = SCHOOL[k].newbeta[i] = 0.0; for (i = 0; i <= nMAX; i++) SCHOOL[k].oldtheta[i] = SCHOOL[k].newtheta[i] = 0.0; for (i = 0; i <= nITEM; i++) for (j = 0; j <= nITEM; j++) SCHOOL[k].old_item_mat[i][j] = SCHOOL[k].new_item_mat[i][j] = 0.0; for (i = 0; i <= nITEM; i++) for (a = 0; a <= nMAX; a++) for (b = 0; b <= nMAX; 
b++) SCHOOL[k].Y[i][a][b] = 0; for (i = 0; i <= nMAX; i++) for (a = 0; a <= nITEM; a++) for (b = 0; b <= nITEM; b++) SCHOOL[k].U[i][a][b] = 0; for (i = 0; i <= nMAX; i++) for (j = 0; j <= nDIM; j++) SCHOOL[k].old_Zsamp[i][j] = SCHOOL[k].new_Zsamp[i][j] = SCHOOL[k].old_Zmean[i][j] = SCHOOL[k].new_Zmean[i][j] = 0.0; for (i = 0; i <= nITEM; i++) for (j = 0; j <= nDIM; j++) SCHOOL[k].old_Zitem[i][j] = SCHOOL[k].new_Zitem[i][j] = 0.0; for (i = 0; i <= (niter - nburn) / thin; i++) { SCHOOL[k].sample_sigma[i] = 0.0; for (j = 0; j <= nITEM; j++) SCHOOL[k].sample_beta[i][j] = 0.0; for (j = 0; j <= nMAX; j++) SCHOOL[k].sample_theta[i][j] = 0.0; for (a = 0; a <= nMAX; a++) for (b = 0; b <= nDIM; b++) SCHOOL[k].sample_Zsamp[i][a][b] = 0.0; for (a = 0; a <= nITEM; a++) for (b = 0; b <= nDIM; b++) SCHOOL[k].sample_Zitem[i][a][b] = 0.0; } SCHOOL[k].oldsigma = 0.0; SCHOOL[k].sum_sigma = SCHOOL[k].var_sigma = 0.0; for (i = 0; i <= nDIM; i++) SCHOOL[k].mean_Z[i] = 0.0; for (i = 0; i <= nITEM; i++) SCHOOL[k].var_beta[i] = SCHOOL[k].sum_beta[i] = SCHOOL[k].acc_beta[i] = 0.0; for (i = 0; i <= nMAX; i++) SCHOOL[k].var_theta[i] = SCHOOL[k].sum_theta[i] = SCHOOL[k].acc_theta[i] = 0.0; for (i = 0; i <= nMAX; i++) for (j = 0; j <= nDIM; j++) SCHOOL[k].sum_Zsamp[i][j] = SCHOOL[k].var_Zsamp[i][j] = 0.0; for (i = 0; i <= nITEM; i++) for (j = 0; j <= nDIM; j++) SCHOOL[k].sum_Zitem[i][j] = SCHOOL[k].var_Zitem[i][j] = 0.0; for (i = 0; i <= nITEM; i++) for (j = 0; j <= nITEM; j++) SCHOOL[k].sample_item_mat[i][j] = SCHOOL[k].sum_item_mat[i][j] = SCHOOL[k].var_item_mat[i][j] = 0.0; for (i = 0; i <= nMAX; i++) SCHOOL[k].acc_Zsamp[i] = 0.0; for (i = 0; i <= nITEM; i++) SCHOOL[k].acc_Zitem[i] = 0.0; if (k != 0) count[k] = 0; if (k != 0) { if (k < 10) { frname[9] = (char)(48); frname[10] = (char)(k + 48); } else { frname[9] = (char)(k / 10 + 48); frname[10] = (char)(k % 10 + 48); } inp = fopen(frname, "r"); printf("Currently Reading %s\n", frname); if (inp == NULL) { printf("Cannot open data file\n"); return 0; } for (i = 1; i <= ncount[k]; i++) for (j = 1; j <= nITEM; j++) { fscanf(inp, "%d", &SCHOOL[k].dataset[i][j]); SCHOOL[k].count_samp[i] += SCHOOL[k].dataset[i][j]; SCHOOL[k].count_item[j] += SCHOOL[k].dataset[i][j]; } fclose(inp); printf("%.2d\n", k); for (i = 1; i <= ncount[k]; i++) { for (j = 1; j <= nITEM; j++) printf("%d ", SCHOOL[k].dataset[i][j]); printf("\n"); } for (i = 1; i <= nITEM; i++) for (a = 2; a <= ncount[k]; a++) for (b = 1; b < a; b++) { SCHOOL[k].Y[i][a][b] = SCHOOL[k].dataset[a][i] * SCHOOL[k].dataset[b][i]; SCHOOL[k].Y[i][b][a] = SCHOOL[k].Y[i][a][b]; } for (a = 1; a <= ncount[k]; a++) for (i = 2; i <= nITEM; i++) for (j = 1; j < i; j++) { SCHOOL[k].U[a][i][j] = SCHOOL[k].dataset[a][i] * SCHOOL[k].dataset[a][j]; SCHOOL[k].U[a][j][i] = SCHOOL[k].U[a][i][j]; } } printf("INITIALIZATION AND DATA LOADING: %.2d\n", k); } for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) { oldtau[i] = olddelta[i] = 0.0; for (j = 1; j <= nSCHOOL; j++) oldmu[i][j] = 0.0; } for (i = 1; i <= nSCHOOL; i++) oldsigma[i] = 0.0; //Declare Additional Variables sample_samp_like = dvector(1, nMAX); old_samp_distance = dvector(1, nMAX); new_samp_distance = dvector(1, nMAX); sample_item_like = dvector(1, nITEM); old_item_distance = dvector(1, nITEM); new_item_distance = dvector(1, nITEM); pr_var_Z = sqrt(2.0); for (v = 0; v < repeat; v++) { //Initialize Variables for (k = 1; k <= nSCHOOL; k++) { for (i = 1; i <= nITEM; i++) SCHOOL[k].oldbeta[i] = SCHOOL[k].newbeta[i] = 0.0; for (i = 1; i <= ncount[k]; i++) SCHOOL[k].oldtheta[i] = 
SCHOOL[k].newtheta[i] = 0.0; for (i = 1; i <= ncount[k]; i++) for (j = 1; j <= nDIM; j++) SCHOOL[k].old_Zsamp[i][j] = SCHOOL[k].new_Zsamp[i][j] = SCHOOL[k].old_Zmean[i][j] = SCHOOL[k].new_Zmean[i][j] = 0.0; for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) SCHOOL[k].old_Zitem[i][j] = SCHOOL[k].new_Zitem[i][j] = 0.0; for (i = 1; i <= (niter - nburn) / thin; i++) { SCHOOL[k].sample_sigma[i] = 0.0; for (j = 1; j <= nITEM; j++) SCHOOL[k].sample_beta[i][j] = 0.0; for (j = 1; j <= ncount[k]; j++) SCHOOL[k].sample_theta[i][j] = 0.0; for (a = 1; a <= ncount[k]; a++) for (b = 1; b <= nDIM; b++) SCHOOL[k].sample_Zsamp[i][a][b] = 0.0; for (a = 1; a <= nITEM; a++) for (b = 1; b <= nDIM; b++) SCHOOL[k].sample_Zitem[i][a][b] = 0.0; } for (i = 1; i <= nITEM; i++) SCHOOL[k].var_beta[i] = SCHOOL[k].sum_beta[i] = SCHOOL[k].acc_beta[i] = 0.0; for (i = 1; i <= ncount[k]; i++) SCHOOL[k].var_theta[i] = SCHOOL[k].sum_theta[i] = SCHOOL[k].acc_theta[i] = 0.0; for (i = 1; i <= ncount[k]; i++) for (j = 1; j <= nDIM; j++) SCHOOL[k].sum_Zsamp[i][j] = SCHOOL[k].var_Zsamp[i][j] = 0.0; for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) SCHOOL[k].sum_Zitem[i][j] = SCHOOL[k].var_Zitem[i][j] = 0.0; for (i = 1; i <= nITEM; i++) SCHOOL[k].acc_Zitem[i] = 0.0; for (i = 1; i <= nMAX; i++) SCHOOL[k].acc_Zsamp[i] = 0.0; for (i = 1; i <= nITEM; i++) for (j = 1; j <= nITEM; j++) { SCHOOL[k].sample_item_mat[i][j] = 0.0; SCHOOL[k].old_item_mat[i][j] = SCHOOL[k].new_item_mat[i][j] = 0.0; SCHOOL[k].sum_item_mat[i][j] = SCHOOL[k].var_item_mat[i][j] = 0.0; } for (i = 0; i <= nDIM; i++) SCHOOL[k].mean_Z[i] = 0.0; SCHOOL[k].oldsigma = SCHOOL[k].sum_sigma = SCHOOL[k].var_sigma = 0.0; count[k] = 0; } for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) { olddelta[i] = oldtau[i] = 0.0; sum_delta[i] = var_delta[i] = 0.0; sum_tau[i] = var_tau[i] = 0.0; for (j = 1; j <= (niter - nburn) / thin; j++) sample_tau[j][i] = sample_delta[j][i] = 0.0; } for (i = 1; i <= nSCHOOL; i++) { oldsigma[i] = 0.0; sum_sigma[i] = var_sigma[i] = 0.0; for (j = 1; j <= (niter - nburn) / thin; j++) sample_sigma[j][i] = 0.0; } for (k = 1; k <= nSCHOOL; k++) for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) oldmu[i][k] = sum_mu[i][k] = 0.0; for (i = 1; i <= nITEM; i++) { oldgamma[i] = oldvarphi[i] = 0.0; sum_gamma[i] = var_gamma[i] = 0.0; sum_varphi[i] = var_varphi[i] = 0.0; for (j = 1; j <= (niter - nburn) / thin; j++) sample_gamma[j][i] = sample_varphi[j][i] = 0.0; } for (i = 1; i <= nSCHOOL; i++) for (j = 1; j <= nSCHOOL; j++) sum_mu_dist[i][j] = 0.0; //Generate Initial Values for beta ,Z, sigma for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) { olddelta[i] = -1.5 + 3.0 * rand() / RAND_MAX; oldtau[i] = 100.0; for (j = 1; j <= nSCHOOL; j++) oldmu[i][j] = -1.5 + 3.0 * rand() / RAND_MAX; } for (i = 1; i <= nSCHOOL; i++) oldsigma[i] = 100.0; for (i = 1; i <= nITEM; i++) { oldgamma[i] = -1.5 + 3.0 * rand() / RAND_MAX; oldvarphi[i] = 100.0; } for (k = 1; k <= nSCHOOL; k++) { SCHOOL[k].oldsigma = 0.05 * 0.05; for (i = 1; i <= nITEM; i++) SCHOOL[k].oldbeta[i] = -1.5 + 3.0 * rand() / RAND_MAX; for (i = 1; i <= ncount[k]; i++) SCHOOL[k].oldtheta[i] = -1.5 + 3.0 * rand() / RAND_MAX; for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) SCHOOL[k].old_Zitem[i][j] = SCHOOL[k].new_Zitem[i][j] = -1.5 + 3.0 * rand() / RAND_MAX; for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) for (a = 1; a <= ncount[k]; a++) if (SCHOOL[k].dataset[a][i] == 1) SCHOOL[k].old_Zmean[a][j] += SCHOOL[k].old_Zitem[i][j] / (SCHOOL[k].count_samp[a] * 1.0); for (i = 1; i <= ncount[k]; i++) for (j = 
1; j <= nDIM; j++) SCHOOL[k].new_Zmean[i][j] = SCHOOL[k].old_Zmean[i][j]; for (i = 1; i <= ncount[k]; i++) for (j = 1; j <= nDIM; j++) SCHOOL[k].new_Zsamp[i][j] = SCHOOL[k].old_Zsamp[i][j] = SCHOOL[k].old_Zmean[i][j] + sqrt(SCHOOL[k].oldsigma) * gasdev(); for (i = 2; i <= nITEM; i++) for (j = 1; j < i; j++) { for (l = 1; l <= nDIM; l++) SCHOOL[k].old_item_mat[i][j] += pow((SCHOOL[k].old_Zitem[i][l] - SCHOOL[k].old_Zitem[j][l]), 2.0); SCHOOL[k].old_item_mat[i][j] = sqrt(SCHOOL[k].old_item_mat[i][j]); SCHOOL[k].old_item_mat[j][i] = SCHOOL[k].old_item_mat[i][j]; } for (i = 1; i <= nITEM; i++) for (j = 1; j <= nITEM; j++) SCHOOL[k].new_item_mat[i][j] = SCHOOL[k].old_item_mat[i][j]; } //MCMC Implementation for Parameter Estimation frname[0] = 'R'; frname[1] = 'E'; frname[2] = 'S'; frname[3] = 'U'; frname[4] = 'L'; frname[5] = 'T'; frname[6] = '/'; frname[7] = 's'; frname[8] = 'i'; frname[9] = 'm'; frname[10] = '_'; frname[12] = (char)(48 + MM); frname[13] = '.'; frname[14] = 'l'; frname[15] = 'o'; frname[16] = 'g'; frname[17] = '\0'; frname[11] = 's'; HUR = fopen(frname, "a"); frname[11] = 'l'; JYW = fopen(frname, "a"); frname[11] = 'u'; OUT = fopen(frname, "a"); frname[11] = 'g'; JIN = fopen(frname, "a"); frname[11] = 'p'; PRT = fopen(frname, "a"); frname[11] = 'a'; JJW = fopen(frname, "a"); gcount = mcount = 0; for (iter = 1; iter <= niter; iter++) { for (a = 1; a <= nSCHOOL; a++) { for (i = 1; i <= nITEM; i++) { // #pragma omp parallel for private(j, k) default(shared) for (j = 1; j <= nDIM; j++) { SCHOOL[a].new_Zitem[i][j] = SCHOOL[a].old_Zitem[i][j] + jump_Z[jump_index[a][i]] * gasdev(); for (k = 1; k <= ncount[a]; k++) if (SCHOOL[a].dataset[k][i] == 1) { SCHOOL[a].new_Zmean[k][j] -= SCHOOL[a].old_Zitem[i][j] / (SCHOOL[a].count_samp[k] * 1.0); SCHOOL[a].new_Zmean[k][j] += SCHOOL[a].new_Zitem[i][j] / (SCHOOL[a].count_samp[k] * 1.0); } } for (ind = 1; ind <= nITEM; ind++) sample_item_like[ind] = old_item_distance[ind] = new_item_distance[ind] = 0.0; // #pragma omp parallel for private(ind, k, l) default(shared) for (ind = 1; ind <= nITEM; ind++) if (ind != i) { for (l = 1; l <= nDIM; l++) { old_item_distance[ind] += pow((SCHOOL[a].old_Zitem[ind][l] - SCHOOL[a].old_Zitem[i][l]), 2.0); new_item_distance[ind] += pow((SCHOOL[a].new_Zitem[ind][l] - SCHOOL[a].new_Zitem[i][l]), 2.0); } old_item_distance[ind] = sqrt(old_item_distance[ind]); new_item_distance[ind] = sqrt(new_item_distance[ind]); SCHOOL[a].new_item_mat[ind][i] = new_item_distance[ind]; SCHOOL[a].new_item_mat[i][ind] = SCHOOL[a].new_item_mat[ind][i]; SCHOOL[a].old_item_mat[ind][i] = old_item_distance[ind]; SCHOOL[a].old_item_mat[i][ind] = SCHOOL[a].old_item_mat[ind][i]; for (k = 1; k <= ncount[a]; k++) { if (SCHOOL[a].U[k][ind][i] == 1) { sample_item_like[ind] -= -log(1.0 + exp(-(SCHOOL[a].oldtheta[k] - old_item_distance[ind]))); sample_item_like[ind] += -log(1.0 + exp(-(SCHOOL[a].oldtheta[k] - new_item_distance[ind]))); } else { sample_item_like[ind] -= -log(1.0 + exp(SCHOOL[a].oldtheta[k] - old_item_distance[ind])); sample_item_like[ind] += -log(1.0 + exp(SCHOOL[a].oldtheta[k] - new_item_distance[ind])); } } } update_like_item = 0.0; for (ind = 1; ind <= nITEM; ind++) update_like_item += sample_item_like[ind]; num = den = 0.0; for (j = 2; j <= nITEM; j++) for (k = 1; k < j; k++) { if (SCHOOL[a].new_item_mat[j][k] > 0.0001) num += dlognorm(log(SCHOOL[a].new_item_mat[j][k]), olddelta[((j - 1) * (j - 2) / 2 + k)], sqrt(oldtau[((j - 1) * (j - 2) / 2 + k)])); else num += dlognorm(log(0.0001), olddelta[((j - 1) * (j - 2) / 2 + k)], 
sqrt(oldtau[((j - 1) * (j - 2) / 2 + k)])); if (SCHOOL[a].old_item_mat[j][k] > 0.0001) den += dlognorm(log(SCHOOL[a].old_item_mat[j][k]), olddelta[((j - 1) * (j - 2) / 2 + k)], sqrt(oldtau[((j - 1) * (j - 2) / 2 + k)])); else den += dlognorm(log(0.0001), olddelta[((j - 1) * (j - 2) / 2 + k)], sqrt(oldtau[((j - 1) * (j - 2) / 2 + k)])); //printf("%d %d-%.3f %.3f %.3f %.3f %.3f\n", j, k, num, den, oldmu[((j - 1) * (j - 2) / 2 + k)][a], log(SCHOOL[a].new_item_mat[j][k]), log(SCHOOL[a].old_item_mat[j][k])); } ratio = update_like_item + (num - den); //printf("SCHOOL-%.2d, ITEM-%.2d: Num-%.3f, Den-%.3f\n", a, i, num, den); if (ratio > 0.0) accept = 1; else { un = rand() * 1.0 / RAND_MAX; if (log(un) < ratio) accept = 1; else accept = 0; } if (accept == 1) { for (j = 1; j <= nDIM; j++) { SCHOOL[a].old_Zitem[i][j] = SCHOOL[a].new_Zitem[i][j]; for (k = 1; k <= ncount[a]; k++) if (SCHOOL[a].dataset[k][i] == 1) SCHOOL[a].old_Zmean[k][j] = SCHOOL[a].new_Zmean[k][j]; } SCHOOL[a].acc_Zitem[i] += 1.0 / niter; for (j = 1; j <= nITEM; j++) for (k = 1; k <= nITEM; k++) SCHOOL[a].old_item_mat[j][k] = SCHOOL[a].new_item_mat[j][k]; } else { for (j = 1; j <= nDIM; j++) { SCHOOL[a].new_Zitem[i][j] = SCHOOL[a].old_Zitem[i][j]; for (k = 1; k <= ncount[a]; k++) if (SCHOOL[a].dataset[k][i] == 1) SCHOOL[a].new_Zmean[k][j] = SCHOOL[a].old_Zmean[k][j]; } for (j = 1; j <= nITEM; j++) for (k = 1; k <= nITEM; k++) SCHOOL[a].new_item_mat[j][k] = SCHOOL[a].old_item_mat[j][k]; } } for (i = 1; i <= ncount[a]; i++) { for (j = 1; j <= nDIM; j++) SCHOOL[a].new_Zsamp[i][j] = SCHOOL[a].old_Zsamp[i][j] + jump_W * gasdev(); for (ind = 1; ind <= ncount[a]; ind++) sample_samp_like[ind] = old_samp_distance[ind] = new_samp_distance[ind] = 0.0; // #pragma omp parallel for private(ind, k, l) default(shared) for (ind = 1; ind <= ncount[a]; ind++) if (ind != i) { for (l = 1; l <= nDIM; l++) { old_samp_distance[ind] += pow((SCHOOL[a].old_Zsamp[ind][l] - SCHOOL[a].old_Zsamp[i][l]), 2.0); new_samp_distance[ind] += pow((SCHOOL[a].old_Zsamp[ind][l] - SCHOOL[a].new_Zsamp[i][l]), 2.0); } old_samp_distance[ind] = sqrt(old_samp_distance[ind]); new_samp_distance[ind] = sqrt(new_samp_distance[ind]); for (k = 1; k <= nITEM; k++) { if (SCHOOL[a].Y[k][ind][i] == 1) { sample_samp_like[ind] -= -log(1.0 + exp(-(SCHOOL[a].oldbeta[k] - old_samp_distance[ind]))); sample_samp_like[ind] += -log(1.0 + exp(-(SCHOOL[a].oldbeta[k] - new_samp_distance[ind]))); } else { sample_samp_like[ind] -= -log(1.0 + exp(SCHOOL[a].oldbeta[k] - old_samp_distance[ind])); sample_samp_like[ind] += -log(1.0 + exp(SCHOOL[a].oldbeta[k] - new_samp_distance[ind])); } } } update_like_samp = 0.0; for (ind = 1; ind <= ncount[a]; ind++) update_like_samp += sample_samp_like[ind]; //printf("SCHOOL-%.2d, PERSON-%.2d: LIKELIHOOD_PERSON-%.3f\n", a, i, update_like_samp); num = den = 0.0; //printf("SCHOOL-%.2d, PERSON-%.2d: Num-%.3f, Den-%.3f\n", a, i, num, den); for (j = 1; j <= nDIM; j++) { num += dlognorm(SCHOOL[a].new_Zsamp[i][j], SCHOOL[a].old_Zmean[i][j], sqrt(SCHOOL[a].oldsigma)); den += dlognorm(SCHOOL[a].old_Zsamp[i][j], SCHOOL[a].old_Zmean[i][j], sqrt(SCHOOL[a].oldsigma)); } ratio = update_like_samp + (num - den); //printf("SCHOOL-%.2d, PERSON-%.2d: Num-%.3f, Den-%.3f\n", a, i, num, den); if (ratio > 0.0) accept = 1; else { un = rand() * 1.0 / RAND_MAX; if (log(un) < ratio) accept = 1; else accept = 0; } if (accept == 1) { for (j = 1; j <= nDIM; j++) SCHOOL[a].old_Zsamp[i][j] = SCHOOL[a].new_Zsamp[i][j]; SCHOOL[a].acc_Zsamp[i] += 1.0 / niter; } else { for (j = 1; j <= nDIM; j++) 
SCHOOL[a].new_Zsamp[i][j] = SCHOOL[a].old_Zsamp[i][j]; } } SCHOOL[a].post_a = prior_a; SCHOOL[a].post_b = prior_b; for (i = 1; i <= ncount[a]; i++) for (j = 1; j <= nDIM; j++) { SCHOOL[a].post_a += 0.5; SCHOOL[a].post_b += 0.5 * (SCHOOL[a].old_Zsamp[i][j] - SCHOOL[a].old_Zmean[i][j]) * (SCHOOL[a].old_Zsamp[i][j] - SCHOOL[a].old_Zmean[i][j]); } SCHOOL[a].oldsigma = 1.0 / Rgamma(SCHOOL[a].post_a, SCHOOL[a].post_b); //2. Update $ \ beta_i$ from the proposal distribution $ \ phi_2(\cdot) $ // #pragma omp parallel for private(i, j, k, old_like_beta, new_like_beta, num, den, accept, ratio, un) default(shared) for (i = 1; i <= nITEM; i++) { old_like_beta = cost_beta(i, SCHOOL[a].oldbeta[i], a); SCHOOL[a].newbeta[i] = SCHOOL[a].oldbeta[i] + jump_beta * gasdev(); if (fabs(SCHOOL[a].newbeta[i]) < 7.0) { new_like_beta = cost_beta(i, SCHOOL[a].newbeta[i], a); num = new_like_beta; den = old_like_beta; num += dlognorm(SCHOOL[a].oldbeta[i], oldgamma[i], sqrt(oldvarphi[i])); den += dlognorm(SCHOOL[a].newbeta[i], oldgamma[i], sqrt(oldvarphi[i])); ratio = num - den; if (ratio > 0.0) accept = 1; else { un = rand() * 1.0 / RAND_MAX; if (log(un) < ratio) accept = 1; else accept = 0; } } else accept = 0; if (accept == 1) { SCHOOL[a].oldbeta[i] = SCHOOL[a].newbeta[i]; SCHOOL[a].acc_beta[i] += 1.0 / niter; } else SCHOOL[a].newbeta[i] = SCHOOL[a].oldbeta[i]; } // #pragma omp parallel for private(i, old_like_theta, new_like_theta, num, den, accept, ratio, un) default(shared) for (i = 1; i <= ncount[a]; i++) { old_like_theta = cost_theta(i, SCHOOL[a].oldtheta[i], a); SCHOOL[a].newtheta[i] = SCHOOL[a].oldtheta[i] + jump_theta * gasdev(); new_like_theta = cost_theta(i, SCHOOL[a].newtheta[i], a); num = dlognorm(SCHOOL[a].newtheta[i], pr_mean_theta, pr_var_theta) + new_like_theta; den = dlognorm(SCHOOL[a].oldtheta[i], pr_mean_theta, pr_var_theta) + old_like_theta; ratio = num - den; if (ratio > 0.0) accept = 1; else { un = rand() * 1.0 / RAND_MAX; if (log(un) < ratio) accept = 1; else accept = 0; } if (accept == 1) { SCHOOL[a].oldtheta[i] = SCHOOL[a].newtheta[i]; SCHOOL[a].acc_theta[i] += 1.0 / niter; } else SCHOOL[a].newtheta[i] = SCHOOL[a].oldtheta[i]; } //Save MCMC Results to Files and Repository Variables if (iter > nburn && iter % thin == 0) { count[a]++; for (i = 1; i <= ncount[a]; i++) for (j = 1; j <= nDIM; j++) SCHOOL[a].sample_Zsamp[count[a]][i][j] = SCHOOL[a].old_Zsamp[i][j]; for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) SCHOOL[a].sample_Zitem[count[a]][i][j] = SCHOOL[a].old_Zitem[i][j]; for (i = 1; i <= nITEM; i++) SCHOOL[a].sample_beta[count[a]][i] = SCHOOL[a].oldbeta[i]; for (i = 1; i <= ncount[a]; i++) SCHOOL[a].sample_theta[count[a]][i] = SCHOOL[a].oldtheta[i]; SCHOOL[a].sample_sigma[count[a]] = SCHOOL[a].oldsigma; } //Print MCMC Results to Screen if (iter % print == 0) { printf("%.5d-BETA%.2d ", iter, a); for (i = 1; i <= nITEM; i++) printf("% .4f ", SCHOOL[a].oldbeta[i]); printf("%.4f\n", SCHOOL[a].oldsigma); } } // #pragma omp parallel for private(i, j, school_a, school_b, avg_beta, var_beta) default(shared) for (i = 1; i <= nITEM; i++) { school_a = prior_a; school_b = prior_b; for (j = 1; j <= nSCHOOL; j++) { school_a += 0.5; school_b += 0.5 * (SCHOOL[j].oldbeta[i] - oldgamma[i]) * (SCHOOL[j].oldbeta[i] - oldgamma[i]); } oldvarphi[i] = 1.0 / Rgamma(school_a, school_b); var_beta = 1.0 / (1.0 / pr_var_gamma + nSCHOOL / oldvarphi[i]); avg_beta = 0.0; for (j = 1; j <= nSCHOOL; j++) avg_beta += SCHOOL[j].oldbeta[i] / nSCHOOL; avg_beta *= var_beta * (nSCHOOL / oldvarphi[i]); oldgamma[i] = 
avg_beta + sqrt(var_beta) * gasdev(); } for (i = 2; i <= nITEM; i++) for (j = 1; j < i; j++) { post_a = prior_a; post_b = prior_b; for (k = 1; k <= nSCHOOL; k++) { post_a += 0.5; if (SCHOOL[k].old_item_mat[i][j] > 0.0001) post_b += 0.5 * (log(SCHOOL[k].old_item_mat[i][j]) - olddelta[((i - 1) * (i - 2) / 2 + j)]) * (log(SCHOOL[k].old_item_mat[i][j]) - olddelta[((i - 1) * (i - 2) / 2 + j)]); else post_b += 0.5 * (log(0.0001) - olddelta[((i - 1) * (i - 2) / 2 + j)]) * (log(0.0001) - olddelta[((i - 1) * (i - 2) / 2 + j)]); } oldtau[((i - 1) * (i - 2) / 2 + j)] = 1.0 / Rgamma(post_a, post_b); var_fix = 1.0 / (1.0 / pr_var_delta + nSCHOOL / oldtau[((i - 1) * (i - 2) / 2 + j)]); avg_fix = 0.0; for (k = 1; k <= nSCHOOL; k++) { if (SCHOOL[k].old_item_mat[i][j] > 0.0001) avg_fix += (1.0 / oldtau[((i - 1) * (i - 2) / 2 + j)]) * log(SCHOOL[k].old_item_mat[i][j]); else avg_fix += (1.0 / oldtau[((i - 1) * (i - 2) / 2 + j)]) * log(0.0001); } avg_fix *= var_fix; olddelta[((i - 1) * (i - 2) / 2 + j)] = avg_fix + sqrt(var_fix) * gasdev(); } if (iter % print == 0) for (i = 1; i <= nITEM; i++) { printf("%.5d-GAMMA, VARPHI, ITEM%.2d: ", iter, i); printf("% .4f %.4f\n", oldgamma[i], oldvarphi[i]); } if (iter > nburn && iter % thin == 0) { gcount++; for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) { sample_tau[gcount][i] = sqrt(oldtau[i]); sample_delta[gcount][i] = olddelta[i]; fprintf(JYW, "% .4f ", sample_delta[gcount][i]); fprintf(OUT, "%.4f ", sample_tau[gcount][i]); } for (i = 1; i <= nSCHOOL; i++) { sample_sigma[gcount][i] = sqrt(oldsigma[i]); fprintf(HUR, "%.4f ", sample_sigma[gcount][i]); } for (k = 1; k <= nSCHOOL; k++) for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) sum_mu[i][k] += oldmu[i][k] / ((niter - nburn) / thin); for (i = 1; i <= nITEM; i++) { sample_gamma[gcount][i] = oldgamma[i]; sample_varphi[gcount][i] = sqrt(oldvarphi[i]); fprintf(JIN, "% .4f ", sample_gamma[gcount][i]); fprintf(PRT, "%.4f ", sample_varphi[gcount][i]); } for (i = 1; i <= nSCHOOL; i++) for (j = 1; j <= nSCHOOL; j++) mu_dist[i][j] = 0.0; for (k = 1; k <= nITEM * (nITEM - 1) / 2; k++) for (i = 2; i <= nSCHOOL; i++) for (j = 1; j < i; j++) mu_dist[i][j] += (oldmu[k][i] - oldmu[k][j]) * (oldmu[k][i] - oldmu[k][j]); for (i = 2; i <= nSCHOOL; i++) for (j = 1; j < i; j++) mu_dist[j][i] = mu_dist[i][j]; for (i = 1; i <= nSCHOOL; i++) for (j = 1; j <= nSCHOOL; j++) sum_mu_dist[i][j] += sqrt(mu_dist[i][j]) / ((niter - nburn) / thin); for (i = 2; i <= nSCHOOL; i++) for (j = 1; j < i; j++) fprintf(JJW, "%.4f ", sqrt(mu_dist[i][j])); fprintf(HUR, "\n"); fprintf(OUT, "\n"); fprintf(JYW, "\n"); fprintf(JIN, "\n"); fprintf(PRT, "\n"); fprintf(JJW, "\n"); } } fclose(HUR); fclose(JYW); fclose(OUT); fclose(JIN); fclose(PRT); fclose(JJW); frname[0] = 'R'; frname[1] = 'E'; frname[2] = 'S'; frname[3] = 'U'; frname[4] = 'L'; frname[5] = 'T'; frname[6] = '/'; frname[7] = 's'; frname[8] = 'i'; frname[9] = 'm'; frname[12] = '_'; frname[14] = (char)(48 + MM); frname[15] = '.'; frname[16] = 'l'; frname[17] = 'o'; frname[18] = 'g'; frname[19] = '\0'; for (a = 1; a <= nSCHOOL; a++) { if (a < 10) { frname[10] = (char)(48); frname[11] = (char)(a + 48); } else { frname[10] = (char)(a / 10 + 48); frname[11] = (char)(a % 10 + 48); } frname[13] = 'z'; JIN = fopen(frname, "a"); frname[13] = 'b'; HUR = fopen(frname, "a"); frname[13] = 't'; OUT = fopen(frname, "a"); frname[13] = 'i'; JYW = fopen(frname, "a"); frname[13] = 'h'; ASA = fopen(frname, "a"); for (k = 1; k <= count[a]; k++) { for (i = 1; i <= ncount[a]; i++) for (j = 1; j <= nDIM; j++) fprintf(JIN, "% .4f 
", SCHOOL[a].sample_Zsamp[k][i][j]); fprintf(JIN, "\n"); for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) fprintf(JYW, "% .4f ", SCHOOL[a].sample_Zitem[k][i][j]); fprintf(JYW, "\n"); for (i = 1; i <= nITEM; i++) fprintf(HUR, "% .4f ", SCHOOL[a].sample_beta[k][i]); fprintf(HUR, "\n"); for (i = 1; i <= ncount[a]; i++) fprintf(OUT, "% .4f ", SCHOOL[a].sample_theta[k][i]); fprintf(OUT, "\n"); fprintf(ASA, "%.4f\n", SCHOOL[a].sample_sigma[k]); } fclose(JIN); fclose(HUR); fclose(OUT); fclose(JYW); fclose(ASA); } //Calculate Mean and Variance of MCMC Estimators for (a = 1; a <= nSCHOOL; a++) { for (i = 1; i <= count[a]; i++) { SCHOOL[a].sum_sigma += SCHOOL[a].sample_sigma[i] / count[a]; SCHOOL[a].var_sigma += SCHOOL[a].sample_sigma[i] * SCHOOL[a].sample_sigma[i] / (count[a] - 1); for (j = 1; j <= nITEM; j++) { SCHOOL[a].sum_beta[j] += SCHOOL[a].sample_beta[i][j] / count[a]; SCHOOL[a].var_beta[j] += SCHOOL[a].sample_beta[i][j] * SCHOOL[a].sample_beta[i][j] / (count[a] - 1); } for (j = 1; j <= ncount[a]; j++) { SCHOOL[a].sum_theta[j] += SCHOOL[a].sample_theta[i][j] / count[a]; SCHOOL[a].var_theta[j] += SCHOOL[a].sample_theta[i][j] * SCHOOL[a].sample_theta[i][j] / (count[a] - 1); } for (j = 1; j <= ncount[a]; j++) for (k = 1; k <= nDIM; k++) { SCHOOL[a].sum_Zsamp[j][k] += SCHOOL[a].sample_Zsamp[i][j][k] / count[a]; SCHOOL[a].var_Zsamp[j][k] += SCHOOL[a].sample_Zsamp[i][j][k] * SCHOOL[a].sample_Zsamp[i][j][k] / (count[a] - 1); } for (j = 1; j <= nITEM; j++) for (k = 1; k <= nDIM; k++) { SCHOOL[a].sum_Zitem[j][k] += SCHOOL[a].sample_Zitem[i][j][k] / count[a]; SCHOOL[a].var_Zitem[j][k] += SCHOOL[a].sample_Zitem[i][j][k] * SCHOOL[a].sample_Zitem[i][j][k] / (count[a] - 1); } for (j = 1; j <= nITEM; j++) for (k = 1; k <= nITEM; k++) SCHOOL[a].sample_item_mat[j][k] = 0.0; for (j = 2; j <= nITEM; j++) for (k = 1; k < j; k++) for (l = 1; l <= nDIM; l++) SCHOOL[a].sample_item_mat[j][k] += pow((SCHOOL[a].sample_Zitem[i][j][l] - SCHOOL[a].sample_Zitem[i][k][l]), 2.0); for (j = 2; j <= nITEM; j++) for (k = 1; k < j; k++) SCHOOL[a].sample_item_mat[k][j] = SCHOOL[a].sample_item_mat[j][k]; for (j = 1; j <= nITEM; j++) for (k = 1; k <= nITEM; k++) { SCHOOL[a].sum_item_mat[j][k] += SCHOOL[a].sample_item_mat[j][k] / count[a]; SCHOOL[a].var_item_mat[j][k] += SCHOOL[a].sample_item_mat[j][k] * SCHOOL[a].sample_item_mat[j][k] / (count[a] - 1); } } SCHOOL[a].var_sigma -= SCHOOL[a].sum_sigma * SCHOOL[a].sum_sigma * count[a] / (count[a] - 1); for (i = 1; i <= nITEM; i++) SCHOOL[a].var_beta[i] -= SCHOOL[a].sum_beta[i] * SCHOOL[a].sum_beta[i] * count[a] / (count[a] - 1); for (i = 1; i <= ncount[a]; i++) SCHOOL[a].var_theta[i] -= SCHOOL[a].sum_theta[i] * SCHOOL[a].sum_theta[i] * count[a] / (count[a] - 1); for (i = 1; i <= ncount[a]; i++) for (j = 1; j <= nDIM; j++) SCHOOL[a].var_Zsamp[i][j] -= SCHOOL[a].sum_Zsamp[i][j] * SCHOOL[a].sum_Zsamp[i][j] * count[a] / (count[a] - 1); for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) SCHOOL[a].var_Zitem[i][j] -= SCHOOL[a].sum_Zitem[i][j] * SCHOOL[a].sum_Zitem[i][j] * count[a] / (count[a] - 1); for (i = 1; i <= nITEM; i++) for (j = 1; j <= nITEM; j++) SCHOOL[a].var_item_mat[i][j] -= SCHOOL[a].sum_item_mat[i][j] * SCHOOL[a].sum_item_mat[i][j] * count[a] / (count[a] - 1); } for (i = 1; i <= gcount; i++) { for (j = 1; j <= nITEM * (nITEM - 1) / 2; j++) { sum_tau[j] += sample_tau[i][j] / gcount; sum_delta[j] += sample_delta[i][j] / gcount; var_tau[j] += sample_tau[i][j] * sample_tau[i][j] / (gcount - 1); var_delta[j] += sample_delta[i][j] * sample_delta[i][j] / (gcount - 1); } 
for (j = 1; j <= nSCHOOL; j++) { sum_sigma[j] += sample_sigma[i][j] / gcount; var_sigma[j] += sample_sigma[i][j] * sample_sigma[i][j] / (gcount - 1); } for (j = 1; j <= nITEM; j++) { sum_gamma[j] += sample_gamma[i][j] / gcount; sum_varphi[j] += sample_varphi[i][j] / gcount; var_gamma[j] += sample_gamma[i][j] * sample_gamma[i][j] / (gcount - 1); var_varphi[j] += sample_varphi[i][j] * sample_varphi[i][j] / (gcount - 1); } } for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) { var_tau[i] -= sum_tau[i] * sum_tau[i] * gcount / (gcount - 1); var_delta[i] -= sum_delta[i] * sum_delta[i] * gcount / (gcount - 1); } for (i = 1; i <= nSCHOOL; i++) var_sigma[i] -= sum_sigma[i] * sum_sigma[i] * gcount / (gcount - 1); for (i = 1; i <= nITEM; i++) { var_gamma[i] -= sum_gamma[i] * sum_gamma[i] * gcount / (gcount - 1); var_varphi[i] -= sum_varphi[i] * sum_varphi[i] * gcount / (gcount - 1); } //Save Parameter Estimates frname[0] = 'R'; frname[1] = 'E'; frname[2] = 'S'; frname[3] = 'U'; frname[4] = 'L'; frname[5] = 'T'; frname[6] = '/'; frname[7] = 's'; frname[8] = 'u'; frname[9] = 'm'; frname[12] = '_'; frname[14] = (char)(48 + MM); frname[15] = '.'; frname[16] = 'l'; frname[17] = 'o'; frname[18] = 'g'; frname[19] = '\0'; for (a = 1; a <= nSCHOOL; a++) { if (a < 10) { frname[10] = (char)(48); frname[11] = (char)(a + 48); } else { frname[10] = (char)(a / 10 + 48); frname[11] = (char)(a % 10 + 48); } frname[13] = 'z'; JIN = fopen(frname, "a"); frname[13] = 'b'; HUR = fopen(frname, "a"); frname[13] = 't'; OUT = fopen(frname, "a"); frname[13] = 'i'; JYW = fopen(frname, "a"); frname[13] = 'd'; PRT = fopen(frname, "a"); for (i = 1; i <= nITEM; i++) fprintf(HUR, "%.4f ", SCHOOL[a].sum_beta[i]); fprintf(HUR, "\n"); for (i = 1; i <= nITEM; i++) fprintf(HUR, "%.4f ", SCHOOL[a].var_beta[i]); fprintf(HUR, "\n"); for (i = 1; i <= nITEM; i++) fprintf(HUR, "%.4f ", SCHOOL[a].acc_beta[i]); fprintf(HUR, "\n"); for (i = 1; i <= ncount[a]; i++) fprintf(OUT, "%.4f ", SCHOOL[a].sum_theta[i]); fprintf(OUT, "\n"); for (i = 1; i <= ncount[a]; i++) fprintf(OUT, "%.4f ", SCHOOL[a].var_theta[i]); fprintf(OUT, "\n"); for (i = 1; i <= ncount[a]; i++) fprintf(OUT, "%.4f ", SCHOOL[a].acc_theta[i]); fprintf(OUT, "\n"); for (i = 1; i <= ncount[a]; i++) for (j = 1; j <= nDIM; j++) fprintf(JIN, "%.4f ", SCHOOL[a].sum_Zsamp[i][j]); fprintf(JIN, "\n"); for (i = 1; i <= ncount[a]; i++) for (j = 1; j <= nDIM; j++) fprintf(JIN, "%.4f ", SCHOOL[a].var_Zsamp[i][j]); fprintf(JIN, "\n"); for (i = 1; i <= ncount[a]; i++) for (j = 1; j <= nDIM; j++) fprintf(JIN, "%.4f ", SCHOOL[a].acc_Zsamp[i]); fprintf(JIN, "\n"); for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) fprintf(JYW, "%.4f ", SCHOOL[a].sum_Zitem[i][j]); fprintf(JYW, "\n"); for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) fprintf(JYW, "%.4f ", SCHOOL[a].var_Zitem[i][j]); fprintf(JYW, "\n"); for (i = 1; i <= nITEM; i++) for (j = 1; j <= nDIM; j++) fprintf(JYW, "%.4f ", SCHOOL[a].acc_Zitem[i]); fprintf(JYW, "\n"); for (i = 2; i <= nITEM; i++) for (j = 1; j < i; j++) fprintf(PRT, "%.4f ", SCHOOL[a].sum_item_mat[i][j]); fprintf(PRT, "\n"); for (i = 2; i <= nITEM; i++) for (j = 1; j < i; j++) fprintf(PRT, "%.4f ", SCHOOL[a].var_item_mat[i][j]); fprintf(PRT, "\n"); fclose(JIN); fclose(HUR); fclose(OUT); fclose(JYW); fclose(PRT); } frname[0] = 'R'; frname[1] = 'E'; frname[2] = 'S'; frname[3] = 'U'; frname[4] = 'L'; frname[5] = 'T'; frname[6] = '/'; frname[7] = 's'; frname[8] = 'u'; frname[9] = 'm'; frname[10] = '_'; frname[12] = (char)(48 + MM); frname[13] = '.'; frname[14] = 'l'; 
frname[15] = 'o'; frname[16] = 'g'; frname[17] = '\0'; frname[11] = 'm'; JIN = fopen(frname, "a"); frname[11] = 's'; HUR = fopen(frname, "a"); frname[11] = 'l'; JYW = fopen(frname, "a"); frname[11] = 'u'; OUT = fopen(frname, "a"); frname[11] = 'g'; ASA = fopen(frname, "a"); frname[11] = 'p'; PRT = fopen(frname, "a"); frname[11] = 'h'; IMS = fopen(frname, "a"); frname[11] = 'a'; JJW = fopen(frname, "a"); for (i = 1; i <= nSCHOOL; i++) fprintf(HUR, "% .4f ", sum_sigma[i]); fprintf(HUR, "\n"); for (i = 1; i <= nSCHOOL; i++) fprintf(HUR, "% .4f ", var_sigma[i]); fprintf(HUR, "\n"); for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) fprintf(OUT, "% .4f ", sum_tau[i]); fprintf(OUT, "\n"); for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) fprintf(OUT, "% .4f ", var_tau[i]); fprintf(OUT, "\n"); for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) fprintf(JYW, "% .4f ", sum_delta[i]); fprintf(JYW, "\n"); for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) fprintf(JYW, "% .4f ", var_delta[i]); fprintf(JYW, "\n"); for (k = 1; k <= nSCHOOL; k++) { for (i = 1; i <= nITEM * (nITEM - 1) / 2; i++) fprintf(JIN, "% .4f ", sum_mu[i][k]); fprintf(JIN, "\n"); } for (i = 1; i <= nITEM; i++) fprintf(ASA, "% .4f ", sum_gamma[i]); fprintf(ASA, "\n"); for (i = 1; i <= nITEM; i++) fprintf(ASA, "% .4f ", var_gamma[i]); fprintf(ASA, "\n"); for (i = 1; i <= nITEM; i++) fprintf(PRT, "%.4f ", sum_varphi[i]); fprintf(PRT, "\n"); for (i = 1; i <= nITEM; i++) fprintf(PRT, "%.4f ", var_varphi[i]); fprintf(PRT, "\n"); for (k = 1; k <= nSCHOOL; k++) fprintf(IMS, "%.4f ", SCHOOL[k].sum_sigma); fprintf(IMS, "\n"); for (k = 1; k <= nSCHOOL; k++) fprintf(IMS, "%.4f ", SCHOOL[k].var_sigma); fprintf(IMS, "\n"); for (i = 1; i <= nSCHOOL; i++) { for (j = 1; j <= nSCHOOL; j++) fprintf(JJW, "%.4f ", sum_mu_dist[i][j]); fprintf(JJW, "\n"); } fclose(JIN); fclose(HUR); fclose(JYW); fclose(OUT); fclose(ASA); fclose(PRT); fclose(IMS); fclose(JJW); }
/*
 * free_ivector(ncount, 1, nSCHOOL); free_dvector(jump_Z, 0, nITEM);
 *
 * for(k = 0; k <= nSCHOOL; k++){
 *   for(i = 0; i <= nMAX; i++) free(SCHOOL[k].dataset[i]);
 *   for(i = 0; i <= nITEM; i++){ for(a = 0; a <= nMAX; a++) free(SCHOOL[k].Y[i][a]); free(SCHOOL[k].Y[i]); }
 *   for(i = 0; i <= nMAX; i++){ for(a = 0; a <= nITEM; a++) free(SCHOOL[k].U[i][a]); free(SCHOOL[k].U[i]); }
 *   for(i = 0; i <= nMAX; i++){ free(SCHOOL[k].old_Zsamp[i]); free(SCHOOL[k].new_Zsamp[i]); }
 *   for(i = 0; i <= nITEM; i++){ free(SCHOOL[k].old_Zitem[i]); free(SCHOOL[k].new_Zitem[i]); }
 *   for(i = 0; i <= (niter - nburn) / thin; i++){
 *     for(j = 0; j <= nMAX; j++) free(SCHOOL[k].sample_Zsamp[i][j]);
 *     for(j = 0; j <= nITEM; j++) free(SCHOOL[k].sample_Zitem[i][j]);
 *     free(SCHOOL[k].sample_beta[i]); free(SCHOOL[k].sample_theta[i]);
 *     free(SCHOOL[k].sample_Zsamp[i]); free(SCHOOL[k].sample_Zitem[i]);
 *   }
 *   for(i = 0; i <= nMAX; i++){ free(SCHOOL[k].sum_Zsamp[i]); free(SCHOOL[k].var_Zsamp[i]); }
 *   for(i = 0; i <= nITEM; i++){ free(SCHOOL[k].sum_Zitem[i]); free(SCHOOL[k].var_Zitem[i]); }
 *   for(i = 0; i <= nITEM; i++){ free(SCHOOL[k].sum_item_mat[i]); free(SCHOOL[k].var_item_mat[i]); }
 *   for(i = 0; i <= nITEM; i++){ free(SCHOOL[k].old_item_mat[i]); free(SCHOOL[k].new_item_mat[i]); free(SCHOOL[k].sample_item_mat[i]); }
 *   free(SCHOOL[k].old_item_mat); free(SCHOOL[k].new_item_mat);
 *   free(SCHOOL[k].oldbeta); free(SCHOOL[k].newbeta); free(SCHOOL[k].oldtheta); free(SCHOOL[k].newtheta);
 *   free(SCHOOL[k].count_item); free(SCHOOL[k].count_samp);
 *   free(SCHOOL[k].Y); free(SCHOOL[k].U); free(SCHOOL[k].dataset);
 *   free(SCHOOL[k].old_Zsamp); free(SCHOOL[k].new_Zsamp); free(SCHOOL[k].old_Zitem); free(SCHOOL[k].new_Zitem);
 *   free(SCHOOL[k].sample_beta); free(SCHOOL[k].sample_theta);
 *   free(SCHOOL[k].sum_beta); free(SCHOOL[k].var_beta); free(SCHOOL[k].acc_beta);
 *   free(SCHOOL[k].sum_theta); free(SCHOOL[k].var_theta); free(SCHOOL[k].acc_theta);
 *   free(SCHOOL[k].sample_Zsamp); free(SCHOOL[k].sample_Zitem); free(SCHOOL[k].sample_item_mat);
 *   free(SCHOOL[k].sum_Zsamp); free(SCHOOL[k].var_Zsamp); free(SCHOOL[k].acc_Zsamp);
 *   free(SCHOOL[k].sum_Zitem); free(SCHOOL[k].var_Zitem);
 *   free(SCHOOL[k].sum_item_mat); free(SCHOOL[k].var_item_mat);
 *   free(SCHOOL[k].sample_sigma); free(SCHOOL[k].mean_Z);
 * }
 * free(SCHOOL);
 *
 * free_dmatrix(sample_sigma, 1, (niter - nburn) / thin, 1, nSCHOOL);
 * free_dmatrix(sample_delta, 1, (niter - nburn) / thin, 1, nITEM * nDIM);
 * free_dmatrix(sample_tau, 1, (niter - nburn) / thin, 1, nITEM * nDIM);
 * free_dmatrix(sample_gamma, 1, (niter - nburn) / thin, 1, nITEM);
 * free_dmatrix(sample_varphi, 1, (niter - nburn) / thin, 1, nITEM);
 * free_dmatrix(sum_mu, 1, nITEM * nDIM, 0, nSCHOOL);
 * free_dvector(sum_tau, 1, nITEM * nDIM); free_dvector(var_tau, 1, nITEM * nDIM);
 * free_dvector(sum_sigma, 1, nSCHOOL); free_dvector(var_sigma, 1, nSCHOOL);
 * free_dvector(sum_delta, 1, nITEM * nDIM); free_dvector(var_delta, 1, nITEM * nDIM);
 * free_dvector(sum_gamma, 1, nITEM); free_dvector(var_gamma, 1, nITEM);
 * free_dvector(sum_varphi, 1, nITEM); free_dvector(var_varphi, 1, nITEM);
 *
 * free_dvector(oldsigma, 1, nSCHOOL); free_dvector(olddelta, 1, nITEM * nDIM);
 * free_dvector(oldtau, 1, nITEM * nDIM); free_dmatrix(oldmu, 1, nITEM * nDIM, 0, nSCHOOL);
 * free_dvector(oldgamma, 1, nITEM); free_dvector(oldvarphi, 1, nITEM);
 *
 * free_dmatrix(mu_dist, 1, nSCHOOL, 1, nSCHOOL); free_dmatrix(sum_mu_dist, 1, nSCHOOL, 1, nSCHOOL);
 * free_dvector(avg_ran, 1, nSCHOOL); free_dvector(var_ran, 1, nSCHOOL);
 *
 * free_ivector(count, 1, nSCHOOL);
 *
 * free_dvector(sample_samp_like, 1, nMAX); free_dvector(new_samp_distance, 1, nMAX); free_dvector(old_samp_distance, 1, nMAX);
 *
 * free_dvector(sample_item_like, 1, nMAX); free_dmatrix(new_item_distance, 1, nITEM, 1, nITEM); free_dmatrix(old_item_distance, 1, nITEM, 1, nITEM);
 */
return 0; }
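The "Calculate Mean and Variance of MCMC Estimators" pass above uses a one-pass accumulation: each draw contributes x/n to a running mean and x*x/(n - 1) to a running second moment, and the unbiased sample variance is then recovered by subtracting mean*mean*n/(n - 1). A minimal self-contained sketch of that identity (the function and array names are illustrative, not part of the program above; it assumes n >= 2):

#include <stdio.h>

/* One-pass mean/variance, mirroring the accumulation used above:
 * var = sum(x*x)/(n-1) - n*mean*mean/(n-1), the unbiased sample variance. */
static void mcmc_summary(const double *draws, int n, double *mean, double *var)
{
    double sum = 0.0, sumsq = 0.0;
    for (int i = 0; i < n; i++) {
        sum += draws[i] / n;                    /* running mean */
        sumsq += draws[i] * draws[i] / (n - 1); /* running second moment */
    }
    *mean = sum;
    *var = sumsq - sum * sum * (double) n / (n - 1);
}

int main(void)
{
    double x[] = { 1.0, 2.0, 4.0 }, m, v;
    mcmc_summary(x, 3, &m, &v);
    printf("mean=%.4f var=%.4f\n", m, v); /* mean=2.3333 var=2.3333 */
    return 0;
}

The same subtraction appears above for sigma, beta, theta, the latent positions, and the item distance matrices.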
GB_binop__lt_bool.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lt_bool) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__lt_bool) // A.*B function (eWiseMult): GB (_AemultB_03__lt_bool) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_bool) // A*D function (colscale): GB (_AxD__lt_bool) // D*A function (rowscale): GB (_DxB__lt_bool) // C+=B function (dense accum): GB (_Cdense_accumB__lt_bool) // C+=b function (dense accum): GB (_Cdense_accumb__lt_bool) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_bool) // C=scalar+B GB (_bind1st__lt_bool) // C=scalar+B' GB (_bind1st_tran__lt_bool) // C=A+scalar GB (_bind2nd__lt_bool) // C=A'+scalar GB (_bind2nd_tran__lt_bool) // C type: bool // A type: bool // B,b type: bool // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ bool #define GB_BTYPE \ bool #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ bool bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_BOOL || GxB_NO_LT_BOOL) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__lt_bool) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lt_bool) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lt_bool) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type bool bool bwork = (*((bool *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lt_bool) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lt_bool) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lt_bool) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__lt_bool) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lt_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__lt_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lt_bool) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lt_bool) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool 
*Cx = (bool *) Cx_output ; bool x = (*((bool *) x_input)) ; bool *Bx = (bool *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; bool bij = Bx [p] ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lt_bool) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; bool aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__lt_bool) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__lt_bool) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
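The kernels above are stamped out of shared templates: the generated file fixes the types (GB_ATYPE, GB_BTYPE, GB_CTYPE) and the scalar operator (GB_BINOP, here z = (x < y)), then #includes template files that supply the loops. A minimal sketch of that macro-template idiom; apply_kernel is a hypothetical stand-in for the #included templates, not a GraphBLAS function:

#include <stdio.h>
#include <stdbool.h>

/* The scalar operator a "generated" kernel plugs into the shared loop;
 * this mirrors the GB_BINOP definition above (minus the unused i,j args). */
#define GB_BINOP(z, x, y) z = ((x) < (y))

/* Stand-in for a shared template: applies GB_BINOP elementwise. */
static void apply_kernel(bool *Cx, const bool *Ax, const bool *Bx, int n)
{
    for (int p = 0; p < n; p++)
        GB_BINOP(Cx[p], Ax[p], Bx[p]);
}

int main(void)
{
    bool A[] = { false, false, true }, B[] = { true, false, false }, C[3];
    apply_kernel(C, A, B, 3);
    for (int p = 0; p < 3; p++) printf("%d ", C[p]); /* prints: 1 0 0 */
    printf("\n");
    return 0;
}

This is also why the header warns against editing the file: every operator/type combination is regenerated from the same templates.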
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lt_bool) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__lt_bool) // A.*B function (eWiseMult): GB (_AemultB_03__lt_bool) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_bool) // A*D function (colscale): GB (_AxD__lt_bool) // D*A function (rowscale): GB (_DxB__lt_bool) // C+=B function (dense accum): GB (_Cdense_accumB__lt_bool) // C+=b function (dense accum): GB (_Cdense_accumb__lt_bool) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_bool) // C=scalar+B GB (_bind1st__lt_bool) // C=scalar+B' GB (_bind1st_tran__lt_bool) // C=A+scalar GB (_bind2nd__lt_bool) // C=A'+scalar GB (_bind2nd_tran__lt_bool) // C type: bool // A type: bool // B,b type: bool // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ bool #define GB_BTYPE \ bool #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ bool bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_BOOL || GxB_NO_LT_BOOL) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__lt_bool) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lt_bool) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lt_bool) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type bool bool bwork = (*((bool *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lt_bool) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lt_bool) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lt_bool) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__lt_bool) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lt_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__lt_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lt_bool) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lt_bool) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool 
*Cx = (bool *) Cx_output ; bool x = (*((bool *) x_input)) ; bool *Bx = (bool *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; bool bij = Bx [p] ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lt_bool) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; bool aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__lt_bool) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__lt_bool) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
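Every kernel in the file is wrapped in the same guard: when GB_DISABLE is true at compile time (GxB_NO_LT || GxB_NO_BOOL || GxB_NO_LT_BOOL), the kernel returns GrB_NO_VALUE and the caller falls back to a generic implementation. A minimal sketch of that compile-time opt-out; MY_DISABLE and my_info are illustrative stand-ins, not library names:

#include <stdio.h>

typedef enum { MY_SUCCESS, MY_NO_VALUE } my_info; /* stand-in for GrB_Info */

/* Stand-in for GB_DISABLE: set to 1 to compile the fast path out. */
#define MY_DISABLE 1

static my_info fast_kernel(void)
{
#if MY_DISABLE
    return MY_NO_VALUE;  /* hard-coded loop compiled out: caller must punt */
#else
    /* ... specialized hard-coded loop would run here ... */
    return MY_SUCCESS;
#endif
}

int main(void)
{
    if (fast_kernel() == MY_NO_VALUE)
        printf("falling back to the generic kernel\n");
    return 0;
}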
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lt_bool) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__lt_bool) // A.*B function (eWiseMult): GB (_AemultB_03__lt_bool) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_bool) // A*D function (colscale): GB (_AxD__lt_bool) // D*A function (rowscale): GB (_DxB__lt_bool) // C+=B function (dense accum): GB (_Cdense_accumB__lt_bool) // C+=b function (dense accum): GB (_Cdense_accumb__lt_bool) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_bool) // C=scalar+B GB (_bind1st__lt_bool) // C=scalar+B' GB (_bind1st_tran__lt_bool) // C=A+scalar GB (_bind2nd__lt_bool) // C=A'+scalar GB (_bind2nd_tran__lt_bool) // C type: bool // A type: bool // B,b type: bool // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ bool #define GB_BTYPE \ bool #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ bool bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_BOOL || GxB_NO_LT_BOOL) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__lt_bool) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lt_bool) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lt_bool) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type bool bool bwork = (*((bool *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lt_bool) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lt_bool) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lt_bool) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__lt_bool) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lt_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__lt_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lt_bool) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lt_bool) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool 
*Cx = (bool *) Cx_output ; bool x = (*((bool *) x_input)) ; bool *Bx = (bool *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; bool bij = Bx [p] ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lt_bool) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; bool aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__lt_bool) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__lt_bool) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
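The _bind1st__lt_bool and _bind2nd__lt_bool kernels above share one parallel pattern: a flat loop over all anz entries, an early continue where the bitmap marks an entry absent (the GBB test), and a #pragma omp parallel for num_threads(...) schedule(static) in the OpenMP build. A minimal sketch of that masked apply under the same pattern; bind2nd_lt and the plain signed char bitmap are illustrative, not the library's types:

#include <stdio.h>
#include <stdbool.h>

/* Bitmap-masked bind2nd apply: entries with Ab[p] == 0 are absent and
 * skipped; a NULL bitmap means all entries are present. */
static void bind2nd_lt(bool *Cx, const bool *Ax, bool y,
                       const signed char *Ab, long n, int nthreads)
{
    long p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < n; p++)
    {
        if (Ab != NULL && !Ab[p]) continue; /* entry not present */
        Cx[p] = (Ax[p] < y);
    }
}

int main(void)
{
    bool A[] = { false, true, false }, C[3] = { false, false, false };
    signed char Ab[] = { 1, 1, 0 };
    bind2nd_lt(C, A, true, Ab, 3, 2);
    printf("%d %d %d\n", C[0], C[1], C[2]); /* 1 0 0 */
    return 0;
}

schedule(static) fits here because the per-iteration work is near constant, so equal-sized chunks balance well; without OpenMP the pragma is ignored and the loop runs serially, which is exactly the no_omp variant above.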
convolve.c
// Taken from astropy // Licensed under a 3-clause BSD style license /*----------------------------- WARNING! ----------------------------- * The C functions below are NOT designed to be called externally to * the Python function astropy/astropy/convolution/convolve.py. * They do NOT include any of the required correct usage checking. * *------------------------------- NOTES ------------------------------ * * The simplest implementation of convolution does not deal with any boundary * treatment, and pixels within half a kernel width of the edge of the image are * set to zero. In cases where a boundary mode is set, we pad the input array in * the Python code. In the 1D case, this means that the input array to the C * code has a size nx + nkx where nx is the original array size and nkx is the * size of the kernel. If we also padded the results array, then we could use * the exact same C code for the convolution, provided that the results array * was 'unpadded' in the Python code after the C code. * * However, to avoid needlessly padding the results array, we instead adjust the * index when accessing the results array - for example in the 1D case we shift * the index in the results array compared to the input array by half the kernel * size. This is done via the 'result_index' variable, and this behavior is * triggered by the 'embed_result_within_padded_region' setting. * */ #include <assert.h> #include <math.h> #include <stdbool.h> #include <stdlib.h> #include <stddef.h> #include "convolve.h" // Distutils on Windows automatically exports ``PyInit__convolve``, // create dummy to prevent linker complaining about missing symbol. #if defined(_MSC_VER) void PyInit__convolve(void) { return; } #endif #ifdef _OPENMP #include <omp.h> #endif void convolveNd_c(DTYPE * const result, const DTYPE * const f, const unsigned n_dim, const size_t * const image_shape, const DTYPE * const g, const size_t * const kernel_shape, const bool nan_interpolate, const bool embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g || !image_shape || !kernel_shape) return; #else assert(result); assert(f); assert(g); assert(image_shape); assert(kernel_shape); #endif if (n_dim == 1) convolve1d_c(result, f, image_shape[0], g, kernel_shape[0], nan_interpolate, embed_result_within_padded_region, n_threads); else if (n_dim == 2) convolve2d_c(result, f, image_shape[0], image_shape[1], g, kernel_shape[0], kernel_shape[1], nan_interpolate, embed_result_within_padded_region, n_threads); else if (n_dim == 3) convolve3d_c(result, f, image_shape[0], image_shape[1], image_shape[2], g, kernel_shape[0], kernel_shape[1], kernel_shape[2], nan_interpolate, embed_result_within_padded_region, n_threads); else assert(0); // Unimplemented: n_dim > 3 } /*-------------------------PERFORMANCE NOTES-------------------------------- * The function wrappers below are designed to take advantage of the following: * The preprocessor will inline convolve<N>d(), effectively * expanding the two logical branches, replacing nan_interpolate * for their literal equivalents. The corresponding conditionals * within these functions will then be optimized away, this * being the goal - removing the unnecessary conditionals from * the loops without duplicating code. 
*-------------------------------------------------------------------------- */ void convolve1d_c(DTYPE * const result, const DTYPE * const f, const size_t nx, const DTYPE * const g, const size_t nkx, const bool nan_interpolate, const bool embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif if (nan_interpolate) { if (embed_result_within_padded_region) convolve1d(result, f, nx, g, nkx, true, true, n_threads); else convolve1d(result, f, nx, g, nkx, true, false, n_threads); } else { if (embed_result_within_padded_region) convolve1d(result, f, nx, g, nkx, false, true, n_threads); else convolve1d(result, f, nx, g, nkx, false, false, n_threads); } } void convolve2d_c(DTYPE * const result, const DTYPE * const f, const size_t nx, const size_t ny, const DTYPE * const g, const size_t nkx, const size_t nky, const bool nan_interpolate, const bool embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif if (nan_interpolate) { if (embed_result_within_padded_region) convolve2d(result, f, nx, ny, g, nkx, nky, true, true, n_threads); else convolve2d(result, f, nx, ny, g, nkx, nky, true, false, n_threads); } else { if (embed_result_within_padded_region) convolve2d(result, f, nx, ny, g, nkx, nky, false, true, n_threads); else convolve2d(result, f, nx, ny, g, nkx, nky, false, false, n_threads); } } void convolve3d_c(DTYPE * const result, const DTYPE * const f, const size_t nx, const size_t ny, const size_t nz, const DTYPE * const g, const size_t nkx, const size_t nky, const size_t nkz, const bool nan_interpolate, const bool embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif if (nan_interpolate) { if (embed_result_within_padded_region) convolve3d(result, f, nx, ny, nz, g, nkx, nky, nkz, true, true, n_threads); else convolve3d(result, f, nx, ny, nz, g, nkx, nky, nkz, true, false, n_threads); } else { if (embed_result_within_padded_region) convolve3d(result, f, nx, ny, nz, g, nkx, nky, nkz, false, true, n_threads); else convolve3d(result, f, nx, ny, nz, g, nkx, nky, nkz, false, false, n_threads); } } // 1D FORCE_INLINE void convolve1d(DTYPE * const result, const DTYPE * const f, const size_t _nx, const DTYPE * const g, const size_t _nkx, const bool _nan_interpolate, const bool _embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif const size_t _wkx = _nkx / 2; #ifdef NDEBUG if (!(_nx > 2*_wkx)) return; #else assert(_nx > 2*_wkx); #endif #ifdef _OPENMP omp_set_num_threads(n_threads); // Set number of threads to use #pragma omp parallel { // Code within this block is threaded #endif // Copy these to thread locals to allow compiler to optimize (hoist/loads licm) // when threaded. Without these, compile time constant conditionals may // not be optimized away. 
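// Worked example of the result_index shift described in the NOTES at the
// top of this file (illustrative numbers, not from any caller): with a
// padded input of nx = 8 and a kernel of nkx = 3, wkx = 1 and i runs over
// 1..6. When embed_result_within_padded_region is true the output keeps
// the padded coordinates (result[1..6] of a length-nx array); when false
// each index is shifted left by wkx, filling result[0..5] of a
// length-(nx - 2*wkx) array.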
const size_t nx = _nx; const size_t nkx = _nkx; const size_t nkx_minus_1 = nkx - 1; const bool nan_interpolate = _nan_interpolate; const bool embed_result_within_padded_region = _embed_result_within_padded_region; // Thread locals const size_t wkx = _wkx; const omp_iter_var nx_minus_wkx = nx - wkx; size_t i_minus_wkx; size_t result_index; DTYPE top, bot=0., ker, val; {omp_iter_var i; #ifdef _OPENMP #pragma omp for schedule(dynamic) #endif for (i = wkx; i < nx_minus_wkx; ++i) { i_minus_wkx = i - wkx; top = 0.; if (nan_interpolate) // compile time constant bot = 0.; {omp_iter_var ii; for (ii = 0; ii < nkx; ++ii) { val = f[i_minus_wkx + ii]; ker = g[nkx_minus_1 - ii]; if (nan_interpolate) // compile time constant { if (!isnan(val)) { top += val * ker; bot += ker; } } else top += val * ker; }} if (embed_result_within_padded_region) { // compile time constant result_index = i; } else { result_index = i_minus_wkx; } if (nan_interpolate) // compile time constant { if (bot == 0) // This should prob be np.isclose(kernel_sum, 0, atol=normalization_zero_tol) result[result_index] = f[i]; else result[result_index] = top / bot; } else result[result_index] = top; }} #ifdef _OPENMP }//end parallel scope #endif } // 2D FORCE_INLINE void convolve2d(DTYPE * const result, const DTYPE * const f, const size_t _nx, const size_t _ny, const DTYPE * const g, const size_t _nkx, const size_t _nky, const bool _nan_interpolate, const bool _embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif const size_t _wkx = _nkx / 2; const size_t _wky = _nky / 2; #ifdef NDEBUG if (!(_nx > 2*_wkx) || !(_ny > 2*_wky)) return; #else assert(_nx > 2*_wkx); assert(_ny > 2*_wky); #endif #ifdef _OPENMP omp_set_num_threads(n_threads); // Set number of threads to use #pragma omp parallel { // Code within this block is threaded #endif // Copy these to thread locals to allow compiler to optimize (hoist/loads licm) // when threaded. Without these, compile time constant conditionals may // not be optimized away. 
const size_t nx = _nx, ny = _ny; const size_t nkx = _nkx, nky = _nky; const size_t nkx_minus_1 = nkx - 1, nky_minus_1 = nky - 1; const bool nan_interpolate = _nan_interpolate; const bool embed_result_within_padded_region = _embed_result_within_padded_region; // Thread locals const size_t wkx = _wkx; const size_t wky = _wky; const omp_iter_var nx_minus_wkx = nx - wkx; const omp_iter_var ny_minus_wky = ny - wky; const size_t ny_minus_2wky = ny - 2 * wky; size_t i_minus_wkx, j_minus_wky; size_t result_cursor; size_t f_cursor, g_cursor; size_t result_index; DTYPE top, bot=0., ker, val; {omp_iter_var i; #ifdef _OPENMP #pragma omp for schedule(dynamic) #endif for (i = wkx; i < nx_minus_wkx; ++i) { i_minus_wkx = i - wkx; result_cursor = i*ny; {omp_iter_var j; for (j = wky; j < ny_minus_wky; ++j) { j_minus_wky = j - wky; top = 0.; if (nan_interpolate) // compile time constant bot = 0.; {omp_iter_var ii; for (ii = 0; ii < nkx; ++ii) { f_cursor = (i_minus_wkx + ii)*ny + j_minus_wky; g_cursor = (nkx_minus_1 - ii)*nky + nky_minus_1; {omp_iter_var jj; for (jj = 0; jj < nky; ++jj) { val = f[f_cursor + jj]; ker = g[g_cursor - jj]; if (nan_interpolate) // compile time constant { if (!isnan(val)) { top += val * ker; bot += ker; } } else top += val * ker; }} }} if (embed_result_within_padded_region) { // compile time constant result_index = result_cursor + j; } else { result_index = i_minus_wkx * ny_minus_2wky + j_minus_wky; } if (nan_interpolate) // compile time constant { if (bot == 0) // This should prob be np.isclose(kernel_sum, 0, atol=normalization_zero_tol) result[result_index] = f[result_cursor + j] ; else result[result_index] = top / bot; } else result[result_index] = top; }} }} #ifdef _OPENMP }//end parallel scope #endif } // 3D FORCE_INLINE void convolve3d(DTYPE * const result, const DTYPE * const f, const size_t _nx, const size_t _ny, const size_t _nz, const DTYPE * const g, const size_t _nkx, const size_t _nky, const size_t _nkz, const bool _nan_interpolate, const bool _embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif const size_t _wkx = _nkx / 2; const size_t _wky = _nky / 2; const size_t _wkz = _nkz / 2; #ifdef NDEBUG if (!(_nx > 2*_wkx) || !(_ny > 2*_wky) || !(_nz > 2*_wkz)) return; #else assert(_nx > 2*_wkx); assert(_ny > 2*_wky); assert(_nz > 2*_wkz); #endif #ifdef _OPENMP omp_set_num_threads(n_threads); // Set number of threads to use #pragma omp parallel { // Code within this block is threaded #endif // Copy these to thread locals to allow compiler to optimize (hoist/loads licm) // when threaded. Without these, compile time constant conditionals may // not be optimized away. 
const size_t nx = _nx, ny = _ny, nz = _nz; const size_t nkx = _nkx, nky = _nky, nkz = _nkz; const size_t nkx_minus_1 = nkx - 1, nky_minus_1 = nky - 1, nkz_minus_1 = nkz - 1; const bool nan_interpolate = _nan_interpolate; const bool embed_result_within_padded_region = _embed_result_within_padded_region; // Thread locals const size_t wkx = _wkx; const size_t wky = _wky; const size_t wkz = _wkz; const size_t nx_minus_wkx = nx - wkx; const omp_iter_var ny_minus_wky = ny - wky; const omp_iter_var nz_minus_wkz = nz - wkz; const size_t ny_minus_2wky = ny - 2 * wky; const size_t nz_minus_2wkz = nz - 2 * wkz; size_t i_minus_wkx, j_minus_wky, k_minus_wkz; size_t f_ii_cursor, g_ii_cursor; size_t f_cursor, g_cursor; size_t array_cursor, array_i_cursor; size_t result_index; DTYPE top, bot=0., ker, val; {omp_iter_var i; #ifdef _OPENMP #pragma omp for schedule(dynamic) #endif for (i = wkx; i < nx_minus_wkx; ++i) { i_minus_wkx = i - wkx; array_i_cursor = i*ny; {omp_iter_var j; for (j = wky; j < ny_minus_wky; ++j) { j_minus_wky = j - wky; array_cursor = (array_i_cursor + j)*nz; {omp_iter_var k; for (k = wkz; k < nz_minus_wkz; ++k) { k_minus_wkz = k - wkz; top = 0.; if (nan_interpolate) // compile time constant bot = 0.; {omp_iter_var ii; for (ii = 0; ii < nkx; ++ii) { f_ii_cursor = ((i_minus_wkx + ii)*ny + j_minus_wky)*nz + k_minus_wkz; g_ii_cursor = ((nkx_minus_1 - ii)*nky + nky_minus_1)*nkz + nkz_minus_1; {omp_iter_var jj; for (jj = 0; jj < nky; ++jj) { f_cursor = f_ii_cursor + jj*nz; g_cursor = g_ii_cursor - jj*nkz; {omp_iter_var kk; for (kk = 0; kk < nkz; ++kk) { val = f[f_cursor + kk]; ker = g[g_cursor - kk]; if (nan_interpolate) // compile time constant { if (!isnan(val)) { top += val * ker; bot += ker; } } else top += val * ker; }} }} }} if (embed_result_within_padded_region) { // compile time constant result_index = array_cursor + k; } else { result_index = (i_minus_wkx*ny_minus_2wky + j_minus_wky)*nz_minus_2wkz + k_minus_wkz; } if (nan_interpolate) // compile time constant { if (bot == 0) // This should prob be np.isclose(kernel_sum, 0, atol=normalization_zero_tol) result[result_index] = f[array_cursor+ k] ; else result[result_index] = top / bot; } else result[result_index] = top; }} }} }} #ifdef _OPENMP }//end parallel scope #endif }
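A minimal, self-contained sketch of the indexing scheme described in the file's NOTES comment: the caller pads the input by the kernel half-width on each side, and `result_index` either keeps the padded offset or shifts it back. Here `double` stands in for the build-time `DTYPE`, and the name `conv1d_sketch` is illustrative, not part of the astropy API.

#include <stddef.h>
#include <stdbool.h>

void conv1d_sketch(double *result, const double *f, size_t nx_padded,
                   const double *g, size_t nkx, bool embed)
{
    const size_t wkx = nkx / 2;                     /* kernel half-width */
    for (size_t i = wkx; i < nx_padded - wkx; ++i) {
        double top = 0.;
        for (size_t ii = 0; ii < nkx; ++ii)
            top += f[i - wkx + ii] * g[nkx - 1 - ii];
        /* the 'result_index' choice: keep the padded offset, or shift back */
        result[embed ? i : i - wkx] = top;
    }
}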
// Taken from astropy // Licensed under a 3 - clause BSD style license /*----------------------------- WARNING! ----------------------------- * The C functions below are NOT designed to be called externally to * the Python function astropy/astropy/convolution/convolve.py. * They do NOT include any of the required correct usage checking. * *------------------------------- NOTES ------------------------------ * * The simplest implementation of convolution does not deal with any boundary * treatment, and pixels within half a kernel width of the edge of the image are * set to zero. In cases where a boundary mode is set, we pad the input array in * the Python code. In the 1D case, this means that the input array to the C * code has a size nx + nkx where nx is the original array size and nkx is the * size of the kernel. If we also padded the results array, then we could use * the exact same C code for the convolution, provided that the results array * was 'unpadded' in the Python code after the C code. * * However, to avoid needlessly padding the results array, we instead adjust the * index when accessing the results array - for example in the 1D case we shift * the index in the results array compared to the input array by half the kernel * size. This is done via the 'result_index' variable, and this behavior is * triggered by the 'embed_result_within_padded_region' setting. * */ #include <assert.h> #include <math.h> #include <stdbool.h> #include <stdlib.h> #include <stddef.h> #include "convolve.h" // Distutils on Windows automatically exports `` PyInit__convolve ``, //create dummy to prevent linker complaining about missing symbol. #if defined(_MSC_VER) void PyInit__convolve(void) { return; } #endif void convolveNd_c(DTYPE * const result, const DTYPE * const f, const unsigned n_dim, const size_t * const image_shape, const DTYPE * const g, const size_t * const kernel_shape, const bool nan_interpolate, const bool embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g || !image_shape || !kernel_shape) return; #else assert(result); assert(f); assert(g); assert(image_shape); assert(kernel_shape); #endif if (n_dim == 1) convolve1d_c(result, f, image_shape[0], g, kernel_shape[0], nan_interpolate, embed_result_within_padded_region, n_threads); else if (n_dim == 2) convolve2d_c(result, f, image_shape[0], image_shape[1], g, kernel_shape[0], kernel_shape[1], nan_interpolate, embed_result_within_padded_region, n_threads); else if (n_dim == 3) convolve3d_c(result, f, image_shape[0], image_shape[1], image_shape[2], g, kernel_shape[0], kernel_shape[1], kernel_shape[2], nan_interpolate, embed_result_within_padded_region, n_threads); else assert(0); //Unimplemented:n_dim > 3 } /*-------------------------PERFORMANCE NOTES-------------------------------- * The function wrappers below are designed to take advantage of the following: * The preprocessor will inline convolve<N>d(), effectively * expanding the two logical branches, replacing nan_interpolate * for their literal equivalents. The corresponding conditionals * within these functions will then be optimized away, this * being the goal - removing the unnecessary conditionals from * the loops without duplicating code. 
*-------------------------------------------------------------------------- */ void convolve1d_c(DTYPE * const result, const DTYPE * const f, const size_t nx, const DTYPE * const g, const size_t nkx, const bool nan_interpolate, const bool embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif if (nan_interpolate) { if (embed_result_within_padded_region) convolve1d(result, f, nx, g, nkx, true, true, n_threads); else convolve1d(result, f, nx, g, nkx, true, false, n_threads); } else { if (embed_result_within_padded_region) convolve1d(result, f, nx, g, nkx, false, true, n_threads); else convolve1d(result, f, nx, g, nkx, false, false, n_threads); } } void convolve2d_c(DTYPE * const result, const DTYPE * const f, const size_t nx, const size_t ny, const DTYPE * const g, const size_t nkx, const size_t nky, const bool nan_interpolate, const bool embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif if (nan_interpolate) { if (embed_result_within_padded_region) convolve2d(result, f, nx, ny, g, nkx, nky, true, true, n_threads); else convolve2d(result, f, nx, ny, g, nkx, nky, true, false, n_threads); } else { if (embed_result_within_padded_region) convolve2d(result, f, nx, ny, g, nkx, nky, false, true, n_threads); else convolve2d(result, f, nx, ny, g, nkx, nky, false, false, n_threads); } } void convolve3d_c(DTYPE * const result, const DTYPE * const f, const size_t nx, const size_t ny, const size_t nz, const DTYPE * const g, const size_t nkx, const size_t nky, const size_t nkz, const bool nan_interpolate, const bool embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif if (nan_interpolate) { if (embed_result_within_padded_region) convolve3d(result, f, nx, ny, nz, g, nkx, nky, nkz, true, true, n_threads); else convolve3d(result, f, nx, ny, nz, g, nkx, nky, nkz, true, false, n_threads); } else { if (embed_result_within_padded_region) convolve3d(result, f, nx, ny, nz, g, nkx, nky, nkz, false, true, n_threads); else convolve3d(result, f, nx, ny, nz, g, nkx, nky, nkz, false, false, n_threads); } } //1 D FORCE_INLINE void convolve1d(DTYPE * const result, const DTYPE * const f, const size_t _nx, const DTYPE * const g, const size_t _nkx, const bool _nan_interpolate, const bool _embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif const size_t _wkx = _nkx / 2; #ifdef NDEBUG if (!(_nx > 2 * _wkx)) return; #else assert(_nx > 2 * _wkx); #endif //Copy these to thread locals to allow compiler to optimize(hoist / loads licm) // when threaded.Without these, compile time constant conditionals may // not be optimized away. 
const size_t nx = _nx; const size_t nkx = _nkx; const size_t nkx_minus_1 = nkx - 1; const bool nan_interpolate = _nan_interpolate; const bool embed_result_within_padded_region = _embed_result_within_padded_region; //Thread locals const size_t wkx = _wkx; const omp_iter_var nx_minus_wkx = nx - wkx; size_t i_minus_wkx; size_t result_index; DTYPE top, bot = 0., ker, val; { omp_iter_var i; for (i = wkx; i < nx_minus_wkx; ++i) { i_minus_wkx = i - wkx; top = 0.; if (nan_interpolate) //compile time constant bot = 0.; { omp_iter_var ii; for (ii = 0; ii < nkx; ++ii) { val = f[i_minus_wkx + ii]; ker = g[nkx_minus_1 - ii]; if (nan_interpolate) //compile time constant { if (!isnan(val)) { top += val * ker; bot += ker; } } else top += val * ker; } } if (embed_result_within_padded_region) { //compile time constant result_index = i; } else { result_index = i_minus_wkx; } if (nan_interpolate) //compile time constant { if (bot == 0) //This should prob be np.isclose(kernel_sum, 0, atol = normalization_zero_tol) result[result_index] = f[i]; else result[result_index] = top / bot; } else result[result_index] = top; } } } //2 D FORCE_INLINE void convolve2d(DTYPE * const result, const DTYPE * const f, const size_t _nx, const size_t _ny, const DTYPE * const g, const size_t _nkx, const size_t _nky, const bool _nan_interpolate, const bool _embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif const size_t _wkx = _nkx / 2; const size_t _wky = _nky / 2; #ifdef NDEBUG if (!(_nx > 2 * _wkx) || !(_ny > 2 * _wky)) return; #else assert(_nx > 2 * _wkx); assert(_ny > 2 * _wky); #endif //Copy these to thread locals to allow compiler to optimize(hoist / loads licm) // when threaded.Without these, compile time constant conditionals may // not be optimized away. 
const size_t nx = _nx, ny = _ny; const size_t nkx = _nkx, nky = _nky; const size_t nkx_minus_1 = nkx - 1, nky_minus_1 = nky - 1; const bool nan_interpolate = _nan_interpolate; const bool embed_result_within_padded_region = _embed_result_within_padded_region; //Thread locals const size_t wkx = _wkx; const size_t wky = _wky; const omp_iter_var nx_minus_wkx = nx - wkx; const omp_iter_var ny_minus_wky = ny - wky; const size_t ny_minus_2wky = ny - 2 * wky; size_t i_minus_wkx, j_minus_wky; size_t result_cursor; size_t f_cursor, g_cursor; size_t result_index; DTYPE top, bot = 0., ker, val; { omp_iter_var i; for (i = wkx; i < nx_minus_wkx; ++i) { i_minus_wkx = i - wkx; result_cursor = i * ny; { omp_iter_var j; for (j = wky; j < ny_minus_wky; ++j) { j_minus_wky = j - wky; top = 0.; if (nan_interpolate) //compile time constant bot = 0.; { omp_iter_var ii; for (ii = 0; ii < nkx; ++ii) { f_cursor = (i_minus_wkx + ii) * ny + j_minus_wky; g_cursor = (nkx_minus_1 - ii) * nky + nky_minus_1; { omp_iter_var jj; for (jj = 0; jj < nky; ++jj) { val = f[f_cursor + jj]; ker = g[g_cursor - jj]; if (nan_interpolate) //compile time constant { if (!isnan(val)) { top += val * ker; bot += ker; } } else top += val * ker; } } } } if (embed_result_within_padded_region) { //compile time constant result_index = result_cursor + j; } else { result_index = i_minus_wkx * ny_minus_2wky + j_minus_wky; } if (nan_interpolate) //compile time constant { if (bot == 0) //This should prob be np.isclose(kernel_sum, 0, atol = normalization_zero_tol) result[result_index] = f[result_cursor + j]; else result[result_index] = top / bot; } else result[result_index] = top; } } } } } //3 D FORCE_INLINE void convolve3d(DTYPE * const result, const DTYPE * const f, const size_t _nx, const size_t _ny, const size_t _nz, const DTYPE * const g, const size_t _nkx, const size_t _nky, const size_t _nkz, const bool _nan_interpolate, const bool _embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif const size_t _wkx = _nkx / 2; const size_t _wky = _nky / 2; const size_t _wkz = _nkz / 2; #ifdef NDEBUG if (!(_nx > 2 * _wkx) || !(_ny > 2 * _wky) || !(_nz > 2 * _wkz)) return; #else assert(_nx > 2 * _wkx); assert(_ny > 2 * _wky); assert(_nz > 2 * _wkz); #endif //Copy these to thread locals to allow compiler to optimize(hoist / loads licm) // when threaded.Without these, compile time constant conditionals may // not be optimized away. 
const size_t nx = _nx, ny = _ny, nz = _nz; const size_t nkx = _nkx, nky = _nky, nkz = _nkz; const size_t nkx_minus_1 = nkx - 1, nky_minus_1 = nky - 1, nkz_minus_1 = nkz - 1; const bool nan_interpolate = _nan_interpolate; const bool embed_result_within_padded_region = _embed_result_within_padded_region; //Thread locals const size_t wkx = _wkx; const size_t wky = _wky; const size_t wkz = _wkz; const size_t nx_minus_wkx = nx - wkx; const omp_iter_var ny_minus_wky = ny - wky; const omp_iter_var nz_minus_wkz = nz - wkz; const size_t ny_minus_2wky = ny - 2 * wky; const size_t nz_minus_2wkz = nz - 2 * wkz; size_t i_minus_wkx, j_minus_wky, k_minus_wkz; size_t f_ii_cursor, g_ii_cursor; size_t f_cursor, g_cursor; size_t array_cursor, array_i_cursor; size_t result_index; DTYPE top, bot = 0., ker, val; { omp_iter_var i; for (i = wkx; i < nx_minus_wkx; ++i) { i_minus_wkx = i - wkx; array_i_cursor = i * ny; { omp_iter_var j; for (j = wky; j < ny_minus_wky; ++j) { j_minus_wky = j - wky; array_cursor = (array_i_cursor + j) * nz; { omp_iter_var k; for (k = wkz; k < nz_minus_wkz; ++k) { k_minus_wkz = k - wkz; top = 0.; if (nan_interpolate) //compile time constant bot = 0.; { omp_iter_var ii; for (ii = 0; ii < nkx; ++ii) { f_ii_cursor = ((i_minus_wkx + ii) * ny + j_minus_wky) * nz + k_minus_wkz; g_ii_cursor = ((nkx_minus_1 - ii) * nky + nky_minus_1) * nkz + nkz_minus_1; { omp_iter_var jj; for (jj = 0; jj < nky; ++jj) { f_cursor = f_ii_cursor + jj * nz; g_cursor = g_ii_cursor - jj * nkz; { omp_iter_var kk; for (kk = 0; kk < nkz; ++kk) { val = f[f_cursor + kk]; ker = g[g_cursor - kk]; if (nan_interpolate) //compile time constant { if (!isnan(val)) { top += val * ker; bot += ker; } } else top += val * ker; } } } } } } if (embed_result_within_padded_region) { //compile time constant result_index = array_cursor + k; } else { result_index = (i_minus_wkx * ny_minus_2wky + j_minus_wky) * nz_minus_2wkz + k_minus_wkz; } if (nan_interpolate) //compile time constant { if (bot == 0) //This should prob be np.isclose(kernel_sum, 0, atol = normalization_zero_tol) result[result_index] = f[array_cursor + k]; else result[result_index] = top / bot; } else result[result_index] = top; } } } } } } }
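The PERFORMANCE NOTES above describe branch specialization: each wrapper calls the kernel with literal `true`/`false` arguments so that, once the kernel is inlined (strictly, by the compiler via `FORCE_INLINE`, not the preprocessor), the per-pixel conditionals fold away. A minimal sketch of that trick, with hypothetical names `kern`/`kern_c` and `double` in place of `DTYPE`:

#include <math.h>
#include <stdbool.h>
#include <stddef.h>

/* Every call site below passes a literal, so after inlining the compiler
   can delete the per-element 'if (nan_interpolate)' in each clone. */
static inline void kern(double *r, const double *f, size_t n,
                        double w, bool nan_interpolate)
{
    for (size_t i = 0; i < n; ++i) {
        double v = f[i];
        if (nan_interpolate && isnan(v))   /* dead code in the 'false' clone */
            v = 0.;
        r[i] = w * v;
    }
}

void kern_c(double *r, const double *f, size_t n, double w,
            bool nan_interpolate)
{
    if (nan_interpolate) kern(r, f, n, w, true);
    else                 kern(r, f, n, w, false);
}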
// Taken from astropy // Licensed under a 3 - clause BSD style license /*----------------------------- WARNING! ----------------------------- * The C functions below are NOT designed to be called externally to * the Python function astropy/astropy/convolution/convolve.py. * They do NOT include any of the required correct usage checking. * *------------------------------- NOTES ------------------------------ * * The simplest implementation of convolution does not deal with any boundary * treatment, and pixels within half a kernel width of the edge of the image are * set to zero. In cases where a boundary mode is set, we pad the input array in * the Python code. In the 1D case, this means that the input array to the C * code has a size nx + nkx where nx is the original array size and nkx is the * size of the kernel. If we also padded the results array, then we could use * the exact same C code for the convolution, provided that the results array * was 'unpadded' in the Python code after the C code. * * However, to avoid needlessly padding the results array, we instead adjust the * index when accessing the results array - for example in the 1D case we shift * the index in the results array compared to the input array by half the kernel * size. This is done via the 'result_index' variable, and this behavior is * triggered by the 'embed_result_within_padded_region' setting. * */ #include <assert.h> #include <math.h> #include <stdbool.h> #include <stdlib.h> #include <stddef.h> #include "convolve.h" // Distutils on Windows automatically exports `` PyInit__convolve ``, //create dummy to prevent linker complaining about missing symbol. #if defined(_MSC_VER) void PyInit__convolve(void) { return; } #endif #ifdef _OPENMP #include <omp.h> #endif void convolveNd_c(DTYPE * const result, const DTYPE * const f, const unsigned n_dim, const size_t * const image_shape, const DTYPE * const g, const size_t * const kernel_shape, const bool nan_interpolate, const bool embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g || !image_shape || !kernel_shape) return; #else assert(result); assert(f); assert(g); assert(image_shape); assert(kernel_shape); #endif if (n_dim == 1) convolve1d_c(result, f, image_shape[0], g, kernel_shape[0], nan_interpolate, embed_result_within_padded_region, n_threads); else if (n_dim == 2) convolve2d_c(result, f, image_shape[0], image_shape[1], g, kernel_shape[0], kernel_shape[1], nan_interpolate, embed_result_within_padded_region, n_threads); else if (n_dim == 3) convolve3d_c(result, f, image_shape[0], image_shape[1], image_shape[2], g, kernel_shape[0], kernel_shape[1], kernel_shape[2], nan_interpolate, embed_result_within_padded_region, n_threads); else assert(0); //Unimplemented:n_dim > 3 } /*-------------------------PERFORMANCE NOTES-------------------------------- * The function wrappers below are designed to take advantage of the following: * The preprocessor will inline convolve<N>d(), effectively * expanding the two logical branches, replacing nan_interpolate * for their literal equivalents. The corresponding conditionals * within these functions will then be optimized away, this * being the goal - removing the unnecessary conditionals from * the loops without duplicating code. 
*-------------------------------------------------------------------------- */ void convolve1d_c(DTYPE * const result, const DTYPE * const f, const size_t nx, const DTYPE * const g, const size_t nkx, const bool nan_interpolate, const bool embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif if (nan_interpolate) { if (embed_result_within_padded_region) convolve1d(result, f, nx, g, nkx, true, true, n_threads); else convolve1d(result, f, nx, g, nkx, true, false, n_threads); } else { if (embed_result_within_padded_region) convolve1d(result, f, nx, g, nkx, false, true, n_threads); else convolve1d(result, f, nx, g, nkx, false, false, n_threads); } } void convolve2d_c(DTYPE * const result, const DTYPE * const f, const size_t nx, const size_t ny, const DTYPE * const g, const size_t nkx, const size_t nky, const bool nan_interpolate, const bool embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif if (nan_interpolate) { if (embed_result_within_padded_region) convolve2d(result, f, nx, ny, g, nkx, nky, true, true, n_threads); else convolve2d(result, f, nx, ny, g, nkx, nky, true, false, n_threads); } else { if (embed_result_within_padded_region) convolve2d(result, f, nx, ny, g, nkx, nky, false, true, n_threads); else convolve2d(result, f, nx, ny, g, nkx, nky, false, false, n_threads); } } void convolve3d_c(DTYPE * const result, const DTYPE * const f, const size_t nx, const size_t ny, const size_t nz, const DTYPE * const g, const size_t nkx, const size_t nky, const size_t nkz, const bool nan_interpolate, const bool embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif if (nan_interpolate) { if (embed_result_within_padded_region) convolve3d(result, f, nx, ny, nz, g, nkx, nky, nkz, true, true, n_threads); else convolve3d(result, f, nx, ny, nz, g, nkx, nky, nkz, true, false, n_threads); } else { if (embed_result_within_padded_region) convolve3d(result, f, nx, ny, nz, g, nkx, nky, nkz, false, true, n_threads); else convolve3d(result, f, nx, ny, nz, g, nkx, nky, nkz, false, false, n_threads); } } //1 D FORCE_INLINE void convolve1d(DTYPE * const result, const DTYPE * const f, const size_t _nx, const DTYPE * const g, const size_t _nkx, const bool _nan_interpolate, const bool _embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif const size_t _wkx = _nkx / 2; #ifdef NDEBUG if (!(_nx > 2 * _wkx)) return; #else assert(_nx > 2 * _wkx); #endif #ifdef _OPENMP omp_set_num_threads(n_threads); //Set number of threads to use #pragma omp parallel { //Code within this block is threaded #endif // Copy these to thread locals to allow compiler to optimize(hoist / loads licm) // when threaded.Without these, compile time constant conditionals may // not be optimized away. 
const size_t nx = _nx; const size_t nkx = _nkx; const size_t nkx_minus_1 = nkx - 1; const bool nan_interpolate = _nan_interpolate; const bool embed_result_within_padded_region = _embed_result_within_padded_region; //Thread locals const size_t wkx = _wkx; const omp_iter_var nx_minus_wkx = nx - wkx; size_t i_minus_wkx; size_t result_index; DTYPE top, bot = 0., ker, val; { omp_iter_var i; #ifdef _OPENMP #pragma omp for schedule(dynamic) #endif for (i = wkx; i < nx_minus_wkx; ++i) { i_minus_wkx = i - wkx; top = 0.; if (nan_interpolate) //compile time constant bot = 0.; { omp_iter_var ii; for (ii = 0; ii < nkx; ++ii) { val = f[i_minus_wkx + ii]; ker = g[nkx_minus_1 - ii]; if (nan_interpolate) //compile time constant { if (!isnan(val)) { top += val * ker; bot += ker; } } else top += val * ker; } } if (embed_result_within_padded_region) { //compile time constant result_index = i; } else { result_index = i_minus_wkx; } if (nan_interpolate) //compile time constant { if (bot == 0) //This should prob be np.isclose(kernel_sum, 0, atol = normalization_zero_tol) result[result_index] = f[i]; else result[result_index] = top / bot; } else result[result_index] = top; } } #ifdef _OPENMP } //end parallel scope #endif } //2 D FORCE_INLINE void convolve2d(DTYPE * const result, const DTYPE * const f, const size_t _nx, const size_t _ny, const DTYPE * const g, const size_t _nkx, const size_t _nky, const bool _nan_interpolate, const bool _embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif const size_t _wkx = _nkx / 2; const size_t _wky = _nky / 2; #ifdef NDEBUG if (!(_nx > 2 * _wkx) || !(_ny > 2 * _wky)) return; #else assert(_nx > 2 * _wkx); assert(_ny > 2 * _wky); #endif #ifdef _OPENMP omp_set_num_threads(n_threads); //Set number of threads to use #pragma omp parallel { //Code within this block is threaded #endif // Copy these to thread locals to allow compiler to optimize(hoist / loads licm) // when threaded.Without these, compile time constant conditionals may // not be optimized away. 
const size_t nx = _nx, ny = _ny; const size_t nkx = _nkx, nky = _nky; const size_t nkx_minus_1 = nkx - 1, nky_minus_1 = nky - 1; const bool nan_interpolate = _nan_interpolate; const bool embed_result_within_padded_region = _embed_result_within_padded_region; //Thread locals const size_t wkx = _wkx; const size_t wky = _wky; const omp_iter_var nx_minus_wkx = nx - wkx; const omp_iter_var ny_minus_wky = ny - wky; const size_t ny_minus_2wky = ny - 2 * wky; size_t i_minus_wkx, j_minus_wky; size_t result_cursor; size_t f_cursor, g_cursor; size_t result_index; DTYPE top, bot = 0., ker, val; { omp_iter_var i; #ifdef _OPENMP #pragma omp for schedule(dynamic) #endif for (i = wkx; i < nx_minus_wkx; ++i) { i_minus_wkx = i - wkx; result_cursor = i * ny; { omp_iter_var j; for (j = wky; j < ny_minus_wky; ++j) { j_minus_wky = j - wky; top = 0.; if (nan_interpolate) //compile time constant bot = 0.; { omp_iter_var ii; for (ii = 0; ii < nkx; ++ii) { f_cursor = (i_minus_wkx + ii) * ny + j_minus_wky; g_cursor = (nkx_minus_1 - ii) * nky + nky_minus_1; { omp_iter_var jj; for (jj = 0; jj < nky; ++jj) { val = f[f_cursor + jj]; ker = g[g_cursor - jj]; if (nan_interpolate) //compile time constant { if (!isnan(val)) { top += val * ker; bot += ker; } } else top += val * ker; } } } } if (embed_result_within_padded_region) { //compile time constant result_index = result_cursor + j; } else { result_index = i_minus_wkx * ny_minus_2wky + j_minus_wky; } if (nan_interpolate) //compile time constant { if (bot == 0) //This should prob be np.isclose(kernel_sum, 0, atol = normalization_zero_tol) result[result_index] = f[result_cursor + j]; else result[result_index] = top / bot; } else result[result_index] = top; } } } } #ifdef _OPENMP } //end parallel scope #endif } //3 D FORCE_INLINE void convolve3d(DTYPE * const result, const DTYPE * const f, const size_t _nx, const size_t _ny, const size_t _nz, const DTYPE * const g, const size_t _nkx, const size_t _nky, const size_t _nkz, const bool _nan_interpolate, const bool _embed_result_within_padded_region, const unsigned n_threads) { #ifdef NDEBUG if (!result || !f || !g) return; #else assert(result); assert(f); assert(g); #endif const size_t _wkx = _nkx / 2; const size_t _wky = _nky / 2; const size_t _wkz = _nkz / 2; #ifdef NDEBUG if (!(_nx > 2 * _wkx) || !(_ny > 2 * _wky) || !(_nz > 2 * _wkz)) return; #else assert(_nx > 2 * _wkx); assert(_ny > 2 * _wky); assert(_nz > 2 * _wkz); #endif #ifdef _OPENMP omp_set_num_threads(n_threads); //Set number of threads to use #pragma omp parallel { //Code within this block is threaded #endif // Copy these to thread locals to allow compiler to optimize(hoist / loads licm) // when threaded.Without these, compile time constant conditionals may // not be optimized away. 
const size_t nx = _nx, ny = _ny, nz = _nz; const size_t nkx = _nkx, nky = _nky, nkz = _nkz; const size_t nkx_minus_1 = nkx - 1, nky_minus_1 = nky - 1, nkz_minus_1 = nkz - 1; const bool nan_interpolate = _nan_interpolate; const bool embed_result_within_padded_region = _embed_result_within_padded_region; //Thread locals const size_t wkx = _wkx; const size_t wky = _wky; const size_t wkz = _wkz; const size_t nx_minus_wkx = nx - wkx; const omp_iter_var ny_minus_wky = ny - wky; const omp_iter_var nz_minus_wkz = nz - wkz; const size_t ny_minus_2wky = ny - 2 * wky; const size_t nz_minus_2wkz = nz - 2 * wkz; size_t i_minus_wkx, j_minus_wky, k_minus_wkz; size_t f_ii_cursor, g_ii_cursor; size_t f_cursor, g_cursor; size_t array_cursor, array_i_cursor; size_t result_index; DTYPE top, bot = 0., ker, val; { omp_iter_var i; #ifdef _OPENMP #pragma omp for schedule(dynamic) #endif for (i = wkx; i < nx_minus_wkx; ++i) { i_minus_wkx = i - wkx; array_i_cursor = i * ny; { omp_iter_var j; for (j = wky; j < ny_minus_wky; ++j) { j_minus_wky = j - wky; array_cursor = (array_i_cursor + j) * nz; { omp_iter_var k; for (k = wkz; k < nz_minus_wkz; ++k) { k_minus_wkz = k - wkz; top = 0.; if (nan_interpolate) //compile time constant bot = 0.; { omp_iter_var ii; for (ii = 0; ii < nkx; ++ii) { f_ii_cursor = ((i_minus_wkx + ii) * ny + j_minus_wky) * nz + k_minus_wkz; g_ii_cursor = ((nkx_minus_1 - ii) * nky + nky_minus_1) * nkz + nkz_minus_1; { omp_iter_var jj; for (jj = 0; jj < nky; ++jj) { f_cursor = f_ii_cursor + jj * nz; g_cursor = g_ii_cursor - jj * nkz; { omp_iter_var kk; for (kk = 0; kk < nkz; ++kk) { val = f[f_cursor + kk]; ker = g[g_cursor - kk]; if (nan_interpolate) //compile time constant { if (!isnan(val)) { top += val * ker; bot += ker; } } else top += val * ker; } } } } } } if (embed_result_within_padded_region) { //compile time constant result_index = array_cursor + k; } else { result_index = (i_minus_wkx * ny_minus_2wky + j_minus_wky) * nz_minus_2wkz + k_minus_wkz; } if (nan_interpolate) //compile time constant { if (bot == 0) //This should prob be np.isclose(kernel_sum, 0, atol = normalization_zero_tol) result[result_index] = f[array_cursor + k]; else result[result_index] = top / bot; } else result[result_index] = top; } } } } } } #ifdef _OPENMP } //end parallel scope #endif }
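All three kernels in the OpenMP variant share one threading pattern: set the thread count at run time, open a single parallel region guarded by `#ifdef _OPENMP` (including the deliberately unbalanced braces), and hand out the outermost image axis with a dynamically scheduled work-sharing loop. A minimal sketch of that scaffolding on a hypothetical row-scaling loop, compilable with or without OpenMP:

#include <stddef.h>
#ifdef _OPENMP
#include <omp.h>
#endif

void scale_rows(double *a, size_t nx, size_t ny, double s, unsigned n_threads)
{
#ifdef _OPENMP
    omp_set_num_threads((int)n_threads);
    #pragma omp parallel
    {                                     /* code in this block is threaded */
#else
    (void)n_threads;                      /* serial fallback */
#endif
        long i;
#ifdef _OPENMP
        #pragma omp for schedule(dynamic)
#endif
        for (i = 0; i < (long)nx; ++i)    /* rows handed out dynamically */
            for (size_t j = 0; j < ny; ++j)
                a[(size_t)i * ny + j] *= s;
#ifdef _OPENMP
    }                                     /* end parallel scope */
#endif
}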
GB_unop__identity_uint16_uint8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint16_uint8) // op(A') function: GB (_unop_tran__identity_uint16_uint8) // C type: uint16_t // A type: uint8_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint16_t z = (uint16_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = (uint16_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint16_uint8) ( uint16_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; uint16_t z = (uint16_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; uint16_t z = (uint16_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint16_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint16_uint8) // op(A') function: GB (_unop_tran__identity_uint16_uint8) // C type: uint16_t // A type: uint8_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint16_t z = (uint16_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = (uint16_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint16_uint8) ( uint16_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; uint16_t z = (uint16_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; uint16_t z = (uint16_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint16_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint16_uint8) // op(A') function: GB (_unop_tran__identity_uint16_uint8) // C type: uint16_t // A type: uint8_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint16_t z = (uint16_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = (uint16_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint16_uint8) ( uint16_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; uint16_t z = (uint16_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; uint16_t z = (uint16_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint16_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
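The generated `_unop_apply__identity_uint16_uint8` kernel reduces to a flat typecast loop that is trivially parallel; the bitmap variant only adds a presence test on `Ab`. A condensed sketch of that shape, with the hypothetical name `apply_identity_u16_u8` and no GraphBLAS plumbing:

#include <stdint.h>

void apply_identity_u16_u8(uint16_t *Cx, const uint8_t *Ax,
                           const int8_t *Ab, int64_t anz, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++)
    {
        if (Ab != NULL && !Ab[p]) continue;  /* bitmap: entry not present */
        Cx[p] = (uint16_t)Ax[p];             /* the cast is the whole "op" */
    }
}

Note the design choice in the generated code: it duplicates the loop for the dense (`Ab == NULL`) and bitmap cases so the dense path carries no per-element branch; the sketch trades that away for brevity.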
kmp_sch_simd_runtime_static.c
// RUN: %libomp-compile && %libomp-run // RUN: %libomp-run 1 && %libomp-run 2 // The test checks schedule(simd:runtime) // in combination with OMP_SCHEDULE=static[,chunk] #include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> #if defined(WIN32) || defined(_WIN32) #include <windows.h> #define delay() Sleep(1); #define seten(a,b,c) _putenv_s((a),(b)) #else #include <unistd.h> #define delay() usleep(10); #define seten(a,b,c) setenv((a),(b),(c)) #endif #define SIMD_LEN 4 int err = 0; // --------------------------------------------------------------------------- // Various definitions copied from OpenMP RTL. enum sched { kmp_sch_static_balanced_chunked = 45, kmp_sch_guided_simd = 46, kmp_sch_runtime_simd = 47, }; typedef unsigned u32; typedef long long i64; typedef unsigned long long u64; typedef struct { int reserved_1; int flags; int reserved_2; int reserved_3; char *psource; } id; #ifdef __cplusplus extern "C" { #endif int __kmpc_global_thread_num(id*); void __kmpc_barrier(id*, int gtid); void __kmpc_dispatch_init_4(id*, int, enum sched, int, int, int, int); void __kmpc_dispatch_init_8(id*, int, enum sched, i64, i64, i64, i64); int __kmpc_dispatch_next_4(id*, int, void*, void*, void*, void*); int __kmpc_dispatch_next_8(id*, int, void*, void*, void*, void*); #ifdef __cplusplus } // extern "C" #endif // End of definitions copied from OpenMP RTL. // --------------------------------------------------------------------------- static id loc = {0, 2, 0, 0, ";file;func;0;0;;"}; // --------------------------------------------------------------------------- void run_loop( int loop_lb, // Loop lower bound. int loop_ub, // Loop upper bound. int loop_st, // Loop stride. int lchunk ) { static int volatile loop_sync = 0; int lb; // Chunk lower bound. int ub; // Chunk upper bound. int st; // Chunk stride. int rc; int nthreads = omp_get_num_threads(); int tid = omp_get_thread_num(); int gtid = __kmpc_global_thread_num(&loc); int last; int tc = (loop_ub - loop_lb) / loop_st + 1; int ch; int no_chunk = 0; if (lchunk == 0) { no_chunk = 1; lchunk = 1; } ch = lchunk * SIMD_LEN; #if _DEBUG > 1 printf("run_loop gtid %d tid %d (lb=%d, ub=%d, st=%d, ch=%d)\n", gtid, tid, (int)loop_lb, (int)loop_ub, (int)loop_st, lchunk); #endif // Don't test degenerate cases that should have been discovered by codegen. if (loop_st == 0) return; if (loop_st > 0 ? loop_lb > loop_ub : loop_lb < loop_ub) return; __kmpc_dispatch_init_4(&loc, gtid, kmp_sch_runtime_simd, loop_lb, loop_ub, loop_st, SIMD_LEN); { // Let the master thread handle the chunks alone. int chunk; // No of current chunk. int last_ub; // Upper bound of the last processed chunk. u64 cur; // Number of interations in current chunk. u64 max; // Max allowed iterations for current chunk. int undersized = 0; last_ub = loop_ub; chunk = 0; max = (loop_ub - loop_lb) / loop_st + 1; // The first chunk can consume all iterations. while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st)) { ++ chunk; #if _DEBUG printf("th %d: chunk=%d, lb=%d, ub=%d ch %d\n", tid, chunk, (int)lb, (int)ub, (int)(ub-lb+1)); #endif // Check if previous chunk (it is not the final chunk) is undersized. 
if (undersized) printf("Error with chunk %d, th %d, err %d\n", chunk, tid, ++err); if (loop_st > 0) { if (!(ub <= loop_ub)) printf("Error with ub %d, %d, ch %d, err %d\n", (int)ub, (int)loop_ub, chunk, ++err); if (!(lb <= ub)) printf("Error with bounds %d, %d, %d, err %d\n", (int)lb, (int)ub, chunk, ++err); } else { if (!(ub >= loop_ub)) printf("Error with ub %d, %d, %d, err %d\n", (int)ub, (int)loop_ub, chunk, ++err); if (!(lb >= ub)) printf("Error with bounds %d, %d, %d, err %d\n", (int)lb, (int)ub, chunk, ++err); }; // if // Stride should not change. if (!(st == loop_st)) printf("Error with st %d, %d, ch %d, err %d\n", (int)st, (int)loop_st, chunk, ++err); cur = ( ub - lb ) / loop_st + 1; // Guided scheduling uses FP computations, so current chunk may // be a bit bigger (+1) than allowed maximum. if (!( cur <= max + 1)) printf("Error with iter %llu, %llu, err %d\n", cur, max, ++err); // Update maximum for the next chunk. if (last) { if (!no_chunk && cur > ch && nthreads > 1) printf("Error: too big last chunk %d (%d), tid %d, err %d\n", (int)cur, ch, tid, ++err); } else { if (cur % ch) printf("Error with chunk %d, %d, ch %d, tid %d, err %d\n", chunk, (int)cur, ch, tid, ++err); } if (cur < max) max = cur; last_ub = ub; undersized = (cur < ch); #if _DEBUG > 1 if (last) printf("under%d cur %d, ch %d, tid %d, ub %d, lb %d, st %d =======\n", undersized,cur,ch,tid,ub,lb,loop_st); #endif } // while // Must have the right last iteration index. if (loop_st > 0) { if (!(last_ub <= loop_ub)) printf("Error with last1 %d, %d, ch %d, err %d\n", (int)last_ub, (int)loop_ub, chunk, ++err); if (last && !(last_ub + loop_st > loop_ub)) printf("Error with last2 %d, %d, %d, ch %d, err %d\n", (int)last_ub, (int)loop_st, (int)loop_ub, chunk, ++err); } else { if (!(last_ub >= loop_ub)) printf("Error with last1 %d, %d, ch %d, err %d\n", (int)last_ub, (int)loop_ub, chunk, ++err); if (last && !(last_ub + loop_st < loop_ub)) printf("Error with last2 %d, %d, %d, ch %d, err %d\n", (int)last_ub, (int)loop_st, (int)loop_ub, chunk, ++err); } // if } __kmpc_barrier(&loc, gtid); } // run_loop int main(int argc, char *argv[]) { int chunk = 0; if (argc > 1) { char *buf = malloc(8 + strlen(argv[1])); // expect chunk size as a parameter chunk = atoi(argv[1]); strcpy(buf,"static,"); strcat(buf,argv[1]); seten("OMP_SCHEDULE",buf,1); printf("Testing schedule(simd:%s)\n", buf); free(buf); } else { seten("OMP_SCHEDULE","static",1); printf("Testing schedule(simd:static)\n"); } #pragma omp parallel// num_threads(num_th) run_loop(0, 26, 1, chunk); if (err) { printf("failed, err = %d\n", err); return 1; } else { printf("passed\n"); return 0; } }
// RUN: %libomp-compile && %libomp-run // RUN: %libomp-run 1 && %libomp-run 2 // The test checks schedule(simd:runtime) // in combination with OMP_SCHEDULE=static[,chunk] #include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> #if defined(WIN32) || defined(_WIN32) #include <windows.h> #define delay() Sleep(1); #define seten(a,b,c) _putenv_s((a),(b)) #else #include <unistd.h> #define delay() usleep(10); #define seten(a,b,c) setenv((a),(b),(c)) #endif #define SIMD_LEN 4 int err = 0; // --------------------------------------------------------------------------- // Various definitions copied from OpenMP RTL. enum sched { kmp_sch_static_balanced_chunked = 45, kmp_sch_guided_simd = 46, kmp_sch_runtime_simd = 47, }; typedef unsigned u32; typedef long long i64; typedef unsigned long long u64; typedef struct { int reserved_1; int flags; int reserved_2; int reserved_3; char *psource; } id; #ifdef __cplusplus extern "C" { #endif int __kmpc_global_thread_num(id*); void __kmpc_barrier(id*, int gtid); void __kmpc_dispatch_init_4(id*, int, enum sched, int, int, int, int); void __kmpc_dispatch_init_8(id*, int, enum sched, i64, i64, i64, i64); int __kmpc_dispatch_next_4(id*, int, void*, void*, void*, void*); int __kmpc_dispatch_next_8(id*, int, void*, void*, void*, void*); #ifdef __cplusplus } // extern "C" #endif // End of definitions copied from OpenMP RTL. // --------------------------------------------------------------------------- static id loc = {0, 2, 0, 0, ";file;func;0;0;;"}; // --------------------------------------------------------------------------- void run_loop( int loop_lb, // Loop lower bound. int loop_ub, // Loop upper bound. int loop_st, // Loop stride. int lchunk ) { static int volatile loop_sync = 0; int lb; // Chunk lower bound. int ub; // Chunk upper bound. int st; // Chunk stride. int rc; int nthreads = omp_get_num_threads(); int tid = omp_get_thread_num(); int gtid = __kmpc_global_thread_num(&loc); int last; int tc = (loop_ub - loop_lb) / loop_st + 1; int ch; int no_chunk = 0; if (lchunk == 0) { no_chunk = 1; lchunk = 1; } ch = lchunk * SIMD_LEN; #if _DEBUG > 1 printf("run_loop gtid %d tid %d (lb=%d, ub=%d, st=%d, ch=%d)\n", gtid, tid, (int)loop_lb, (int)loop_ub, (int)loop_st, lchunk); #endif // Don't test degenerate cases that should have been discovered by codegen. if (loop_st == 0) return; if (loop_st > 0 ? loop_lb > loop_ub : loop_lb < loop_ub) return; __kmpc_dispatch_init_4(&loc, gtid, kmp_sch_runtime_simd, loop_lb, loop_ub, loop_st, SIMD_LEN); { // Let the master thread handle the chunks alone. int chunk; // No of current chunk. int last_ub; // Upper bound of the last processed chunk. u64 cur; // Number of interations in current chunk. u64 max; // Max allowed iterations for current chunk. int undersized = 0; last_ub = loop_ub; chunk = 0; max = (loop_ub - loop_lb) / loop_st + 1; // The first chunk can consume all iterations. while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st)) { ++ chunk; #if _DEBUG printf("th %d: chunk=%d, lb=%d, ub=%d ch %d\n", tid, chunk, (int)lb, (int)ub, (int)(ub-lb+1)); #endif // Check if previous chunk (it is not the final chunk) is undersized. 
if (undersized) printf("Error with chunk %d, th %d, err %d\n", chunk, tid, ++err); if (loop_st > 0) { if (!(ub <= loop_ub)) printf("Error with ub %d, %d, ch %d, err %d\n", (int)ub, (int)loop_ub, chunk, ++err); if (!(lb <= ub)) printf("Error with bounds %d, %d, %d, err %d\n", (int)lb, (int)ub, chunk, ++err); } else { if (!(ub >= loop_ub)) printf("Error with ub %d, %d, %d, err %d\n", (int)ub, (int)loop_ub, chunk, ++err); if (!(lb >= ub)) printf("Error with bounds %d, %d, %d, err %d\n", (int)lb, (int)ub, chunk, ++err); }; // if // Stride should not change. if (!(st == loop_st)) printf("Error with st %d, %d, ch %d, err %d\n", (int)st, (int)loop_st, chunk, ++err); cur = ( ub - lb ) / loop_st + 1; // Guided scheduling uses FP computations, so current chunk may // be a bit bigger (+1) than allowed maximum. if (!( cur <= max + 1)) printf("Error with iter %llu, %llu, err %d\n", cur, max, ++err); // Update maximum for the next chunk. if (last) { if (!no_chunk && cur > ch && nthreads > 1) printf("Error: too big last chunk %d (%d), tid %d, err %d\n", (int)cur, ch, tid, ++err); } else { if (cur % ch) printf("Error with chunk %d, %d, ch %d, tid %d, err %d\n", chunk, (int)cur, ch, tid, ++err); } if (cur < max) max = cur; last_ub = ub; undersized = (cur < ch); #if _DEBUG > 1 if (last) printf("under%d cur %d, ch %d, tid %d, ub %d, lb %d, st %d =======\n", undersized,cur,ch,tid,ub,lb,loop_st); #endif } // while // Must have the right last iteration index. if (loop_st > 0) { if (!(last_ub <= loop_ub)) printf("Error with last1 %d, %d, ch %d, err %d\n", (int)last_ub, (int)loop_ub, chunk, ++err); if (last && !(last_ub + loop_st > loop_ub)) printf("Error with last2 %d, %d, %d, ch %d, err %d\n", (int)last_ub, (int)loop_st, (int)loop_ub, chunk, ++err); } else { if (!(last_ub >= loop_ub)) printf("Error with last1 %d, %d, ch %d, err %d\n", (int)last_ub, (int)loop_ub, chunk, ++err); if (last && !(last_ub + loop_st < loop_ub)) printf("Error with last2 %d, %d, %d, ch %d, err %d\n", (int)last_ub, (int)loop_st, (int)loop_ub, chunk, ++err); } // if } __kmpc_barrier(&loc, gtid); } // run_loop int main(int argc, char *argv[]) { int chunk = 0; if (argc > 1) { char *buf = malloc(8 + strlen(argv[1])); // expect chunk size as a parameter chunk = atoi(argv[1]); strcpy(buf,"static,"); strcat(buf,argv[1]); seten("OMP_SCHEDULE",buf,1); printf("Testing schedule(simd:%s)\n", buf); free(buf); } else { seten("OMP_SCHEDULE","static",1); printf("Testing schedule(simd:static)\n"); } run_loop(0, 26, 1, chunk); if (err) { printf("failed, err = %d\n", err); return 1; } else { printf("passed\n"); return 0; } }
// RUN: %libomp-compile && %libomp-run // RUN: %libomp-run 1 && %libomp-run 2 // The test checks schedule(simd:runtime) // in combination with OMP_SCHEDULE=static[,chunk] #include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> #if defined(WIN32) || defined(_WIN32) #include <windows.h> #define delay() Sleep(1); #define seten(a,b,c) _putenv_s((a),(b)) #else #include <unistd.h> #define delay() usleep(10); #define seten(a,b,c) setenv((a),(b),(c)) #endif #define SIMD_LEN 4 int err = 0; // --------------------------------------------------------------------------- // Various definitions copied from OpenMP RTL. enum sched { kmp_sch_static_balanced_chunked = 45, kmp_sch_guided_simd = 46, kmp_sch_runtime_simd = 47, }; typedef unsigned u32; typedef long long i64; typedef unsigned long long u64; typedef struct { int reserved_1; int flags; int reserved_2; int reserved_3; char *psource; } id; #ifdef __cplusplus extern "C" { #endif int __kmpc_global_thread_num(id*); void __kmpc_barrier(id*, int gtid); void __kmpc_dispatch_init_4(id*, int, enum sched, int, int, int, int); void __kmpc_dispatch_init_8(id*, int, enum sched, i64, i64, i64, i64); int __kmpc_dispatch_next_4(id*, int, void*, void*, void*, void*); int __kmpc_dispatch_next_8(id*, int, void*, void*, void*, void*); #ifdef __cplusplus } // extern "C" #endif // End of definitions copied from OpenMP RTL. // --------------------------------------------------------------------------- static id loc = {0, 2, 0, 0, ";file;func;0;0;;"}; // --------------------------------------------------------------------------- void run_loop( int loop_lb, // Loop lower bound. int loop_ub, // Loop upper bound. int loop_st, // Loop stride. int lchunk ) { static int volatile loop_sync = 0; int lb; // Chunk lower bound. int ub; // Chunk upper bound. int st; // Chunk stride. int rc; int nthreads = omp_get_num_threads(); int tid = omp_get_thread_num(); int gtid = __kmpc_global_thread_num(&loc); int last; int tc = (loop_ub - loop_lb) / loop_st + 1; int ch; int no_chunk = 0; if (lchunk == 0) { no_chunk = 1; lchunk = 1; } ch = lchunk * SIMD_LEN; #if _DEBUG > 1 printf("run_loop gtid %d tid %d (lb=%d, ub=%d, st=%d, ch=%d)\n", gtid, tid, (int)loop_lb, (int)loop_ub, (int)loop_st, lchunk); #endif // Don't test degenerate cases that should have been discovered by codegen. if (loop_st == 0) return; if (loop_st > 0 ? loop_lb > loop_ub : loop_lb < loop_ub) return; __kmpc_dispatch_init_4(&loc, gtid, kmp_sch_runtime_simd, loop_lb, loop_ub, loop_st, SIMD_LEN); { // Let the master thread handle the chunks alone. int chunk; // No of current chunk. int last_ub; // Upper bound of the last processed chunk. u64 cur; // Number of interations in current chunk. u64 max; // Max allowed iterations for current chunk. int undersized = 0; last_ub = loop_ub; chunk = 0; max = (loop_ub - loop_lb) / loop_st + 1; // The first chunk can consume all iterations. while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st)) { ++ chunk; #if _DEBUG printf("th %d: chunk=%d, lb=%d, ub=%d ch %d\n", tid, chunk, (int)lb, (int)ub, (int)(ub-lb+1)); #endif // Check if previous chunk (it is not the final chunk) is undersized. 
      if (undersized)
        printf("Error with chunk %d, th %d, err %d\n", chunk, tid, ++err);
      if (loop_st > 0) {
        if (!(ub <= loop_ub))
          printf("Error with ub %d, %d, ch %d, err %d\n",
                 (int)ub, (int)loop_ub, chunk, ++err);
        if (!(lb <= ub))
          printf("Error with bounds %d, %d, %d, err %d\n",
                 (int)lb, (int)ub, chunk, ++err);
      } else {
        if (!(ub >= loop_ub))
          printf("Error with ub %d, %d, %d, err %d\n",
                 (int)ub, (int)loop_ub, chunk, ++err);
        if (!(lb >= ub))
          printf("Error with bounds %d, %d, %d, err %d\n",
                 (int)lb, (int)ub, chunk, ++err);
      } // if
      // The stride should not change.
      if (!(st == loop_st))
        printf("Error with st %d, %d, ch %d, err %d\n",
               (int)st, (int)loop_st, chunk, ++err);
      cur = (ub - lb) / loop_st + 1;
      // Guided scheduling uses FP computations, so the current chunk may
      // be a bit bigger (+1) than the allowed maximum.
      if (!(cur <= max + 1))
        printf("Error with iter %llu, %llu, err %d\n", cur, max, ++err);
      if (last) {
        if (!no_chunk && cur > ch && nthreads > 1)
          printf("Error: too big last chunk %d (%d), tid %d, err %d\n",
                 (int)cur, ch, tid, ++err);
      } else {
        if (cur % ch)
          printf("Error with chunk %d, %d, ch %d, tid %d, err %d\n",
                 chunk, (int)cur, ch, tid, ++err);
      }
      // Update the maximum for the next chunk.
      if (cur < max)
        max = cur;
      last_ub = ub;
      undersized = (cur < ch);
#if _DEBUG > 1
      if (last)
        printf("under%d cur %d, ch %d, tid %d, ub %d, lb %d, st %d =======\n",
               undersized, (int)cur, ch, tid, ub, lb, loop_st);
#endif
    } // while
    // Must have the right last iteration index.
    if (loop_st > 0) {
      if (!(last_ub <= loop_ub))
        printf("Error with last1 %d, %d, ch %d, err %d\n",
               (int)last_ub, (int)loop_ub, chunk, ++err);
      if (last && !(last_ub + loop_st > loop_ub))
        printf("Error with last2 %d, %d, %d, ch %d, err %d\n",
               (int)last_ub, (int)loop_st, (int)loop_ub, chunk, ++err);
    } else {
      if (!(last_ub >= loop_ub))
        printf("Error with last1 %d, %d, ch %d, err %d\n",
               (int)last_ub, (int)loop_ub, chunk, ++err);
      if (last && !(last_ub + loop_st < loop_ub))
        printf("Error with last2 %d, %d, %d, ch %d, err %d\n",
               (int)last_ub, (int)loop_st, (int)loop_ub, chunk, ++err);
    } // if
  }
  __kmpc_barrier(&loc, gtid);
} // run_loop

int main(int argc, char *argv[]) {
  int chunk = 0;
  if (argc > 1) {
    char *buf = malloc(8 + strlen(argv[1]));
    // Expect the chunk size as a parameter.
    chunk = atoi(argv[1]);
    strcpy(buf, "static,");
    strcat(buf, argv[1]);
    seten("OMP_SCHEDULE", buf, 1);
    printf("Testing schedule(simd:%s)\n", buf);
    free(buf);
  } else {
    seten("OMP_SCHEDULE", "static", 1);
    printf("Testing schedule(simd:static)\n");
  }
#pragma omp parallel // num_threads(num_th)
  run_loop(0, 26, 1, chunk);
  if (err) {
    printf("failed, err = %d\n", err);
    return 1;
  } else {
    printf("passed\n");
    return 0;
  }
}
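When a run fails it can help to confirm what schedule(runtime) actually resolved to. The standard omp_get_schedule API reports the kind and chunk size the runtime parsed from OMP_SCHEDULE; a small standalone sketch, separate from the test above:

// Sketch: print the schedule that schedule(runtime) will use, as parsed
// from OMP_SCHEDULE by the runtime.
#include <stdio.h>
#include <omp.h>

int main(void) {
  omp_sched_t kind;
  int chunk;
  omp_get_schedule(&kind, &chunk); // e.g. omp_sched_static with chunk 0
  printf("runtime schedule: kind=%d chunk=%d\n", (int)kind, chunk);
  return 0;
}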
c-parser.c
/* Modula-3: modified */ /* Parser for C and Objective-C. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009 Free Software Foundation, Inc. Parser actions based on the old Bison parser; structure somewhat influenced by and fragments based on the C++ parser. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ /* TODO: Make sure all relevant comments, and all relevant code from all actions, brought over from old parser. Verify exact correspondence of syntax accepted. Add testcases covering every input symbol in every state in old and new parsers. Include full syntax for GNU C, including erroneous cases accepted with error messages, in syntax productions in comments. Make more diagnostics in the front end generally take an explicit location rather than implicitly using input_location. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "rtl.h" #include "langhooks.h" #include "input.h" #include "cpplib.h" #include "timevar.h" #include "c-pragma.h" #include "c-tree.h" #include "flags.h" #include "output.h" #include "toplev.h" #include "ggc.h" #include "c-common.h" #include "vec.h" #include "target.h" #include "cgraph.h" #include "plugin.h" #include "except.h" #ifdef __cplusplus extern "C" { #endif /* Initialization routine for this file. */ void c_parse_init (void) { /* The only initialization required is of the reserved word identifiers. */ unsigned int i; tree id; int mask = 0; /* Make sure RID_MAX hasn't grown past the 8 bits used to hold the keyword in the c_token structure. */ gcc_assert (RID_MAX <= 255); mask |= D_CXXONLY; if (!flag_isoc99) mask |= D_C99; if (flag_no_asm) { mask |= D_ASM | D_EXT; if (!flag_isoc99) mask |= D_EXT89; } if (!c_dialect_objc ()) mask |= D_OBJC | D_CXX_OBJC; ridpointers = GGC_CNEWVEC (tree, (int) RID_MAX); for (i = 0; i < num_c_common_reswords; i++) { /* If a keyword is disabled, do not enter it into the table and so create a canonical spelling that isn't a keyword. */ if (c_common_reswords[i].disable & mask) { if (warn_cxx_compat && (c_common_reswords[i].disable & D_CXXWARN)) { id = get_identifier (c_common_reswords[i].word); C_SET_RID_CODE (id, RID_CXX_COMPAT_WARN); C_IS_RESERVED_WORD (id) = 1; } continue; } id = get_identifier (c_common_reswords[i].word); C_SET_RID_CODE (id, c_common_reswords[i].rid); C_IS_RESERVED_WORD (id) = 1; ridpointers [(int) c_common_reswords[i].rid] = id; } } /* The C lexer intermediates between the lexer in cpplib and c-lex.c and the C parser. Unlike the C++ lexer, the parser structure stores the lexer information instead of using a separate structure. Identifiers are separated into ordinary identifiers, type names, keywords and some other Objective-C types of identifiers, and some look-ahead is maintained. ??? It might be a good idea to lex the whole file up front (as for C++). 
It would then be possible to share more of the C and C++ lexer code, if desired. */ /* The following local token type is used. */ /* A keyword. */ #define CPP_KEYWORD ((enum cpp_ttype) (N_TTYPES + 1)) /* More information about the type of a CPP_NAME token. */ typedef enum c_id_kind { /* An ordinary identifier. */ C_ID_ID, /* An identifier declared as a typedef name. */ C_ID_TYPENAME, /* An identifier declared as an Objective-C class name. */ C_ID_CLASSNAME, /* An address space identifier. */ C_ID_ADDRSPACE, /* Not an identifier. */ C_ID_NONE } c_id_kind; /* A single C token after string literal concatenation and conversion of preprocessing tokens to tokens. */ typedef struct GTY (()) c_token { /* The kind of token. */ ENUM_BITFIELD (cpp_ttype, type, 8); /* If this token is a CPP_NAME, this value indicates whether also declared as some kind of type. Otherwise, it is C_ID_NONE. */ ENUM_BITFIELD (c_id_kind, id_kind, 8); /* If this token is a keyword, this value indicates which keyword. Otherwise, this value is RID_MAX. */ ENUM_BITFIELD (rid, keyword, 8); /* If this token is a CPP_PRAGMA, this indicates the pragma that was seen. Otherwise it is PRAGMA_NONE. */ ENUM_BITFIELD (pragma_kind, pragma_kind, 8); /* The value associated with this token, if any. */ tree value; /* The location at which this token was found. */ location_t location; } c_token; /* A parser structure recording information about the state and context of parsing. Includes lexer information with up to two tokens of look-ahead; more are not needed for C. */ typedef struct GTY(()) c_parser { /* The look-ahead tokens. */ c_token tokens[2]; /* How many look-ahead tokens are available (0, 1 or 2). */ short tokens_avail; /* True if a syntax error is being recovered from; false otherwise. c_parser_error sets this flag. It should clear this flag when enough tokens have been consumed to recover from the error. */ BOOL_BITFIELD error : 1; /* True if we're processing a pragma, and shouldn't automatically consume CPP_PRAGMA_EOL. */ BOOL_BITFIELD in_pragma : 1; /* True if we're parsing the outermost block of an if statement. */ BOOL_BITFIELD in_if_block : 1; /* True if we want to lex an untranslated string. */ BOOL_BITFIELD lex_untranslated_string : 1; /* Objective-C specific parser/lexer information. */ BOOL_BITFIELD objc_pq_context : 1; /* The following flag is needed to contextualize Objective-C lexical analysis. In some cases (e.g., 'int NSObject;'), it is undesirable to bind an identifier to an Objective-C class, even if a class with that name exists. */ BOOL_BITFIELD objc_need_raw_identifier : 1; } c_parser; /* The actual parser and external interface. ??? Does this need to be garbage-collected? */ static GTY (()) c_parser *the_parser; /* Read in and lex a single token, storing it in *TOKEN. */ static void c_lex_one_token (c_parser *parser, c_token *token) { timevar_push (TV_LEX); token->type = c_lex_with_flags (&token->value, &token->location, NULL, (parser->lex_untranslated_string ? 
C_LEX_STRING_NO_TRANSLATE : 0)); token->id_kind = C_ID_NONE; token->keyword = RID_MAX; token->pragma_kind = PRAGMA_NONE; switch (token->type) { case CPP_NAME: { tree decl; bool objc_force_identifier = parser->objc_need_raw_identifier; if (c_dialect_objc ()) parser->objc_need_raw_identifier = false; if (C_IS_RESERVED_WORD (token->value)) { enum rid rid_code = C_RID_CODE (token->value); if (rid_code == RID_CXX_COMPAT_WARN) { warning_at (token->location, OPT_Wc___compat, "identifier %qE conflicts with C++ keyword", token->value); } else if (rid_code >= RID_FIRST_ADDR_SPACE && rid_code <= RID_LAST_ADDR_SPACE) { token->id_kind = C_ID_ADDRSPACE; token->keyword = rid_code; break; } else if (c_dialect_objc ()) { if (!objc_is_reserved_word (token->value) && (!OBJC_IS_PQ_KEYWORD (rid_code) || parser->objc_pq_context)) { /* Return the canonical spelling for this keyword. */ token->value = ridpointers[(int) rid_code]; token->type = CPP_KEYWORD; token->keyword = rid_code; break; } } else { token->type = CPP_KEYWORD; token->keyword = rid_code; break; } } decl = lookup_name (token->value); if (decl) { if (TREE_CODE (decl) == TYPE_DECL) { token->id_kind = C_ID_TYPENAME; break; } } else if (c_dialect_objc ()) { tree objc_interface_decl = objc_is_class_name (token->value); /* Objective-C class names are in the same namespace as variables and typedefs, and hence are shadowed by local declarations. */ if (objc_interface_decl && (global_bindings_p () || (!objc_force_identifier && !decl))) { token->value = objc_interface_decl; token->id_kind = C_ID_CLASSNAME; break; } } token->id_kind = C_ID_ID; } break; case CPP_AT_NAME: /* This only happens in Objective-C; it must be a keyword. */ token->type = CPP_KEYWORD; token->keyword = C_RID_CODE (token->value); break; case CPP_COLON: case CPP_COMMA: case CPP_CLOSE_PAREN: case CPP_SEMICOLON: /* These tokens may affect the interpretation of any identifiers following, if doing Objective-C. */ if (c_dialect_objc ()) parser->objc_need_raw_identifier = false; break; case CPP_PRAGMA: /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST. */ token->pragma_kind = (enum pragma_kind) TREE_INT_CST_LOW (token->value); token->value = NULL; break; default: break; } timevar_pop (TV_LEX); } /* Return a pointer to the next token from PARSER, reading it in if necessary. */ static inline c_token * c_parser_peek_token (c_parser *parser) { if (parser->tokens_avail == 0) { c_lex_one_token (parser, &parser->tokens[0]); parser->tokens_avail = 1; } return &parser->tokens[0]; } /* Return true if the next token from PARSER has the indicated TYPE. */ static inline bool c_parser_next_token_is (c_parser *parser, enum cpp_ttype type) { return c_parser_peek_token (parser)->type == type; } /* Return true if the next token from PARSER does not have the indicated TYPE. */ static inline bool c_parser_next_token_is_not (c_parser *parser, enum cpp_ttype type) { return !c_parser_next_token_is (parser, type); } /* Return true if the next token from PARSER is the indicated KEYWORD. */ static inline bool c_parser_next_token_is_keyword (c_parser *parser, enum rid keyword) { return c_parser_peek_token (parser)->keyword == keyword; } /* Return true if TOKEN can start a type name, false otherwise. 
*/ static bool c_token_starts_typename (c_token *token) { switch (token->type) { case CPP_NAME: switch (token->id_kind) { case C_ID_ID: return false; case C_ID_ADDRSPACE: return true; case C_ID_TYPENAME: return true; case C_ID_CLASSNAME: gcc_assert (c_dialect_objc ()); return true; default: gcc_unreachable (); } case CPP_KEYWORD: switch (token->keyword) { case RID_UNSIGNED: case RID_LONG: case RID_SHORT: case RID_SIGNED: case RID_COMPLEX: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: case RID_BOOL: case RID_ENUM: case RID_STRUCT: case RID_UNION: case RID_TYPEOF: case RID_CONST: case RID_VOLATILE: case RID_RESTRICT: case RID_ATTRIBUTE: case RID_FRACT: case RID_ACCUM: case RID_SAT: return true; default: return false; } case CPP_LESS: if (c_dialect_objc ()) return true; return false; default: return false; } } /* Return true if the next token from PARSER can start a type name, false otherwise. */ static inline bool c_parser_next_token_starts_typename (c_parser *parser) { c_token *token = c_parser_peek_token (parser); return c_token_starts_typename (token); } /* Return true if TOKEN can start declaration specifiers, false otherwise. */ static bool c_token_starts_declspecs (c_token *token) { switch (token->type) { case CPP_NAME: switch (token->id_kind) { case C_ID_ID: return false; case C_ID_ADDRSPACE: return true; case C_ID_TYPENAME: return true; case C_ID_CLASSNAME: gcc_assert (c_dialect_objc ()); return true; default: gcc_unreachable (); } case CPP_KEYWORD: switch (token->keyword) { case RID_STATIC: case RID_EXTERN: case RID_REGISTER: case RID_TYPEDEF: case RID_INLINE: case RID_AUTO: case RID_THREAD: case RID_UNSIGNED: case RID_LONG: case RID_SHORT: case RID_SIGNED: case RID_COMPLEX: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: case RID_BOOL: case RID_ENUM: case RID_STRUCT: case RID_UNION: case RID_TYPEOF: case RID_CONST: case RID_VOLATILE: case RID_RESTRICT: case RID_ATTRIBUTE: case RID_FRACT: case RID_ACCUM: case RID_SAT: return true; default: return false; } case CPP_LESS: if (c_dialect_objc ()) return true; return false; default: return false; } } /* Return true if the next token from PARSER can start declaration specifiers, false otherwise. */ static inline bool c_parser_next_token_starts_declspecs (c_parser *parser) { c_token *token = c_parser_peek_token (parser); return c_token_starts_declspecs (token); } /* Return a pointer to the next-but-one token from PARSER, reading it in if necessary. The next token is already read in. */ static c_token * c_parser_peek_2nd_token (c_parser *parser) { if (parser->tokens_avail >= 2) return &parser->tokens[1]; gcc_assert (parser->tokens_avail == 1); gcc_assert (parser->tokens[0].type != CPP_EOF); gcc_assert (parser->tokens[0].type != CPP_PRAGMA_EOL); c_lex_one_token (parser, &parser->tokens[1]); parser->tokens_avail = 2; return &parser->tokens[1]; } /* Consume the next token from PARSER. */ static void c_parser_consume_token (c_parser *parser) { gcc_assert (parser->tokens_avail >= 1); gcc_assert (parser->tokens[0].type != CPP_EOF); gcc_assert (!parser->in_pragma || parser->tokens[0].type != CPP_PRAGMA_EOL); gcc_assert (parser->error || parser->tokens[0].type != CPP_PRAGMA); if (parser->tokens_avail == 2) parser->tokens[0] = parser->tokens[1]; parser->tokens_avail--; } /* Expect the current token to be a #pragma. Consume it and remember that we've begun parsing a pragma. 
*/ static void c_parser_consume_pragma (c_parser *parser) { gcc_assert (!parser->in_pragma); gcc_assert (parser->tokens_avail >= 1); gcc_assert (parser->tokens[0].type == CPP_PRAGMA); if (parser->tokens_avail == 2) parser->tokens[0] = parser->tokens[1]; parser->tokens_avail--; parser->in_pragma = true; } /* Update the globals input_location and in_system_header from TOKEN. */ static inline void c_parser_set_source_position_from_token (c_token *token) { if (token->type != CPP_EOF) { input_location = token->location; } } /* Issue a diagnostic of the form FILE:LINE: MESSAGE before TOKEN where TOKEN is the next token in the input stream of PARSER. MESSAGE (specified by the caller) is usually of the form "expected OTHER-TOKEN". Do not issue a diagnostic if still recovering from an error. ??? This is taken from the C++ parser, but building up messages in this way is not i18n-friendly and some other approach should be used. */ static void c_parser_error (c_parser *parser, const char *gmsgid) { c_token *token = c_parser_peek_token (parser); if (parser->error) return; parser->error = true; if (!gmsgid) return; /* This diagnostic makes more sense if it is tagged to the line of the token we just peeked at. */ c_parser_set_source_position_from_token (token); c_parse_error (gmsgid, /* Because c_parse_error does not understand CPP_KEYWORD, keywords are treated like identifiers. */ (token->type == CPP_KEYWORD ? CPP_NAME : token->type), /* ??? The C parser does not save the cpp flags of a token, we need to pass 0 here and we will not get the source spelling of some tokens but rather the canonical spelling. */ token->value, /*flags=*/0); } /* If the next token is of the indicated TYPE, consume it. Otherwise, issue the error MSGID. If MSGID is NULL then a message has already been produced and no message will be produced this time. Returns true if found, false otherwise. */ static bool c_parser_require (c_parser *parser, enum cpp_ttype type, const char *msgid) { if (c_parser_next_token_is (parser, type)) { c_parser_consume_token (parser); return true; } else { c_parser_error (parser, msgid); return false; } } /* If the next token is the indicated keyword, consume it. Otherwise, issue the error MSGID. Returns true if found, false otherwise. */ static bool c_parser_require_keyword (c_parser *parser, enum rid keyword, const char *msgid) { if (c_parser_next_token_is_keyword (parser, keyword)) { c_parser_consume_token (parser); return true; } else { c_parser_error (parser, msgid); return false; } } /* Like c_parser_require, except that tokens will be skipped until the desired token is found. An error message is still produced if the next token is not as expected. If MSGID is NULL then a message has already been produced and no message will be produced this time. */ static void c_parser_skip_until_found (c_parser *parser, enum cpp_ttype type, const char *msgid) { unsigned nesting_depth = 0; if (c_parser_require (parser, type, msgid)) return; /* Skip tokens until the desired token is found. */ while (true) { /* Peek at the next token. */ c_token *token = c_parser_peek_token (parser); /* If we've reached the token we want, consume it and stop. */ if (token->type == type && !nesting_depth) { c_parser_consume_token (parser); break; } /* If we've run out of tokens, stop. 
*/ if (token->type == CPP_EOF) return; if (token->type == CPP_PRAGMA_EOL && parser->in_pragma) return; if (token->type == CPP_OPEN_BRACE || token->type == CPP_OPEN_PAREN || token->type == CPP_OPEN_SQUARE) ++nesting_depth; else if (token->type == CPP_CLOSE_BRACE || token->type == CPP_CLOSE_PAREN || token->type == CPP_CLOSE_SQUARE) { if (nesting_depth-- == 0) break; } /* Consume this token. */ c_parser_consume_token (parser); } parser->error = false; } /* Skip tokens until the end of a parameter is found, but do not consume the comma, semicolon or closing delimiter. */ static void c_parser_skip_to_end_of_parameter (c_parser *parser) { unsigned nesting_depth = 0; while (true) { c_token *token = c_parser_peek_token (parser); if ((token->type == CPP_COMMA || token->type == CPP_SEMICOLON) && !nesting_depth) break; /* If we've run out of tokens, stop. */ if (token->type == CPP_EOF) return; if (token->type == CPP_PRAGMA_EOL && parser->in_pragma) return; if (token->type == CPP_OPEN_BRACE || token->type == CPP_OPEN_PAREN || token->type == CPP_OPEN_SQUARE) ++nesting_depth; else if (token->type == CPP_CLOSE_BRACE || token->type == CPP_CLOSE_PAREN || token->type == CPP_CLOSE_SQUARE) { if (nesting_depth-- == 0) break; } /* Consume this token. */ c_parser_consume_token (parser); } parser->error = false; } /* Expect to be at the end of the pragma directive and consume an end of line marker. */ static void c_parser_skip_to_pragma_eol (c_parser *parser) { gcc_assert (parser->in_pragma); parser->in_pragma = false; if (!c_parser_require (parser, CPP_PRAGMA_EOL, "expected end of line")) while (true) { c_token *token = c_parser_peek_token (parser); if (token->type == CPP_EOF) break; if (token->type == CPP_PRAGMA_EOL) { c_parser_consume_token (parser); break; } c_parser_consume_token (parser); } parser->error = false; } /* Skip tokens until we have consumed an entire block, or until we have consumed a non-nested ';'. */ static void c_parser_skip_to_end_of_block_or_statement (c_parser *parser) { unsigned nesting_depth = 0; bool save_error = parser->error; while (true) { c_token *token; /* Peek at the next token. */ token = c_parser_peek_token (parser); switch (token->type) { case CPP_EOF: return; case CPP_PRAGMA_EOL: if (parser->in_pragma) return; break; case CPP_SEMICOLON: /* If the next token is a ';', we have reached the end of the statement. */ if (!nesting_depth) { /* Consume the ';'. */ c_parser_consume_token (parser); goto finished; } break; case CPP_CLOSE_BRACE: /* If the next token is a non-nested '}', then we have reached the end of the current block. */ if (nesting_depth == 0 || --nesting_depth == 0) { c_parser_consume_token (parser); goto finished; } break; case CPP_OPEN_BRACE: /* If it the next token is a '{', then we are entering a new block. Consume the entire block. */ ++nesting_depth; break; case CPP_PRAGMA: /* If we see a pragma, consume the whole thing at once. We have some safeguards against consuming pragmas willy-nilly. Normally, we'd expect to be here with parser->error set, which disables these safeguards. But it's possible to get here for secondary error recovery, after parser->error has been cleared. */ c_parser_consume_pragma (parser); c_parser_skip_to_pragma_eol (parser); parser->error = save_error; continue; default: break; } c_parser_consume_token (parser); } finished: parser->error = false; } /* CPP's options (initialized by c-opts.c). */ extern cpp_options *cpp_opts; /* Save the warning flags which are controlled by __extension__. 
*/ static inline int disable_extension_diagnostics (void) { int ret = (pedantic | (warn_pointer_arith << 1) | (warn_traditional << 2) | (flag_iso << 3) | (warn_long_long << 4) | (warn_cxx_compat << 5)); cpp_opts->pedantic = pedantic = 0; warn_pointer_arith = 0; cpp_opts->warn_traditional = warn_traditional = 0; flag_iso = 0; cpp_opts->warn_long_long = warn_long_long = 0; warn_cxx_compat = 0; return ret; } /* Restore the warning flags which are controlled by __extension__. FLAGS is the return value from disable_extension_diagnostics. */ static inline void restore_extension_diagnostics (int flags) { cpp_opts->pedantic = pedantic = flags & 1; warn_pointer_arith = (flags >> 1) & 1; cpp_opts->warn_traditional = warn_traditional = (flags >> 2) & 1; flag_iso = (flags >> 3) & 1; cpp_opts->warn_long_long = warn_long_long = (flags >> 4) & 1; warn_cxx_compat = (flags >> 5) & 1; } /* Possibly kinds of declarator to parse. */ typedef enum c_dtr_syn { /* A normal declarator with an identifier. */ C_DTR_NORMAL, /* An abstract declarator (maybe empty). */ C_DTR_ABSTRACT, /* A parameter declarator: may be either, but after a type name does not redeclare a typedef name as an identifier if it can alternatively be interpreted as a typedef name; see DR#009, applied in C90 TC1, omitted from C99 and reapplied in C99 TC2 following DR#249. For example, given a typedef T, "int T" and "int *T" are valid parameter declarations redeclaring T, while "int (T)" and "int * (T)" and "int (T[])" and "int (T (int))" are abstract declarators rather than involving redundant parentheses; the same applies with attributes inside the parentheses before "T". */ C_DTR_PARM } c_dtr_syn; static void c_parser_external_declaration (c_parser *); static void c_parser_asm_definition (c_parser *); static void c_parser_declaration_or_fndef (c_parser *, bool, bool, bool, bool); static void c_parser_declspecs (c_parser *, struct c_declspecs *, bool, bool, bool); static struct c_typespec c_parser_enum_specifier (c_parser *); static struct c_typespec c_parser_struct_or_union_specifier (c_parser *); static tree c_parser_struct_declaration (c_parser *); static struct c_typespec c_parser_typeof_specifier (c_parser *); static struct c_declarator *c_parser_declarator (c_parser *, bool, c_dtr_syn, bool *); static struct c_declarator *c_parser_direct_declarator (c_parser *, bool, c_dtr_syn, bool *); static struct c_declarator *c_parser_direct_declarator_inner (c_parser *, bool, struct c_declarator *); static struct c_arg_info *c_parser_parms_declarator (c_parser *, bool, tree); static struct c_arg_info *c_parser_parms_list_declarator (c_parser *, tree); static struct c_parm *c_parser_parameter_declaration (c_parser *, tree); static tree c_parser_simple_asm_expr (c_parser *); static tree c_parser_attributes (c_parser *); static struct c_type_name *c_parser_type_name (c_parser *); static struct c_expr c_parser_initializer (c_parser *); static struct c_expr c_parser_braced_init (c_parser *, tree, bool); static void c_parser_initelt (c_parser *); static void c_parser_initval (c_parser *, struct c_expr *); static tree c_parser_compound_statement (c_parser *); static void c_parser_compound_statement_nostart (c_parser *); static void c_parser_label (c_parser *); static void c_parser_statement (c_parser *); static void c_parser_statement_after_labels (c_parser *); static void c_parser_if_statement (c_parser *); static void c_parser_switch_statement (c_parser *); static void c_parser_while_statement (c_parser *); static void c_parser_do_statement (c_parser *); 
static void c_parser_for_statement (c_parser *); static tree c_parser_asm_statement (c_parser *); static tree c_parser_asm_operands (c_parser *, bool); static tree c_parser_asm_goto_operands (c_parser *); static tree c_parser_asm_clobbers (c_parser *); static struct c_expr c_parser_expr_no_commas (c_parser *, struct c_expr *); static struct c_expr c_parser_conditional_expression (c_parser *, struct c_expr *); static struct c_expr c_parser_binary_expression (c_parser *, struct c_expr *); static struct c_expr c_parser_cast_expression (c_parser *, struct c_expr *); static struct c_expr c_parser_unary_expression (c_parser *); static struct c_expr c_parser_sizeof_expression (c_parser *); static struct c_expr c_parser_alignof_expression (c_parser *); static struct c_expr c_parser_postfix_expression (c_parser *); static struct c_expr c_parser_postfix_expression_after_paren_type (c_parser *, struct c_type_name *, location_t); static struct c_expr c_parser_postfix_expression_after_primary (c_parser *, location_t loc, struct c_expr); static struct c_expr c_parser_expression (c_parser *); static struct c_expr c_parser_expression_conv (c_parser *); static VEC(tree,gc) *c_parser_expr_list (c_parser *, bool, bool, VEC(tree,gc) **); static void c_parser_omp_construct (c_parser *); static void c_parser_omp_threadprivate (c_parser *); static void c_parser_omp_barrier (c_parser *); static void c_parser_omp_flush (c_parser *); static void c_parser_omp_taskwait (c_parser *); enum pragma_context { pragma_external, pragma_stmt, pragma_compound }; static bool c_parser_pragma (c_parser *, enum pragma_context); /* These Objective-C parser functions are only ever called when compiling Objective-C. */ static void c_parser_objc_class_definition (c_parser *); static void c_parser_objc_class_instance_variables (c_parser *); static void c_parser_objc_class_declaration (c_parser *); static void c_parser_objc_alias_declaration (c_parser *); static void c_parser_objc_protocol_definition (c_parser *); static enum tree_code c_parser_objc_method_type (c_parser *); static void c_parser_objc_method_definition (c_parser *); static void c_parser_objc_methodprotolist (c_parser *); static void c_parser_objc_methodproto (c_parser *); static tree c_parser_objc_method_decl (c_parser *); static tree c_parser_objc_type_name (c_parser *); static tree c_parser_objc_protocol_refs (c_parser *); static void c_parser_objc_try_catch_statement (c_parser *); static void c_parser_objc_synchronized_statement (c_parser *); static tree c_parser_objc_selector (c_parser *); static tree c_parser_objc_selector_arg (c_parser *); static tree c_parser_objc_receiver (c_parser *); static tree c_parser_objc_message_args (c_parser *); static tree c_parser_objc_keywordexpr (c_parser *); /* Parse a translation unit (C90 6.7, C99 6.9). translation-unit: external-declarations external-declarations: external-declaration external-declarations external-declaration GNU extensions: translation-unit: empty */ static void c_parser_translation_unit (c_parser *parser) { if (c_parser_next_token_is (parser, CPP_EOF)) { pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic, "ISO C forbids an empty translation unit"); } else { void *obstack_position = obstack_alloc (&parser_obstack, 0); mark_valid_location_for_stdc_pragma (false); do { ggc_collect (); c_parser_external_declaration (parser); obstack_free (&parser_obstack, obstack_position); } while (c_parser_next_token_is_not (parser, CPP_EOF)); } } /* Parse an external declaration (C90 6.7, C99 6.9). 
external-declaration: function-definition declaration GNU extensions: external-declaration: asm-definition ; __extension__ external-declaration Objective-C: external-declaration: objc-class-definition objc-class-declaration objc-alias-declaration objc-protocol-definition objc-method-definition @end */ static void c_parser_external_declaration (c_parser *parser) { int ext; switch (c_parser_peek_token (parser)->type) { case CPP_KEYWORD: switch (c_parser_peek_token (parser)->keyword) { case RID_EXTENSION: ext = disable_extension_diagnostics (); c_parser_consume_token (parser); c_parser_external_declaration (parser); restore_extension_diagnostics (ext); break; case RID_ASM: c_parser_asm_definition (parser); break; case RID_AT_INTERFACE: case RID_AT_IMPLEMENTATION: gcc_assert (c_dialect_objc ()); c_parser_objc_class_definition (parser); break; case RID_CLASS: gcc_assert (c_dialect_objc ()); c_parser_objc_class_declaration (parser); break; case RID_AT_ALIAS: gcc_assert (c_dialect_objc ()); c_parser_objc_alias_declaration (parser); break; case RID_AT_PROTOCOL: gcc_assert (c_dialect_objc ()); c_parser_objc_protocol_definition (parser); break; case RID_AT_END: gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); objc_finish_implementation (); break; default: goto decl_or_fndef; } break; case CPP_SEMICOLON: pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic, "ISO C does not allow extra %<;%> outside of a function"); c_parser_consume_token (parser); break; case CPP_PRAGMA: mark_valid_location_for_stdc_pragma (true); c_parser_pragma (parser, pragma_external); mark_valid_location_for_stdc_pragma (false); break; case CPP_PLUS: case CPP_MINUS: if (c_dialect_objc ()) { c_parser_objc_method_definition (parser); break; } /* Else fall through, and yield a syntax error trying to parse as a declaration or function definition. */ default: decl_or_fndef: /* A declaration or a function definition. We can only tell which after parsing the declaration specifiers, if any, and the first declarator. */ c_parser_declaration_or_fndef (parser, true, true, false, true); break; } } /* Parse a declaration or function definition (C90 6.5, 6.7.1, C99 6.7, 6.9.1). If FNDEF_OK is true, a function definition is accepted; otherwise (old-style parameter declarations) only other declarations are accepted. If NESTED is true, we are inside a function or parsing old-style parameter declarations; any functions encountered are nested functions and declaration specifiers are required; otherwise we are at top level and functions are normal functions and declaration specifiers may be optional. If EMPTY_OK is true, empty declarations are OK (subject to all other constraints); otherwise (old-style parameter declarations) they are diagnosed. If START_ATTR_OK is true, the declaration specifiers may start with attributes; otherwise they may not. declaration: declaration-specifiers init-declarator-list[opt] ; function-definition: declaration-specifiers[opt] declarator declaration-list[opt] compound-statement declaration-list: declaration declaration-list declaration init-declarator-list: init-declarator init-declarator-list , init-declarator init-declarator: declarator simple-asm-expr[opt] attributes[opt] declarator simple-asm-expr[opt] attributes[opt] = initializer GNU extensions: nested-function-definition: declaration-specifiers declarator declaration-list[opt] compound-statement The simple-asm-expr and attributes are GNU extensions. This function does not handle __extension__; that is handled in its callers. ??? 
Following the old parser, __extension__ may start external declarations, declarations in functions and declarations at the start of "for" loops, but not old-style parameter declarations. C99 requires declaration specifiers in a function definition; the absence is diagnosed through the diagnosis of implicit int. In GNU C we also allow but diagnose declarations without declaration specifiers, but only at top level (elsewhere they conflict with other syntax). OpenMP: declaration: threadprivate-directive */ static void c_parser_declaration_or_fndef (c_parser *parser, bool fndef_ok, bool empty_ok, bool nested, bool start_attr_ok) { struct c_declspecs *specs; tree prefix_attrs; tree all_prefix_attrs; bool diagnosed_no_specs = false; location_t here = c_parser_peek_token (parser)->location; specs = build_null_declspecs (); c_parser_declspecs (parser, specs, true, true, start_attr_ok); if (parser->error) { c_parser_skip_to_end_of_block_or_statement (parser); return; } if (nested && !specs->declspecs_seen_p) { c_parser_error (parser, "expected declaration specifiers"); c_parser_skip_to_end_of_block_or_statement (parser); return; } finish_declspecs (specs); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { if (empty_ok) shadow_tag (specs); else { shadow_tag_warned (specs, 1); pedwarn (here, 0, "empty declaration"); } c_parser_consume_token (parser); return; } pending_xref_error (); prefix_attrs = specs->attrs; all_prefix_attrs = prefix_attrs; specs->attrs = NULL_TREE; while (true) { struct c_declarator *declarator; bool dummy = false; tree fnbody; /* Declaring either one or more declarators (in which case we should diagnose if there were no declaration specifiers) or a function definition (in which case the diagnostic for implicit int suffices). */ declarator = c_parser_declarator (parser, specs->type_seen_p, C_DTR_NORMAL, &dummy); if (declarator == NULL) { c_parser_skip_to_end_of_block_or_statement (parser); return; } if (c_parser_next_token_is (parser, CPP_EQ) || c_parser_next_token_is (parser, CPP_COMMA) || c_parser_next_token_is (parser, CPP_SEMICOLON) || c_parser_next_token_is_keyword (parser, RID_ASM) || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) { tree asm_name = NULL_TREE; tree postfix_attrs = NULL_TREE; if (!diagnosed_no_specs && !specs->declspecs_seen_p) { diagnosed_no_specs = true; pedwarn (here, 0, "data definition has no type or storage class"); } /* Having seen a data definition, there cannot now be a function definition. */ fndef_ok = false; if (c_parser_next_token_is_keyword (parser, RID_ASM)) asm_name = c_parser_simple_asm_expr (parser); if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) postfix_attrs = c_parser_attributes (parser); if (c_parser_next_token_is (parser, CPP_EQ)) { tree d; struct c_expr init; location_t init_loc; c_parser_consume_token (parser); /* The declaration of the variable is in effect while its initializer is parsed. 
*/ d = start_decl (declarator, specs, true, chainon (postfix_attrs, all_prefix_attrs)); if (!d) d = error_mark_node; start_init (d, asm_name, global_bindings_p ()); init_loc = c_parser_peek_token (parser)->location; init = c_parser_initializer (parser); finish_init (); if (d != error_mark_node) { maybe_warn_string_init (TREE_TYPE (d), init); finish_decl (d, init_loc, init.value, init.original_type, asm_name); } } else { tree d = start_decl (declarator, specs, false, chainon (postfix_attrs, all_prefix_attrs)); if (d) finish_decl (d, UNKNOWN_LOCATION, NULL_TREE, NULL_TREE, asm_name); } if (c_parser_next_token_is (parser, CPP_COMMA)) { c_parser_consume_token (parser); if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) all_prefix_attrs = chainon (c_parser_attributes (parser), prefix_attrs); else all_prefix_attrs = prefix_attrs; continue; } else if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { c_parser_consume_token (parser); return; } else { c_parser_error (parser, "expected %<,%> or %<;%>"); c_parser_skip_to_end_of_block_or_statement (parser); return; } } else if (!fndef_ok) { c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, " "%<asm%> or %<__attribute__%>"); c_parser_skip_to_end_of_block_or_statement (parser); return; } /* Function definition (nested or otherwise). */ if (nested) { pedwarn (here, OPT_pedantic, "ISO C forbids nested functions"); c_push_function_context (); } if (!start_function (specs, declarator, all_prefix_attrs)) { /* This can appear in many cases looking nothing like a function definition, so we don't give a more specific error suggesting there was one. */ c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, %<asm%> " "or %<__attribute__%>"); if (nested) c_pop_function_context (); break; } /* Parse old-style parameter declarations. ??? Attributes are not allowed to start declaration specifiers here because of a syntax conflict between a function declaration with attribute suffix and a function definition with an attribute prefix on first old-style parameter declaration. Following the old parser, they are not accepted on subsequent old-style parameter declarations either. However, there is no ambiguity after the first declaration, nor indeed on the first as long as we don't allow postfix attributes after a declarator with a nonempty identifier list in a definition; and postfix attributes have never been accepted here in function definitions either. */ while (c_parser_next_token_is_not (parser, CPP_EOF) && c_parser_next_token_is_not (parser, CPP_OPEN_BRACE)) c_parser_declaration_or_fndef (parser, false, false, true, false); store_parm_decls (); DECL_STRUCT_FUNCTION (current_function_decl)->function_start_locus = c_parser_peek_token (parser)->location; fnbody = c_parser_compound_statement (parser); if (nested) { tree decl = current_function_decl; /* Mark nested functions as needing static-chain initially. lower_nested_functions will recompute it but the DECL_STATIC_CHAIN flag is also used before that happens, by initializer_constant_valid_p. See gcc.dg/nested-fn-2.c. */ DECL_STATIC_CHAIN (decl) = 1; add_stmt (fnbody); finish_function (); c_pop_function_context (); add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl)); } else { add_stmt (fnbody); finish_function (); } break; } } /* Parse an asm-definition (asm() outside a function body). This is a GNU extension. 
asm-definition: simple-asm-expr ; */ static void c_parser_asm_definition (c_parser *parser) { tree asm_str = c_parser_simple_asm_expr (parser); if (asm_str) cgraph_add_asm_node (asm_str); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } /* Parse some declaration specifiers (possibly none) (C90 6.5, C99 6.7), adding them to SPECS (which may already include some). Storage class specifiers are accepted iff SCSPEC_OK; type specifiers are accepted iff TYPESPEC_OK; attributes are accepted at the start iff START_ATTR_OK. declaration-specifiers: storage-class-specifier declaration-specifiers[opt] type-specifier declaration-specifiers[opt] type-qualifier declaration-specifiers[opt] function-specifier declaration-specifiers[opt] Function specifiers (inline) are from C99, and are currently handled as storage class specifiers, as is __thread. C90 6.5.1, C99 6.7.1: storage-class-specifier: typedef extern static auto register C99 6.7.4: function-specifier: inline C90 6.5.2, C99 6.7.2: type-specifier: void char short int long float double signed unsigned _Bool _Complex [_Imaginary removed in C99 TC2] struct-or-union-specifier enum-specifier typedef-name (_Bool and _Complex are new in C99.) C90 6.5.3, C99 6.7.3: type-qualifier: const restrict volatile address-space-qualifier (restrict is new in C99.) GNU extensions: declaration-specifiers: attributes declaration-specifiers[opt] type-qualifier: address-space address-space: identifier recognized by the target storage-class-specifier: __thread type-specifier: typeof-specifier _Decimal32 _Decimal64 _Decimal128 _Fract _Accum _Sat (_Fract, _Accum, and _Sat are new from ISO/IEC DTR 18037: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1169.pdf) Objective-C: type-specifier: class-name objc-protocol-refs[opt] typedef-name objc-protocol-refs objc-protocol-refs */ static void c_parser_declspecs (c_parser *parser, struct c_declspecs *specs, bool scspec_ok, bool typespec_ok, bool start_attr_ok) { bool attrs_ok = start_attr_ok; bool seen_type = specs->type_seen_p; while (c_parser_next_token_is (parser, CPP_NAME) || c_parser_next_token_is (parser, CPP_KEYWORD) || (c_dialect_objc () && c_parser_next_token_is (parser, CPP_LESS))) { struct c_typespec t; tree attrs; location_t loc = c_parser_peek_token (parser)->location; if (c_parser_next_token_is (parser, CPP_NAME)) { tree value = c_parser_peek_token (parser)->value; c_id_kind kind = c_parser_peek_token (parser)->id_kind; if (kind == C_ID_ADDRSPACE) { addr_space_t as = c_parser_peek_token (parser)->keyword - RID_FIRST_ADDR_SPACE; declspecs_add_addrspace (specs, as); c_parser_consume_token (parser); attrs_ok = true; continue; } /* This finishes the specifiers unless a type name is OK, it is declared as a type name and a type name hasn't yet been seen. */ if (!typespec_ok || seen_type || (kind != C_ID_TYPENAME && kind != C_ID_CLASSNAME)) break; c_parser_consume_token (parser); seen_type = true; attrs_ok = true; if (kind == C_ID_TYPENAME && (!c_dialect_objc () || c_parser_next_token_is_not (parser, CPP_LESS))) { t.kind = ctsk_typedef; /* For a typedef name, record the meaning, not the name. In case of 'foo foo, bar;'. 
*/ t.spec = lookup_name (value); t.expr = NULL_TREE; t.expr_const_operands = true; } else { tree proto = NULL_TREE; gcc_assert (c_dialect_objc ()); t.kind = ctsk_objc; if (c_parser_next_token_is (parser, CPP_LESS)) proto = c_parser_objc_protocol_refs (parser); t.spec = objc_get_protocol_qualified_type (value, proto); t.expr = NULL_TREE; t.expr_const_operands = true; } declspecs_add_type (loc, specs, t); continue; } if (c_parser_next_token_is (parser, CPP_LESS)) { /* Make "<SomeProtocol>" equivalent to "id <SomeProtocol>" - nisse@lysator.liu.se. */ tree proto; gcc_assert (c_dialect_objc ()); if (!typespec_ok || seen_type) break; proto = c_parser_objc_protocol_refs (parser); t.kind = ctsk_objc; t.spec = objc_get_protocol_qualified_type (NULL_TREE, proto); t.expr = NULL_TREE; t.expr_const_operands = true; declspecs_add_type (loc, specs, t); continue; } gcc_assert (c_parser_next_token_is (parser, CPP_KEYWORD)); switch (c_parser_peek_token (parser)->keyword) { case RID_STATIC: case RID_EXTERN: case RID_REGISTER: case RID_TYPEDEF: case RID_INLINE: case RID_AUTO: case RID_THREAD: if (!scspec_ok) goto out; attrs_ok = true; /* TODO: Distinguish between function specifiers (inline) and storage class specifiers, either here or in declspecs_add_scspec. */ declspecs_add_scspec (specs, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); break; case RID_UNSIGNED: case RID_LONG: case RID_SHORT: case RID_SIGNED: case RID_COMPLEX: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: case RID_BOOL: case RID_FRACT: case RID_ACCUM: case RID_SAT: if (!typespec_ok) goto out; attrs_ok = true; seen_type = true; if (c_dialect_objc ()) parser->objc_need_raw_identifier = true; t.kind = ctsk_resword; t.spec = c_parser_peek_token (parser)->value; t.expr = NULL_TREE; t.expr_const_operands = true; declspecs_add_type (loc, specs, t); c_parser_consume_token (parser); break; case RID_ENUM: if (!typespec_ok) goto out; attrs_ok = true; seen_type = true; t = c_parser_enum_specifier (parser); declspecs_add_type (loc, specs, t); break; case RID_STRUCT: case RID_UNION: if (!typespec_ok) goto out; attrs_ok = true; seen_type = true; t = c_parser_struct_or_union_specifier (parser); invoke_plugin_callbacks (PLUGIN_FINISH_TYPE, t.spec); declspecs_add_type (loc, specs, t); break; case RID_TYPEOF: /* ??? The old parser rejected typeof after other type specifiers, but is a syntax error the best way of handling this? */ if (!typespec_ok || seen_type) goto out; attrs_ok = true; seen_type = true; t = c_parser_typeof_specifier (parser); declspecs_add_type (loc, specs, t); break; case RID_CONST: case RID_VOLATILE: case RID_RESTRICT: attrs_ok = true; declspecs_add_qual (specs, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); break; case RID_ATTRIBUTE: if (!attrs_ok) goto out; attrs = c_parser_attributes (parser); declspecs_add_attrs (specs, attrs); break; default: goto out; } } out: ; } /* Parse an enum specifier (C90 6.5.2.2, C99 6.7.2.2). enum-specifier: enum attributes[opt] identifier[opt] { enumerator-list } attributes[opt] enum attributes[opt] identifier[opt] { enumerator-list , } attributes[opt] enum attributes[opt] identifier The form with trailing comma is new in C99. The forms with attributes are GNU extensions. In GNU C, we accept any expression without commas in the syntax (assignment expressions, not just conditional expressions); assignment expressions will be diagnosed as non-constant. 
enumerator-list: enumerator enumerator-list , enumerator enumerator: enumeration-constant enumeration-constant = constant-expression */ static struct c_typespec c_parser_enum_specifier (c_parser *parser) { struct c_typespec ret; tree attrs; tree ident = NULL_TREE; location_t enum_loc; location_t ident_loc = UNKNOWN_LOCATION; /* Quiet warning. */ gcc_assert (c_parser_next_token_is_keyword (parser, RID_ENUM)); enum_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); attrs = c_parser_attributes (parser); enum_loc = c_parser_peek_token (parser)->location; /* Set the location in case we create a decl now. */ c_parser_set_source_position_from_token (c_parser_peek_token (parser)); if (c_parser_next_token_is (parser, CPP_NAME)) { ident = c_parser_peek_token (parser)->value; ident_loc = c_parser_peek_token (parser)->location; enum_loc = ident_loc; c_parser_consume_token (parser); } if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { /* Parse an enum definition. */ struct c_enum_contents the_enum; tree type = start_enum (enum_loc, &the_enum, ident); tree postfix_attrs; /* We chain the enumerators in reverse order, then put them in forward order at the end. */ tree values = NULL_TREE; c_parser_consume_token (parser); while (true) { tree enum_id; tree enum_value; tree enum_decl; bool seen_comma; c_token *token; location_t comma_loc = UNKNOWN_LOCATION; /* Quiet warning. */ location_t value_loc; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL); values = error_mark_node; break; } token = c_parser_peek_token (parser); enum_id = token->value; /* Set the location in case we create a decl now. */ c_parser_set_source_position_from_token (token); value_loc = token->location; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_EQ)) { c_parser_consume_token (parser); value_loc = c_parser_peek_token (parser)->location; enum_value = c_parser_expr_no_commas (parser, NULL).value; } else enum_value = NULL_TREE; enum_decl = build_enumerator (value_loc, &the_enum, enum_id, enum_value); TREE_CHAIN (enum_decl) = values; values = enum_decl; seen_comma = false; if (c_parser_next_token_is (parser, CPP_COMMA)) { comma_loc = c_parser_peek_token (parser)->location; seen_comma = true; c_parser_consume_token (parser); } if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { if (seen_comma && !flag_isoc99) pedwarn (comma_loc, OPT_pedantic, "comma at end of enumerator list"); c_parser_consume_token (parser); break; } if (!seen_comma) { c_parser_error (parser, "expected %<,%> or %<}%>"); c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL); values = error_mark_node; break; } } postfix_attrs = c_parser_attributes (parser); ret.spec = finish_enum (type, nreverse (values), chainon (attrs, postfix_attrs)); ret.kind = ctsk_tagdef; ret.expr = NULL_TREE; ret.expr_const_operands = true; return ret; } else if (!ident) { c_parser_error (parser, "expected %<{%>"); ret.spec = error_mark_node; ret.kind = ctsk_tagref; ret.expr = NULL_TREE; ret.expr_const_operands = true; return ret; } ret = parser_xref_tag (ident_loc, ENUMERAL_TYPE, ident); /* In ISO C, enumerated types can be referred to only if already defined. */ if (pedantic && !COMPLETE_TYPE_P (ret.spec)) { gcc_assert (ident); pedwarn (enum_loc, OPT_pedantic, "ISO C forbids forward references to %<enum%> types"); } return ret; } /* Parse a struct or union specifier (C90 6.5.2.1, C99 6.7.2.1). 
struct-or-union-specifier: struct-or-union attributes[opt] identifier[opt] { struct-contents } attributes[opt] struct-or-union attributes[opt] identifier struct-contents: struct-declaration-list struct-declaration-list: struct-declaration ; struct-declaration-list struct-declaration ; GNU extensions: struct-contents: empty struct-declaration struct-declaration-list struct-declaration struct-declaration-list: struct-declaration-list ; ; (Note that in the syntax here, unlike that in ISO C, the semicolons are included here rather than in struct-declaration, in order to describe the syntax with extra semicolons and missing semicolon at end.) Objective-C: struct-declaration-list: @defs ( class-name ) (Note this does not include a trailing semicolon, but can be followed by further declarations, and gets a pedwarn-if-pedantic when followed by a semicolon.) */ static struct c_typespec c_parser_struct_or_union_specifier (c_parser *parser) { struct c_typespec ret; tree attrs; tree ident = NULL_TREE; location_t struct_loc; location_t ident_loc = UNKNOWN_LOCATION; enum tree_code code; switch (c_parser_peek_token (parser)->keyword) { case RID_STRUCT: code = RECORD_TYPE; break; case RID_UNION: code = UNION_TYPE; break; default: gcc_unreachable (); } struct_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); attrs = c_parser_attributes (parser); /* Set the location in case we create a decl now. */ c_parser_set_source_position_from_token (c_parser_peek_token (parser)); if (c_parser_next_token_is (parser, CPP_NAME)) { ident = c_parser_peek_token (parser)->value; ident_loc = c_parser_peek_token (parser)->location; struct_loc = ident_loc; c_parser_consume_token (parser); } if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { /* Parse a struct or union definition. Start the scope of the tag before parsing components. */ struct c_struct_parse_info *struct_info; tree type = start_struct (struct_loc, code, ident, &struct_info); tree postfix_attrs; /* We chain the components in reverse order, then put them in forward order at the end. Each struct-declaration may declare multiple components (comma-separated), so we must use chainon to join them, although when parsing each struct-declaration we can use TREE_CHAIN directly. The theory behind all this is that there will be more semicolon separated fields than comma separated fields, and so we'll be minimizing the number of node traversals required by chainon. */ tree contents = NULL_TREE; c_parser_consume_token (parser); /* Handle the Objective-C @defs construct, e.g. foo(sizeof(struct{ @defs(ClassName) }));. */ if (c_parser_next_token_is_keyword (parser, RID_AT_DEFS)) { tree name; gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) goto end_at_defs; if (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME) { name = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else { c_parser_error (parser, "expected class name"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); goto end_at_defs; } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); contents = nreverse (objc_get_class_ivars (name)); } end_at_defs: /* Parse the struct-declarations and semicolons. Problems with semicolons are diagnosed here; empty structures are diagnosed elsewhere. */ while (true) { tree decls; /* Parse any stray semicolon. 
*/ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic, "extra semicolon in struct or union specified"); c_parser_consume_token (parser); continue; } /* Stop if at the end of the struct or union contents. */ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { c_parser_consume_token (parser); break; } /* Accept #pragmas at struct scope. */ if (c_parser_next_token_is (parser, CPP_PRAGMA)) { c_parser_pragma (parser, pragma_external); continue; } /* Parse some comma-separated declarations, but not the trailing semicolon if any. */ decls = c_parser_struct_declaration (parser); contents = chainon (decls, contents); /* If no semicolon follows, either we have a parse error or are at the end of the struct or union and should pedwarn. */ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) c_parser_consume_token (parser); else { if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) pedwarn (c_parser_peek_token (parser)->location, 0, "no semicolon at end of struct or union"); else { c_parser_error (parser, "expected %<;%>"); c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL); break; } } } postfix_attrs = c_parser_attributes (parser); ret.spec = finish_struct (struct_loc, type, nreverse (contents), chainon (attrs, postfix_attrs), struct_info); ret.kind = ctsk_tagdef; ret.expr = NULL_TREE; ret.expr_const_operands = true; return ret; } else if (!ident) { c_parser_error (parser, "expected %<{%>"); ret.spec = error_mark_node; ret.kind = ctsk_tagref; ret.expr = NULL_TREE; ret.expr_const_operands = true; return ret; } ret = parser_xref_tag (ident_loc, code, ident); return ret; } /* Parse a struct-declaration (C90 6.5.2.1, C99 6.7.2.1), *without* the trailing semicolon. struct-declaration: specifier-qualifier-list struct-declarator-list specifier-qualifier-list: type-specifier specifier-qualifier-list[opt] type-qualifier specifier-qualifier-list[opt] attributes specifier-qualifier-list[opt] struct-declarator-list: struct-declarator struct-declarator-list , attributes[opt] struct-declarator struct-declarator: declarator attributes[opt] declarator[opt] : constant-expression attributes[opt] GNU extensions: struct-declaration: __extension__ struct-declaration specifier-qualifier-list Unlike the ISO C syntax, semicolons are handled elsewhere. The use of attributes where shown is a GNU extension. In GNU C, we accept any expression without commas in the syntax (assignment expressions, not just conditional expressions); assignment expressions will be diagnosed as non-constant. 
*/ static tree c_parser_struct_declaration (c_parser *parser) { struct c_declspecs *specs; tree prefix_attrs; tree all_prefix_attrs; tree decls; location_t decl_loc; if (c_parser_next_token_is_keyword (parser, RID_EXTENSION)) { int ext; tree decl; ext = disable_extension_diagnostics (); c_parser_consume_token (parser); decl = c_parser_struct_declaration (parser); restore_extension_diagnostics (ext); return decl; } specs = build_null_declspecs (); decl_loc = c_parser_peek_token (parser)->location; c_parser_declspecs (parser, specs, false, true, true); if (parser->error) return NULL_TREE; if (!specs->declspecs_seen_p) { c_parser_error (parser, "expected specifier-qualifier-list"); return NULL_TREE; } finish_declspecs (specs); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { tree ret; if (!specs->type_seen_p) { pedwarn (decl_loc, OPT_pedantic, "ISO C forbids member declarations with no members"); shadow_tag_warned (specs, pedantic); ret = NULL_TREE; } else { /* Support for unnamed structs or unions as members of structs or unions (which is [a] useful and [b] supports MS P-SDK). */ tree attrs = NULL; ret = grokfield (c_parser_peek_token (parser)->location, build_id_declarator (NULL_TREE), specs, NULL_TREE, &attrs); if (ret) decl_attributes (&ret, attrs, 0); } return ret; } pending_xref_error (); prefix_attrs = specs->attrs; all_prefix_attrs = prefix_attrs; specs->attrs = NULL_TREE; decls = NULL_TREE; while (true) { /* Declaring one or more declarators or un-named bit-fields. */ struct c_declarator *declarator; bool dummy = false; if (c_parser_next_token_is (parser, CPP_COLON)) declarator = build_id_declarator (NULL_TREE); else declarator = c_parser_declarator (parser, specs->type_seen_p, C_DTR_NORMAL, &dummy); if (declarator == NULL) { c_parser_skip_to_end_of_block_or_statement (parser); break; } if (c_parser_next_token_is (parser, CPP_COLON) || c_parser_next_token_is (parser, CPP_COMMA) || c_parser_next_token_is (parser, CPP_SEMICOLON) || c_parser_next_token_is (parser, CPP_CLOSE_BRACE) || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) { tree postfix_attrs = NULL_TREE; tree width = NULL_TREE; tree d; if (c_parser_next_token_is (parser, CPP_COLON)) { c_parser_consume_token (parser); width = c_parser_expr_no_commas (parser, NULL).value; } if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) postfix_attrs = c_parser_attributes (parser); d = grokfield (c_parser_peek_token (parser)->location, declarator, specs, width, &all_prefix_attrs); decl_attributes (&d, chainon (postfix_attrs, all_prefix_attrs), 0); TREE_CHAIN (d) = decls; decls = d; if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) all_prefix_attrs = chainon (c_parser_attributes (parser), prefix_attrs); else all_prefix_attrs = prefix_attrs; if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else if (c_parser_next_token_is (parser, CPP_SEMICOLON) || c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { /* Semicolon consumed in caller. */ break; } else { c_parser_error (parser, "expected %<,%>, %<;%> or %<}%>"); break; } } else { c_parser_error (parser, "expected %<:%>, %<,%>, %<;%>, %<}%> or " "%<__attribute__%>"); break; } } return decls; } /* Parse a typeof specifier (a GNU extension). 
typeof-specifier: typeof ( expression ) typeof ( type-name ) */ static struct c_typespec c_parser_typeof_specifier (c_parser *parser) { struct c_typespec ret; ret.kind = ctsk_typeof; ret.spec = error_mark_node; ret.expr = NULL_TREE; ret.expr_const_operands = true; gcc_assert (c_parser_next_token_is_keyword (parser, RID_TYPEOF)); c_parser_consume_token (parser); c_inhibit_evaluation_warnings++; in_typeof++; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { c_inhibit_evaluation_warnings--; in_typeof--; return ret; } if (c_parser_next_token_starts_typename (parser)) { struct c_type_name *type = c_parser_type_name (parser); c_inhibit_evaluation_warnings--; in_typeof--; if (type != NULL) { ret.spec = groktypename (type, &ret.expr, &ret.expr_const_operands); pop_maybe_used (variably_modified_type_p (ret.spec, NULL_TREE)); } } else { bool was_vm; location_t here = c_parser_peek_token (parser)->location; struct c_expr expr = c_parser_expression (parser); c_inhibit_evaluation_warnings--; in_typeof--; if (TREE_CODE (expr.value) == COMPONENT_REF && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1))) error_at (here, "%<typeof%> applied to a bit-field"); ret.spec = TREE_TYPE (expr.value); was_vm = variably_modified_type_p (ret.spec, NULL_TREE); /* This is returned with the type so that when the type is evaluated, this can be evaluated. */ if (was_vm) ret.expr = c_fully_fold (expr.value, false, &ret.expr_const_operands); pop_maybe_used (was_vm); } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return ret; } /* Parse a declarator, possibly an abstract declarator (C90 6.5.4, 6.5.5, C99 6.7.5, 6.7.6). If TYPE_SEEN_P then a typedef name may be redeclared; otherwise it may not. KIND indicates which kind of declarator is wanted. Returns a valid declarator except in the case of a syntax error in which case NULL is returned. *SEEN_ID is set to true if an identifier being declared is seen; this is used to diagnose bad forms of abstract array declarators and to determine whether an identifier list is syntactically permitted. declarator: pointer[opt] direct-declarator direct-declarator: identifier ( attributes[opt] declarator ) direct-declarator array-declarator direct-declarator ( parameter-type-list ) direct-declarator ( identifier-list[opt] ) pointer: * type-qualifier-list[opt] * type-qualifier-list[opt] pointer type-qualifier-list: type-qualifier attributes type-qualifier-list type-qualifier type-qualifier-list attributes parameter-type-list: parameter-list parameter-list , ... parameter-list: parameter-declaration parameter-list , parameter-declaration parameter-declaration: declaration-specifiers declarator attributes[opt] declaration-specifiers abstract-declarator[opt] attributes[opt] identifier-list: identifier identifier-list , identifier abstract-declarator: pointer pointer[opt] direct-abstract-declarator direct-abstract-declarator: ( attributes[opt] abstract-declarator ) direct-abstract-declarator[opt] array-declarator direct-abstract-declarator[opt] ( parameter-type-list[opt] ) GNU extensions: direct-declarator: direct-declarator ( parameter-forward-declarations parameter-type-list[opt] ) direct-abstract-declarator: direct-abstract-declarator[opt] ( parameter-forward-declarations parameter-type-list[opt] ) parameter-forward-declarations: parameter-list ; parameter-forward-declarations parameter-list ; The uses of attributes shown above are GNU extensions. 
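   As an illustrative sketch (names are invented for the example), the
   productions above derive declarators such as:

     *const p                  pointer with a type-qualifier-list
     (*fp) (int, long)         parenthesized declarator: pointer to function
     v[3][4]                   repeated array declarators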
Some forms of array declarator are not included in C99 in the syntax for abstract declarators; these are disallowed elsewhere. This may be a defect (DR#289). This function also accepts an omitted abstract declarator as being an abstract declarator, although not part of the formal syntax. */ static struct c_declarator * c_parser_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind, bool *seen_id) { /* Parse any initial pointer part. */ if (c_parser_next_token_is (parser, CPP_MULT)) { struct c_declspecs *quals_attrs = build_null_declspecs (); struct c_declarator *inner; c_parser_consume_token (parser); c_parser_declspecs (parser, quals_attrs, false, false, true); inner = c_parser_declarator (parser, type_seen_p, kind, seen_id); if (inner == NULL) return NULL; else return make_pointer_declarator (quals_attrs, inner); } /* Now we have a direct declarator, direct abstract declarator or nothing (which counts as a direct abstract declarator here). */ return c_parser_direct_declarator (parser, type_seen_p, kind, seen_id); } /* Parse a direct declarator or direct abstract declarator; arguments as c_parser_declarator. */ static struct c_declarator * c_parser_direct_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind, bool *seen_id) { /* The direct declarator must start with an identifier (possibly omitted) or a parenthesized declarator (possibly abstract). In an ordinary declarator, initial parentheses must start a parenthesized declarator. In an abstract declarator or parameter declarator, they could start a parenthesized declarator or a parameter list. To tell which, the open parenthesis and any following attributes must be read. If a declaration specifier follows, then it is a parameter list; if the specifier is a typedef name, there might be an ambiguity about redeclaring it, which is resolved in the direction of treating it as a typedef name. If a close parenthesis follows, it is also an empty parameter list, as the syntax does not permit empty abstract declarators. Otherwise, it is a parenthesized declarator (in which case the analysis may be repeated inside it, recursively). ??? There is an ambiguity in a parameter declaration "int (__attribute__((foo)) x)", where x is not a typedef name: it could be an abstract declarator for a function, or declare x with parentheses. The proper resolution of this ambiguity needs documenting. At present we follow an accident of the old parser's implementation, whereby the first parameter must have some declaration specifiers other than just attributes. Thus as a parameter declaration it is treated as a parenthesized parameter named x, and as an abstract declarator it is rejected. ??? Also following the old parser, attributes inside an empty parameter list are ignored, making it a list not yielding a prototype, rather than giving an error or making it have one parameter with implicit type int. ??? Also following the old parser, typedef names may be redeclared in declarators, but not Objective-C class names. 
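   A sketch of that last point (T is an invented typedef name):

     typedef int T;
     void g (void) { unsigned T = 0; }

   Here T is accepted as the declared name because declaration
   specifiers have already been seen.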
*/ if (kind != C_DTR_ABSTRACT && c_parser_next_token_is (parser, CPP_NAME) && ((type_seen_p && c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME) || c_parser_peek_token (parser)->id_kind == C_ID_ID)) { struct c_declarator *inner = build_id_declarator (c_parser_peek_token (parser)->value); *seen_id = true; inner->id_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); return c_parser_direct_declarator_inner (parser, *seen_id, inner); } if (kind != C_DTR_NORMAL && c_parser_next_token_is (parser, CPP_OPEN_SQUARE)) { struct c_declarator *inner = build_id_declarator (NULL_TREE); return c_parser_direct_declarator_inner (parser, *seen_id, inner); } /* Either we are at the end of an abstract declarator, or we have parentheses. */ if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { tree attrs; struct c_declarator *inner; c_parser_consume_token (parser); attrs = c_parser_attributes (parser); if (kind != C_DTR_NORMAL && (c_parser_next_token_starts_declspecs (parser) || c_parser_next_token_is (parser, CPP_CLOSE_PAREN))) { struct c_arg_info *args = c_parser_parms_declarator (parser, kind == C_DTR_NORMAL, attrs); if (args == NULL) return NULL; else { inner = build_function_declarator (args, build_id_declarator (NULL_TREE)); return c_parser_direct_declarator_inner (parser, *seen_id, inner); } } /* A parenthesized declarator. */ inner = c_parser_declarator (parser, type_seen_p, kind, seen_id); if (inner != NULL && attrs != NULL) inner = build_attrs_declarator (attrs, inner); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); if (inner == NULL) return NULL; else return c_parser_direct_declarator_inner (parser, *seen_id, inner); } else { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return NULL; } } else { if (kind == C_DTR_NORMAL) { c_parser_error (parser, "expected identifier or %<(%>"); return NULL; } else return build_id_declarator (NULL_TREE); } } /* Parse part of a direct declarator or direct abstract declarator, given that some (in INNER) has already been parsed; ID_PRESENT is true if an identifier is present, false for an abstract declarator. */ static struct c_declarator * c_parser_direct_declarator_inner (c_parser *parser, bool id_present, struct c_declarator *inner) { /* Parse a sequence of array declarators and parameter lists. */ if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE)) { location_t brace_loc = c_parser_peek_token (parser)->location; struct c_declarator *declarator; struct c_declspecs *quals_attrs = build_null_declspecs (); bool static_seen; bool star_seen; tree dimen; c_parser_consume_token (parser); c_parser_declspecs (parser, quals_attrs, false, false, true); static_seen = c_parser_next_token_is_keyword (parser, RID_STATIC); if (static_seen) c_parser_consume_token (parser); if (static_seen && !quals_attrs->declspecs_seen_p) c_parser_declspecs (parser, quals_attrs, false, false, true); if (!quals_attrs->declspecs_seen_p) quals_attrs = NULL; /* If "static" is present, there must be an array dimension. Otherwise, there may be a dimension, "*", or no dimension. 
*/ if (static_seen) { star_seen = false; dimen = c_parser_expr_no_commas (parser, NULL).value; } else { if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) { dimen = NULL_TREE; star_seen = false; } else if (c_parser_next_token_is (parser, CPP_MULT)) { if (c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_SQUARE) { dimen = NULL_TREE; star_seen = true; c_parser_consume_token (parser); } else { star_seen = false; dimen = c_parser_expr_no_commas (parser, NULL).value; } } else { star_seen = false; dimen = c_parser_expr_no_commas (parser, NULL).value; } } if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) c_parser_consume_token (parser); else { c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); return NULL; } declarator = build_array_declarator (brace_loc, dimen, quals_attrs, static_seen, star_seen); if (declarator == NULL) return NULL; inner = set_array_declarator_inner (declarator, inner); return c_parser_direct_declarator_inner (parser, id_present, inner); } else if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { tree attrs; struct c_arg_info *args; c_parser_consume_token (parser); attrs = c_parser_attributes (parser); args = c_parser_parms_declarator (parser, id_present, attrs); if (args == NULL) return NULL; else { inner = build_function_declarator (args, inner); return c_parser_direct_declarator_inner (parser, id_present, inner); } } return inner; } /* Parse a parameter list or identifier list, including the closing parenthesis but not the opening one. ATTRS are the attributes at the start of the list. ID_LIST_OK is true if an identifier list is acceptable; such a list must not have attributes at the start. */ static struct c_arg_info * c_parser_parms_declarator (c_parser *parser, bool id_list_ok, tree attrs) { push_scope (); declare_parm_level (); /* If the list starts with an identifier, it is an identifier list. Otherwise, it is either a prototype list or an empty list. */ if (id_list_ok && !attrs && c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_ID) { tree list = NULL_TREE, *nextp = &list; while (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_ID) { *nextp = build_tree_list (NULL_TREE, c_parser_peek_token (parser)->value); nextp = & TREE_CHAIN (*nextp); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_COMMA)) break; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_error (parser, "expected identifier"); break; } } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info); ret->parms = 0; ret->tags = 0; ret->types = list; ret->others = 0; ret->pending_sizes = 0; ret->had_vla_unspec = 0; c_parser_consume_token (parser); pop_scope (); return ret; } else { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); pop_scope (); return NULL; } } else { struct c_arg_info *ret = c_parser_parms_list_declarator (parser, attrs); pop_scope (); return ret; } } /* Parse a parameter list (possibly empty), including the closing parenthesis but not the opening one. ATTRS are the attributes at the start of the list. */ static struct c_arg_info * c_parser_parms_list_declarator (c_parser *parser, tree attrs) { bool good_parm = false; /* ??? Following the old parser, forward parameter declarations may use abstract declarators, and if no real parameter declarations follow the forward declarations then this is not diagnosed. 
Also note as above that attributes are ignored as the only contents of the parentheses, or as the only contents after forward declarations. */ if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info); ret->parms = 0; ret->tags = 0; ret->types = 0; ret->others = 0; ret->pending_sizes = 0; ret->had_vla_unspec = 0; c_parser_consume_token (parser); return ret; } if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info); ret->parms = 0; ret->tags = 0; ret->others = 0; ret->pending_sizes = 0; ret->had_vla_unspec = 0; /* Suppress -Wold-style-definition for this case. */ ret->types = error_mark_node; error_at (c_parser_peek_token (parser)->location, "ISO C requires a named argument before %<...%>"); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); return ret; } else { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return NULL; } } /* Nonempty list of parameters, either terminated with semicolon (forward declarations; recurse) or with close parenthesis (normal function) or with ", ... )" (variadic function). */ while (true) { /* Parse a parameter. */ struct c_parm *parm = c_parser_parameter_declaration (parser, attrs); attrs = NULL_TREE; if (parm != NULL) { good_parm = true; push_parm_decl (parm); } if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { tree new_attrs; c_parser_consume_token (parser); mark_forward_parm_decls (); new_attrs = c_parser_attributes (parser); return c_parser_parms_list_declarator (parser, new_attrs); } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); if (good_parm) return get_parm_info (false); else { struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info); ret->parms = 0; ret->tags = 0; ret->types = 0; ret->others = 0; ret->pending_sizes = 0; ret->had_vla_unspec = 0; return ret; } } if (!c_parser_require (parser, CPP_COMMA, "expected %<;%>, %<,%> or %<)%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); get_pending_sizes (); return NULL; } if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); if (good_parm) return get_parm_info (true); else { struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info); ret->parms = 0; ret->tags = 0; ret->types = 0; ret->others = 0; ret->pending_sizes = 0; ret->had_vla_unspec = 0; return ret; } } else { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); get_pending_sizes (); return NULL; } } } } /* Parse a parameter declaration. ATTRS are the attributes at the start of the declaration if it is the first parameter. */ static struct c_parm * c_parser_parameter_declaration (c_parser *parser, tree attrs) { struct c_declspecs *specs; struct c_declarator *declarator; tree prefix_attrs; tree postfix_attrs = NULL_TREE; bool dummy = false; if (!c_parser_next_token_starts_declspecs (parser)) { /* ??? In some Objective-C cases '...' isn't applicable so there should be a different message. 
*/ c_parser_error (parser, "expected declaration specifiers or %<...%>"); c_parser_skip_to_end_of_parameter (parser); return NULL; } specs = build_null_declspecs (); if (attrs) { declspecs_add_attrs (specs, attrs); attrs = NULL_TREE; } c_parser_declspecs (parser, specs, true, true, true); finish_declspecs (specs); pending_xref_error (); prefix_attrs = specs->attrs; specs->attrs = NULL_TREE; declarator = c_parser_declarator (parser, specs->type_seen_p, C_DTR_PARM, &dummy); if (declarator == NULL) { c_parser_skip_until_found (parser, CPP_COMMA, NULL); return NULL; } if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) postfix_attrs = c_parser_attributes (parser); return build_c_parm (specs, chainon (postfix_attrs, prefix_attrs), declarator); } /* Parse a string literal in an asm expression. It should not be translated, and wide string literals are an error although permitted by the syntax. This is a GNU extension. asm-string-literal: string-literal ??? At present, following the old parser, the caller needs to have set lex_untranslated_string to 1. It would be better to follow the C++ parser rather than using this kludge. */ static tree c_parser_asm_string_literal (c_parser *parser) { tree str; if (c_parser_next_token_is (parser, CPP_STRING)) { str = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else if (c_parser_next_token_is (parser, CPP_WSTRING)) { error_at (c_parser_peek_token (parser)->location, "wide string literal in %<asm%>"); str = build_string (1, ""); c_parser_consume_token (parser); } else { c_parser_error (parser, "expected string literal"); str = NULL_TREE; } return str; } /* Parse a simple asm expression. This is used in restricted contexts, where a full expression with inputs and outputs does not make sense. This is a GNU extension. simple-asm-expr: asm ( asm-string-literal ) */ static tree c_parser_simple_asm_expr (c_parser *parser) { tree str; gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM)); /* ??? Follow the C++ parser rather than using the lex_untranslated_string kludge. */ parser->lex_untranslated_string = true; c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { parser->lex_untranslated_string = false; return NULL_TREE; } str = c_parser_asm_string_literal (parser); parser->lex_untranslated_string = false; if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return NULL_TREE; } return str; } /* Parse (possibly empty) attributes. This is a GNU extension. attributes: empty attributes attribute attribute: __attribute__ ( ( attribute-list ) ) attribute-list: attrib attribute-list , attrib attrib: empty any-word any-word ( identifier ) any-word ( identifier , nonempty-expr-list ) any-word ( expr-list ) where the "identifier" must not be declared as a type, and "any-word" may be any identifier (including one declared as a type), a reserved word storage class specifier, type specifier or type qualifier. ??? This still leaves out most reserved keywords (following the old parser), shouldn't we include them, and why not allow identifiers declared as types to start the arguments? */ static tree c_parser_attributes (c_parser *parser) { tree attrs = NULL_TREE; while (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) { /* ??? Follow the C++ parser rather than using the lex_untranslated_string kludge.
*/ parser->lex_untranslated_string = true; c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { parser->lex_untranslated_string = false; return attrs; } if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { parser->lex_untranslated_string = false; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return attrs; } /* Parse the attribute list. */ while (c_parser_next_token_is (parser, CPP_COMMA) || c_parser_next_token_is (parser, CPP_NAME) || c_parser_next_token_is (parser, CPP_KEYWORD)) { tree attr, attr_name, attr_args; VEC(tree,gc) *expr_list; if (c_parser_next_token_is (parser, CPP_COMMA)) { c_parser_consume_token (parser); continue; } if (c_parser_next_token_is (parser, CPP_KEYWORD)) { /* ??? See comment above about what keywords are accepted here. */ bool ok; switch (c_parser_peek_token (parser)->keyword) { case RID_STATIC: case RID_UNSIGNED: case RID_LONG: case RID_CONST: case RID_EXTERN: case RID_REGISTER: case RID_TYPEDEF: case RID_SHORT: case RID_INLINE: case RID_VOLATILE: case RID_SIGNED: case RID_AUTO: case RID_RESTRICT: case RID_COMPLEX: case RID_THREAD: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: case RID_BOOL: case RID_FRACT: case RID_ACCUM: case RID_SAT: ok = true; break; default: ok = false; break; } if (!ok) break; /* Accept __attribute__((__const)) as __attribute__((const)) etc. */ attr_name = ridpointers[(int) c_parser_peek_token (parser)->keyword]; } else attr_name = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_OPEN_PAREN)) { attr = build_tree_list (attr_name, NULL_TREE); attrs = chainon (attrs, attr); continue; } c_parser_consume_token (parser); /* Parse the attribute contents. If they start with an identifier which is followed by a comma or close parenthesis, then the arguments start with that identifier; otherwise they are an expression list. 
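   As an illustrative sketch, both argument shapes occur with common
   attributes:

     __attribute__ ((format (printf, 2, 3)))     arguments start with an
                                                 identifier
     __attribute__ ((aligned (16)))              plain expression list
     __attribute__ ((mode (word)))               lone identifier argument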
*/ if (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_ID && ((c_parser_peek_2nd_token (parser)->type == CPP_COMMA) || (c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_PAREN))) { tree arg1 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) attr_args = build_tree_list (NULL_TREE, arg1); else { tree tree_list; c_parser_consume_token (parser); expr_list = c_parser_expr_list (parser, false, true, NULL); tree_list = build_tree_list_vec (expr_list); attr_args = tree_cons (NULL_TREE, arg1, tree_list); release_tree_vector (expr_list); } } else { if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) attr_args = NULL_TREE; else { expr_list = c_parser_expr_list (parser, false, true, NULL); attr_args = build_tree_list_vec (expr_list); release_tree_vector (expr_list); } } attr = build_tree_list (attr_name, attr_args); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) c_parser_consume_token (parser); else { parser->lex_untranslated_string = false; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } attrs = chainon (attrs, attr); } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) c_parser_consume_token (parser); else { parser->lex_untranslated_string = false; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) c_parser_consume_token (parser); else { parser->lex_untranslated_string = false; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } parser->lex_untranslated_string = false; } return attrs; } /* Parse a type name (C90 6.5.5, C99 6.7.6). type-name: specifier-qualifier-list abstract-declarator[opt] */ static struct c_type_name * c_parser_type_name (c_parser *parser) { struct c_declspecs *specs = build_null_declspecs (); struct c_declarator *declarator; struct c_type_name *ret; bool dummy = false; c_parser_declspecs (parser, specs, false, true, true); if (!specs->declspecs_seen_p) { c_parser_error (parser, "expected specifier-qualifier-list"); return NULL; } pending_xref_error (); finish_declspecs (specs); declarator = c_parser_declarator (parser, specs->type_seen_p, C_DTR_ABSTRACT, &dummy); if (declarator == NULL) return NULL; ret = XOBNEW (&parser_obstack, struct c_type_name); ret->specs = specs; ret->declarator = declarator; return ret; } /* Parse an initializer (C90 6.5.7, C99 6.7.8). initializer: assignment-expression { initializer-list } { initializer-list , } initializer-list: designation[opt] initializer initializer-list , designation[opt] initializer designation: designator-list = designator-list: designator designator-list designator designator: array-designator . identifier array-designator: [ constant-expression ] GNU extensions: initializer: { } designation: array-designator identifier : array-designator: [ constant-expression ... constant-expression ] Any expression without commas is accepted in the syntax for the constant-expressions, with non-constant expressions rejected later. This function is only used for top-level initializers; for nested ones, see c_parser_initval. 
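   As an illustrative sketch (names are invented for the example), these
   forms cover initializers such as:

     int a[6] = { [0] = 1, [2 ... 4] = 5 };     array designators; the
                                                range is the GNU extension
     struct pt q = { .x = 1, .y = 2 };          member designators
     int b[2] = { };                            GNU empty braces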
*/ static struct c_expr c_parser_initializer (c_parser *parser) { if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) return c_parser_braced_init (parser, NULL_TREE, false); else { struct c_expr ret; location_t loc = c_parser_peek_token (parser)->location; ret = c_parser_expr_no_commas (parser, NULL); if (TREE_CODE (ret.value) != STRING_CST && TREE_CODE (ret.value) != COMPOUND_LITERAL_EXPR) ret = default_function_array_conversion (loc, ret); return ret; } } /* Parse a braced initializer list. TYPE is the type specified for a compound literal, and NULL_TREE for other initializers and for nested braced lists. NESTED_P is true for nested braced lists, false for the list of a compound literal or the list that is the top-level initializer in a declaration. */ static struct c_expr c_parser_braced_init (c_parser *parser, tree type, bool nested_p) { location_t brace_loc = c_parser_peek_token (parser)->location; gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE)); c_parser_consume_token (parser); if (nested_p) push_init_level (0); else really_start_incremental_init (type); if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { pedwarn (brace_loc, OPT_pedantic, "ISO C forbids empty initializer braces"); } else { /* Parse a non-empty initializer list, possibly with a trailing comma. */ while (true) { c_parser_initelt (parser); if (parser->error) break; if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) break; } } if (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE)) { struct c_expr ret; ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, "expected %<}%>"); pop_init_level (0); return ret; } c_parser_consume_token (parser); return pop_init_level (0); } /* Parse a nested initializer, including designators. */ static void c_parser_initelt (c_parser *parser) { /* Parse any designator or designator list. A single array designator may have the subsequent "=" omitted in GNU C, but a longer list or a structure member designator may not. */ if (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON) { /* Old-style structure member designator. */ set_init_label (c_parser_peek_token (parser)->value); /* Use the colon as the error location. */ pedwarn (c_parser_peek_2nd_token (parser)->location, OPT_pedantic, "obsolete use of designated initializer with %<:%>"); c_parser_consume_token (parser); c_parser_consume_token (parser); } else { /* des_seen is 0 if there have been no designators, 1 if there has been a single array designator and 2 otherwise. */ int des_seen = 0; /* Location of a designator. */ location_t des_loc = UNKNOWN_LOCATION; /* Quiet warning. 
*/ while (c_parser_next_token_is (parser, CPP_OPEN_SQUARE) || c_parser_next_token_is (parser, CPP_DOT)) { int des_prev = des_seen; if (!des_seen) des_loc = c_parser_peek_token (parser)->location; if (des_seen < 2) des_seen++; if (c_parser_next_token_is (parser, CPP_DOT)) { des_seen = 2; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { set_init_label (c_parser_peek_token (parser)->value); c_parser_consume_token (parser); } else { struct c_expr init; init.value = error_mark_node; init.original_code = ERROR_MARK; init.original_type = NULL; c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_COMMA, NULL); process_init_element (init, false); return; } } else { tree first, second; location_t ellipsis_loc = UNKNOWN_LOCATION; /* Quiet warning. */ /* ??? Following the old parser, [ objc-receiver objc-message-args ] is accepted as an initializer, being distinguished from a designator by what follows the first assignment expression inside the square brackets, but after a first array designator a subsequent square bracket is for Objective-C taken to start an expression, using the obsolete form of designated initializer without '=', rather than possibly being a second level of designation: in LALR terms, the '[' is shifted rather than reducing designator to designator-list. */ if (des_prev == 1 && c_dialect_objc ()) { des_seen = des_prev; break; } if (des_prev == 0 && c_dialect_objc ()) { /* This might be an array designator or an Objective-C message expression. If the former, continue parsing here; if the latter, parse the remainder of the initializer given the starting primary-expression. ??? It might make sense to distinguish when des_prev == 1 as well; see previous comment. */ tree rec, args; struct c_expr mexpr; c_parser_consume_token (parser); if (c_parser_peek_token (parser)->type == CPP_NAME && ((c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME) || (c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME))) { /* Type name receiver. */ tree id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); rec = objc_get_class_reference (id); goto parse_message_args; } first = c_parser_expr_no_commas (parser, NULL).value; if (c_parser_next_token_is (parser, CPP_ELLIPSIS) || c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) goto array_desig_after_first; /* Expression receiver. So far only one part without commas has been parsed; there might be more of the expression. */ rec = first; while (c_parser_next_token_is (parser, CPP_COMMA)) { struct c_expr next; location_t comma_loc, exp_loc; comma_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; next = c_parser_expr_no_commas (parser, NULL); next = default_function_array_conversion (exp_loc, next); rec = build_compound_expr (comma_loc, rec, next.value); } parse_message_args: /* Now parse the objc-message-args. */ args = c_parser_objc_message_args (parser); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); mexpr.value = objc_build_message_expr (build_tree_list (rec, args)); mexpr.original_code = ERROR_MARK; mexpr.original_type = NULL; /* Now parse and process the remainder of the initializer, starting with this message expression as a primary-expression. 
*/ c_parser_initval (parser, &mexpr); return; } c_parser_consume_token (parser); first = c_parser_expr_no_commas (parser, NULL).value; array_desig_after_first: if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { ellipsis_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); second = c_parser_expr_no_commas (parser, NULL).value; } else second = NULL_TREE; if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) { c_parser_consume_token (parser); set_init_index (first, second); if (second) pedwarn (ellipsis_loc, OPT_pedantic, "ISO C forbids specifying range of elements to initialize"); } else c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); } } if (des_seen >= 1) { if (c_parser_next_token_is (parser, CPP_EQ)) { if (!flag_isoc99) pedwarn (des_loc, OPT_pedantic, "ISO C90 forbids specifying subobject to initialize"); c_parser_consume_token (parser); } else { if (des_seen == 1) pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic, "obsolete use of designated initializer without %<=%>"); else { struct c_expr init; init.value = error_mark_node; init.original_code = ERROR_MARK; init.original_type = NULL; c_parser_error (parser, "expected %<=%>"); c_parser_skip_until_found (parser, CPP_COMMA, NULL); process_init_element (init, false); return; } } } } c_parser_initval (parser, NULL); } /* Parse a nested initializer; as c_parser_initializer but parses initializers within braced lists, after any designators have been applied. If AFTER is not NULL then it is an Objective-C message expression which is the primary-expression starting the initializer. */ static void c_parser_initval (c_parser *parser, struct c_expr *after) { struct c_expr init; gcc_assert (!after || c_dialect_objc ()); if (c_parser_next_token_is (parser, CPP_OPEN_BRACE) && !after) init = c_parser_braced_init (parser, NULL_TREE, true); else { location_t loc = c_parser_peek_token (parser)->location; init = c_parser_expr_no_commas (parser, after); if (init.value != NULL_TREE && TREE_CODE (init.value) != STRING_CST && TREE_CODE (init.value) != COMPOUND_LITERAL_EXPR) init = default_function_array_conversion (loc, init); } process_init_element (init, false); } /* Parse a compound statement (possibly a function body) (C90 6.6.2, C99 6.8.2). compound-statement: { block-item-list[opt] } { label-declarations block-item-list } block-item-list: block-item block-item-list block-item block-item: nested-declaration statement nested-declaration: declaration GNU extensions: compound-statement: { label-declarations block-item-list } nested-declaration: __extension__ nested-declaration nested-function-definition label-declarations: label-declaration label-declarations label-declaration label-declaration: __label__ identifier-list ; Allowing the mixing of declarations and code is new in C99. The GNU syntax also permits (not shown above) labels at the end of compound statements, which yield an error. We don't allow labels on declarations; this might seem like a natural extension, but there would be a conflict between attributes on the label and prefix attributes on the declaration. ??? The syntax follows the old parser in requiring something after label declarations. Although they are erroneous if the labels declared aren't defined, is it useful for the syntax to be this way? 
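   As an illustrative sketch (names are invented for the example), a
   label declaration as accepted here looks like:

     {
       __label__ retry;
       retry: n++;
       if (n < lim) goto retry;
     }

   consistent with the requirement above that something follow the
   declaration.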
OpenMP: block-item: openmp-directive openmp-directive: barrier-directive flush-directive */ static tree c_parser_compound_statement (c_parser *parser) { tree stmt; location_t brace_loc; brace_loc = c_parser_peek_token (parser)->location; if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>")) { /* Ensure a scope is entered and left anyway to avoid confusion if we have just prepared to enter a function body. */ stmt = c_begin_compound_stmt (true); c_end_compound_stmt (brace_loc, stmt, true); return error_mark_node; } stmt = c_begin_compound_stmt (true); c_parser_compound_statement_nostart (parser); return c_end_compound_stmt (brace_loc, stmt, true); } /* Parse a compound statement except for the opening brace. This is used for parsing both compound statements and statement expressions (which follow different paths to handling the opening). */ static void c_parser_compound_statement_nostart (c_parser *parser) { bool last_stmt = false; bool last_label = false; bool save_valid_for_pragma = valid_location_for_stdc_pragma_p (); location_t label_loc = UNKNOWN_LOCATION; /* Quiet warning. */ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { c_parser_consume_token (parser); return; } mark_valid_location_for_stdc_pragma (true); if (c_parser_next_token_is_keyword (parser, RID_LABEL)) { /* Read zero or more forward-declarations for labels that nested functions can jump to. */ mark_valid_location_for_stdc_pragma (false); while (c_parser_next_token_is_keyword (parser, RID_LABEL)) { label_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); /* Any identifiers, including those declared as type names, are OK here. */ while (true) { tree label; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); break; } label = declare_label (c_parser_peek_token (parser)->value); C_DECLARED_LABEL_FLAG (label) = 1; add_stmt (build_stmt (label_loc, DECL_EXPR, label)); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } pedwarn (label_loc, OPT_pedantic, "ISO C forbids label declarations"); } /* We must now have at least one statement, label or declaration. */ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { mark_valid_location_for_stdc_pragma (save_valid_for_pragma); c_parser_error (parser, "expected declaration or statement"); c_parser_consume_token (parser); return; } while (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE)) { location_t loc = c_parser_peek_token (parser)->location; if (c_parser_next_token_is_keyword (parser, RID_CASE) || c_parser_next_token_is_keyword (parser, RID_DEFAULT) || (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON)) { if (c_parser_next_token_is_keyword (parser, RID_CASE)) label_loc = c_parser_peek_2nd_token (parser)->location; else label_loc = c_parser_peek_token (parser)->location; last_label = true; last_stmt = false; mark_valid_location_for_stdc_pragma (false); c_parser_label (parser); } else if (!last_label && c_parser_next_token_starts_declspecs (parser)) { last_label = false; mark_valid_location_for_stdc_pragma (false); c_parser_declaration_or_fndef (parser, true, true, true, true); if (last_stmt) pedwarn_c90 (loc, (pedantic && !flag_isoc99) ? 
OPT_pedantic : OPT_Wdeclaration_after_statement, "ISO C90 forbids mixed declarations and code"); last_stmt = false; } else if (!last_label && c_parser_next_token_is_keyword (parser, RID_EXTENSION)) { /* __extension__ can start a declaration, but is also a unary operator that can start an expression. Consume all but the last of a possible series of __extension__ to determine which. */ while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD && (c_parser_peek_2nd_token (parser)->keyword == RID_EXTENSION)) c_parser_consume_token (parser); if (c_token_starts_declspecs (c_parser_peek_2nd_token (parser))) { int ext; ext = disable_extension_diagnostics (); c_parser_consume_token (parser); last_label = false; mark_valid_location_for_stdc_pragma (false); c_parser_declaration_or_fndef (parser, true, true, true, true); /* Following the old parser, __extension__ does not disable this diagnostic. */ restore_extension_diagnostics (ext); if (last_stmt) pedwarn_c90 (loc, (pedantic && !flag_isoc99) ? OPT_pedantic : OPT_Wdeclaration_after_statement, "ISO C90 forbids mixed declarations and code"); last_stmt = false; } else goto statement; } else if (c_parser_next_token_is (parser, CPP_PRAGMA)) { /* External pragmas, and some omp pragmas, are not associated with regular C code, and so are not to be considered statements syntactically. This ensures that the user doesn't put them places that would turn into syntax errors if the directive were ignored. */ if (c_parser_pragma (parser, pragma_compound)) last_label = false, last_stmt = true; } else if (c_parser_next_token_is (parser, CPP_EOF)) { mark_valid_location_for_stdc_pragma (save_valid_for_pragma); c_parser_error (parser, "expected declaration or statement"); return; } else if (c_parser_next_token_is_keyword (parser, RID_ELSE)) { if (parser->in_if_block) { mark_valid_location_for_stdc_pragma (save_valid_for_pragma); error_at (loc, "expected %<}%> before %<else%>"); return; } else { error_at (loc, "%<else%> without a previous %<if%>"); c_parser_consume_token (parser); continue; } } else { statement: last_label = false; last_stmt = true; mark_valid_location_for_stdc_pragma (false); c_parser_statement_after_labels (parser); } parser->error = false; } if (last_label) error_at (label_loc, "label at end of compound statement"); c_parser_consume_token (parser); /* Restore the value we started with. */ mark_valid_location_for_stdc_pragma (save_valid_for_pragma); } /* Parse a label (C90 6.6.1, C99 6.8.1). label: identifier : attributes[opt] case constant-expression : default : GNU extensions: label: case constant-expression ... constant-expression : The use of attributes on labels is a GNU extension. The syntax in GNU C accepts any expressions without commas, non-constant expressions being rejected later.
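   As an illustrative sketch (names are invented for the example), the
   GNU case-range form allows:

     switch (c)
       {
       case 'a' ... 'z': lower++; break;
       case 'A' ... 'Z': upper++; break;
       default: other++;
       }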
*/ static void c_parser_label (c_parser *parser) { location_t loc1 = c_parser_peek_token (parser)->location; tree label = NULL_TREE; if (c_parser_next_token_is_keyword (parser, RID_CASE)) { tree exp1, exp2; c_parser_consume_token (parser); exp1 = c_parser_expr_no_commas (parser, NULL).value; if (c_parser_next_token_is (parser, CPP_COLON)) { c_parser_consume_token (parser); label = do_case (loc1, exp1, NULL_TREE); } else if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { c_parser_consume_token (parser); exp2 = c_parser_expr_no_commas (parser, NULL).value; if (c_parser_require (parser, CPP_COLON, "expected %<:%>")) label = do_case (loc1, exp1, exp2); } else c_parser_error (parser, "expected %<:%> or %<...%>"); } else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT)) { c_parser_consume_token (parser); if (c_parser_require (parser, CPP_COLON, "expected %<:%>")) label = do_case (loc1, NULL_TREE, NULL_TREE); } else { tree name = c_parser_peek_token (parser)->value; tree tlab; tree attrs; location_t loc2 = c_parser_peek_token (parser)->location; gcc_assert (c_parser_next_token_is (parser, CPP_NAME)); c_parser_consume_token (parser); gcc_assert (c_parser_next_token_is (parser, CPP_COLON)); c_parser_consume_token (parser); attrs = c_parser_attributes (parser); tlab = define_label (loc2, name); if (tlab) { decl_attributes (&tlab, attrs, 0); label = add_stmt (build_stmt (loc1, LABEL_EXPR, tlab)); } } if (label) { if (c_parser_next_token_starts_declspecs (parser) && !(c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON)) { error_at (c_parser_peek_token (parser)->location, "a label can only be part of a statement and " "a declaration is not a statement"); c_parser_declaration_or_fndef (parser, /*fndef_ok*/ false, /*nested*/ true, /*empty_ok*/ false, /*start_attr_ok*/ true); } } } /* Parse a statement (C90 6.6, C99 6.8). 
statement: labeled-statement compound-statement expression-statement selection-statement iteration-statement jump-statement labeled-statement: label statement expression-statement: expression[opt] ; selection-statement: if-statement switch-statement iteration-statement: while-statement do-statement for-statement jump-statement: goto identifier ; continue ; break ; return expression[opt] ; GNU extensions: statement: asm-statement jump-statement: goto * expression ; Objective-C: statement: objc-throw-statement objc-try-catch-statement objc-synchronized-statement objc-throw-statement: @throw expression ; @throw ; OpenMP: statement: openmp-construct openmp-construct: parallel-construct for-construct sections-construct single-construct parallel-for-construct parallel-sections-construct master-construct critical-construct atomic-construct ordered-construct parallel-construct: parallel-directive structured-block for-construct: for-directive iteration-statement sections-construct: sections-directive section-scope single-construct: single-directive structured-block parallel-for-construct: parallel-for-directive iteration-statement parallel-sections-construct: parallel-sections-directive section-scope master-construct: master-directive structured-block critical-construct: critical-directive structured-block atomic-construct: atomic-directive expression-statement ordered-construct: ordered-directive structured-block */ static void c_parser_statement (c_parser *parser) { while (c_parser_next_token_is_keyword (parser, RID_CASE) || c_parser_next_token_is_keyword (parser, RID_DEFAULT) || (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON)) c_parser_label (parser); c_parser_statement_after_labels (parser); } /* Parse a statement, other than a labeled statement. 
*/ static void c_parser_statement_after_labels (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; tree stmt = NULL_TREE; bool in_if_block = parser->in_if_block; parser->in_if_block = false; switch (c_parser_peek_token (parser)->type) { case CPP_OPEN_BRACE: add_stmt (c_parser_compound_statement (parser)); break; case CPP_KEYWORD: switch (c_parser_peek_token (parser)->keyword) { case RID_IF: c_parser_if_statement (parser); break; case RID_SWITCH: c_parser_switch_statement (parser); break; case RID_WHILE: c_parser_while_statement (parser); break; case RID_DO: c_parser_do_statement (parser); break; case RID_FOR: c_parser_for_statement (parser); break; case RID_GOTO: c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { stmt = c_finish_goto_label (loc, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); } else if (c_parser_next_token_is (parser, CPP_MULT)) { c_parser_consume_token (parser); stmt = c_finish_goto_ptr (loc, c_parser_expression (parser).value); } else c_parser_error (parser, "expected identifier or %<*%>"); goto expect_semicolon; case RID_CONTINUE: c_parser_consume_token (parser); stmt = c_finish_bc_stmt (loc, &c_cont_label, false); goto expect_semicolon; case RID_BREAK: c_parser_consume_token (parser); stmt = c_finish_bc_stmt (loc, &c_break_label, true); goto expect_semicolon; case RID_RETURN: c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { stmt = c_finish_return (loc, NULL_TREE, NULL_TREE); c_parser_consume_token (parser); } else { struct c_expr expr = c_parser_expression_conv (parser); stmt = c_finish_return (loc, expr.value, expr.original_type); goto expect_semicolon; } break; case RID_ASM: stmt = c_parser_asm_statement (parser); break; case RID_THROW: gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { stmt = objc_build_throw_stmt (loc, NULL_TREE); c_parser_consume_token (parser); } else { tree expr = c_parser_expression (parser).value; expr = c_fully_fold (expr, false, NULL); stmt = objc_build_throw_stmt (loc, expr); goto expect_semicolon; } break; case RID_TRY: gcc_assert (c_dialect_objc ()); c_parser_objc_try_catch_statement (parser); break; case RID_AT_SYNCHRONIZED: gcc_assert (c_dialect_objc ()); c_parser_objc_synchronized_statement (parser); break; default: goto expr_stmt; } break; case CPP_SEMICOLON: c_parser_consume_token (parser); break; case CPP_CLOSE_PAREN: case CPP_CLOSE_SQUARE: /* Avoid infinite loop in error recovery: c_parser_skip_until_found stops at a closing nesting delimiter without consuming it, but here we need to consume it to proceed further. */ c_parser_error (parser, "expected statement"); c_parser_consume_token (parser); break; case CPP_PRAGMA: c_parser_pragma (parser, pragma_stmt); break; default: expr_stmt: stmt = c_finish_expr_stmt (loc, c_parser_expression_conv (parser).value); expect_semicolon: c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); break; } /* Two cases cannot and do not have line numbers associated: If stmt is degenerate, such as "2;", then stmt is an INTEGER_CST, which cannot hold line numbers. But that's OK because the statement will either be changed to a MODIFY_EXPR during gimplification of the statement expr, or discarded. If stmt was compound, but without new variables, we will have skipped the creation of a BIND and will have a bare STATEMENT_LIST. 
But that's OK because (recursively) all of the component statements should already have line numbers assigned. ??? Can we discard no-op statements earlier? */ if (CAN_HAVE_LOCATION_P (stmt) && EXPR_LOCATION (stmt) == UNKNOWN_LOCATION) SET_EXPR_LOCATION (stmt, loc); parser->in_if_block = in_if_block; } /* Parse the condition from an if, do, while or for statement. */ static tree c_parser_condition (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; tree cond; cond = c_parser_expression_conv (parser).value; cond = c_objc_common_truthvalue_conversion (loc, cond); cond = c_fully_fold (cond, false, NULL); if (warn_sequence_point) verify_sequence_points (cond); return cond; } /* Parse a parenthesized condition from an if, do or while statement. condition: ( expression ) */ static tree c_parser_paren_condition (c_parser *parser) { tree cond; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) return error_mark_node; cond = c_parser_condition (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return cond; } /* Parse a statement which is a block in C99. */ static tree c_parser_c99_block_statement (c_parser *parser) { tree block = c_begin_compound_stmt (flag_isoc99); location_t loc = c_parser_peek_token (parser)->location; c_parser_statement (parser); return c_end_compound_stmt (loc, block, flag_isoc99); } /* Parse the body of an if statement. This is just parsing a statement but (a) it is a block in C99, (b) we track whether the body is an if statement for the sake of -Wparentheses warnings, (c) we handle an empty body specially for the sake of -Wempty-body warnings, and (d) we call c_parser_compound_statement directly because c_parser_statement_after_labels resets parser->in_if_block. */ static tree c_parser_if_body (c_parser *parser, bool *if_p) { tree block = c_begin_compound_stmt (flag_isoc99); location_t body_loc = c_parser_peek_token (parser)->location; while (c_parser_next_token_is_keyword (parser, RID_CASE) || c_parser_next_token_is_keyword (parser, RID_DEFAULT) || (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON)) c_parser_label (parser); *if_p = c_parser_next_token_is_keyword (parser, RID_IF); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { location_t loc = c_parser_peek_token (parser)->location; add_stmt (build_empty_stmt (loc)); c_parser_consume_token (parser); if (!c_parser_next_token_is_keyword (parser, RID_ELSE)) warning_at (loc, OPT_Wempty_body, "suggest braces around empty body in an %<if%> statement"); } else if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) add_stmt (c_parser_compound_statement (parser)); else c_parser_statement_after_labels (parser); return c_end_compound_stmt (body_loc, block, flag_isoc99); } /* Parse the else body of an if statement. This is just parsing a statement but (a) it is a block in C99, (b) we handle an empty body specially for the sake of -Wempty-body warnings.
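   For illustration (p and q are invented for the example), with
   -Wempty-body enabled a parse of

     if (p) q = 0; else ;

   reaches the CPP_SEMICOLON case below and suggests braces around the
   empty else body.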
*/ static tree c_parser_else_body (c_parser *parser) { location_t else_loc = c_parser_peek_token (parser)->location; tree block = c_begin_compound_stmt (flag_isoc99); while (c_parser_next_token_is_keyword (parser, RID_CASE) || c_parser_next_token_is_keyword (parser, RID_DEFAULT) || (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON)) c_parser_label (parser); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { location_t loc = c_parser_peek_token (parser)->location; warning_at (loc, OPT_Wempty_body, "suggest braces around empty body in an %<else%> statement"); add_stmt (build_empty_stmt (loc)); c_parser_consume_token (parser); } else c_parser_statement_after_labels (parser); return c_end_compound_stmt (else_loc, block, flag_isoc99); } /* Parse an if statement (C90 6.6.4, C99 6.8.4). if-statement: if ( expression ) statement if ( expression ) statement else statement */ static void c_parser_if_statement (c_parser *parser) { tree block; location_t loc; tree cond; bool first_if = false; tree first_body, second_body; bool in_if_block; gcc_assert (c_parser_next_token_is_keyword (parser, RID_IF)); c_parser_consume_token (parser); block = c_begin_compound_stmt (flag_isoc99); loc = c_parser_peek_token (parser)->location; cond = c_parser_paren_condition (parser); in_if_block = parser->in_if_block; parser->in_if_block = true; first_body = c_parser_if_body (parser, &first_if); parser->in_if_block = in_if_block; if (c_parser_next_token_is_keyword (parser, RID_ELSE)) { c_parser_consume_token (parser); second_body = c_parser_else_body (parser); } else second_body = NULL_TREE; c_finish_if_stmt (loc, cond, first_body, second_body, first_if); add_stmt (c_end_compound_stmt (loc, block, flag_isoc99)); } /* Parse a switch statement (C90 6.6.4, C99 6.8.4). switch-statement: switch (expression) statement */ static void c_parser_switch_statement (c_parser *parser) { tree block, expr, body, save_break; location_t switch_loc = c_parser_peek_token (parser)->location; location_t switch_cond_loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_SWITCH)); c_parser_consume_token (parser); block = c_begin_compound_stmt (flag_isoc99); if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { switch_cond_loc = c_parser_peek_token (parser)->location; expr = c_parser_expression (parser).value; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else { switch_cond_loc = UNKNOWN_LOCATION; expr = error_mark_node; } c_start_case (switch_loc, switch_cond_loc, expr); save_break = c_break_label; c_break_label = NULL_TREE; body = c_parser_c99_block_statement (parser); c_finish_case (body); if (c_break_label) { location_t here = c_parser_peek_token (parser)->location; tree t = build1 (LABEL_EXPR, void_type_node, c_break_label); SET_EXPR_LOCATION (t, here); add_stmt (t); } c_break_label = save_break; add_stmt (c_end_compound_stmt (switch_loc, block, flag_isoc99)); } /* Parse a while statement (C90 6.6.5, C99 6.8.5). 
while-statement: while (expression) statement */ static void c_parser_while_statement (c_parser *parser) { tree block, cond, body, save_break, save_cont; location_t loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_WHILE)); c_parser_consume_token (parser); block = c_begin_compound_stmt (flag_isoc99); loc = c_parser_peek_token (parser)->location; cond = c_parser_paren_condition (parser); save_break = c_break_label; c_break_label = NULL_TREE; save_cont = c_cont_label; c_cont_label = NULL_TREE; body = c_parser_c99_block_statement (parser); c_finish_loop (loc, cond, NULL, body, c_break_label, c_cont_label, true); add_stmt (c_end_compound_stmt (loc, block, flag_isoc99)); c_break_label = save_break; c_cont_label = save_cont; } /* Parse a do statement (C90 6.6.5, C99 6.8.5). do-statement: do statement while ( expression ) ; */ static void c_parser_do_statement (c_parser *parser) { tree block, cond, body, save_break, save_cont, new_break, new_cont; location_t loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_DO)); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) warning_at (c_parser_peek_token (parser)->location, OPT_Wempty_body, "suggest braces around empty body in %<do%> statement"); block = c_begin_compound_stmt (flag_isoc99); loc = c_parser_peek_token (parser)->location; save_break = c_break_label; c_break_label = NULL_TREE; save_cont = c_cont_label; c_cont_label = NULL_TREE; body = c_parser_c99_block_statement (parser); c_parser_require_keyword (parser, RID_WHILE, "expected %<while%>"); new_break = c_break_label; c_break_label = save_break; new_cont = c_cont_label; c_cont_label = save_cont; cond = c_parser_paren_condition (parser); if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>")) c_parser_skip_to_end_of_block_or_statement (parser); c_finish_loop (loc, cond, NULL, body, new_break, new_cont, false); add_stmt (c_end_compound_stmt (loc, block, flag_isoc99)); } /* Parse a for statement (C90 6.6.5, C99 6.8.5). for-statement: for ( expression[opt] ; expression[opt] ; expression[opt] ) statement for ( nested-declaration expression[opt] ; expression[opt] ) statement The form with a declaration is new in C99. ??? In accordance with the old parser, the declaration may be a nested function, which is then rejected in check_for_loop_decls, but does it make any sense for this to be included in the grammar? Note in particular that the nested function does not include a trailing ';', whereas the "declaration" production includes one. Also, can we reject bad declarations earlier and cheaper than check_for_loop_decls? */ static void c_parser_for_statement (c_parser *parser) { tree block, cond, incr, save_break, save_cont, body; location_t loc = c_parser_peek_token (parser)->location; location_t for_loc = c_parser_peek_token (parser)->location; gcc_assert (c_parser_next_token_is_keyword (parser, RID_FOR)); c_parser_consume_token (parser); block = c_begin_compound_stmt (flag_isoc99); if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { /* Parse the initialization declaration or expression. 
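   As an illustrative sketch (names are invented for the example), the
   cases below handle initializations such as:

     for (;;)                            empty init expression
     for (int i = 0; i < n; i++)         C99 declaration
     for (__extension__ int i = 0;;)     __extension__ then a declaration
     for (i = 0; i < n; i++)             plain init expression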
*/ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { c_parser_consume_token (parser); c_finish_expr_stmt (loc, NULL_TREE); } else if (c_parser_next_token_starts_declspecs (parser)) { c_parser_declaration_or_fndef (parser, true, true, true, true); check_for_loop_decls (for_loc); } else if (c_parser_next_token_is_keyword (parser, RID_EXTENSION)) { /* __extension__ can start a declaration, but is also a unary operator that can start an expression. Consume all but the last of a possible series of __extension__ to determine which. */ while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD && (c_parser_peek_2nd_token (parser)->keyword == RID_EXTENSION)) c_parser_consume_token (parser); if (c_token_starts_declspecs (c_parser_peek_2nd_token (parser))) { int ext; ext = disable_extension_diagnostics (); c_parser_consume_token (parser); c_parser_declaration_or_fndef (parser, true, true, true, true); restore_extension_diagnostics (ext); check_for_loop_decls (for_loc); } else goto init_expr; } else { init_expr: c_finish_expr_stmt (loc, c_parser_expression (parser).value); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } /* Parse the loop condition. */ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { c_parser_consume_token (parser); cond = NULL_TREE; } else { cond = c_parser_condition (parser); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } /* Parse the increment expression. */ if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) incr = c_process_expr_stmt (loc, NULL_TREE); else incr = c_process_expr_stmt (loc, c_parser_expression (parser).value); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else { cond = error_mark_node; incr = error_mark_node; } save_break = c_break_label; c_break_label = NULL_TREE; save_cont = c_cont_label; c_cont_label = NULL_TREE; body = c_parser_c99_block_statement (parser); c_finish_loop (loc, cond, incr, body, c_break_label, c_cont_label, true); add_stmt (c_end_compound_stmt (loc, block, flag_isoc99)); c_break_label = save_break; c_cont_label = save_cont; } /* Parse an asm statement, a GNU extension. This is a full-blown asm statement with inputs, outputs, clobbers, and volatile tag allowed. asm-statement: asm type-qualifier[opt] ( asm-argument ) ; asm type-qualifier[opt] goto ( asm-goto-argument ) ; asm-argument: asm-string-literal asm-string-literal : asm-operands[opt] asm-string-literal : asm-operands[opt] : asm-operands[opt] asm-string-literal : asm-operands[opt] : asm-operands[opt] : asm-clobbers[opt] asm-goto-argument: asm-string-literal : : asm-operands[opt] : asm-clobbers[opt] : asm-goto-operands Qualifiers other than volatile are accepted in the syntax but warned for.
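As an illustration, one accepted form with all three sections is asm volatile ("add %1, %0" : "=r" (dst) : "r" (src) : "cc"); and, for the goto form, asm goto ("jmp %l0" : : : : done); where the trailing section names the C labels the asm may branch to.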
*/ static tree c_parser_asm_statement (c_parser *parser) { tree quals, str, outputs, inputs, clobbers, labels, ret; bool simple, is_goto; location_t asm_loc = c_parser_peek_token (parser)->location; int section, nsections; gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM)); c_parser_consume_token (parser); if (c_parser_next_token_is_keyword (parser, RID_VOLATILE)) { quals = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else if (c_parser_next_token_is_keyword (parser, RID_CONST) || c_parser_next_token_is_keyword (parser, RID_RESTRICT)) { warning_at (c_parser_peek_token (parser)->location, 0, "%E qualifier ignored on asm", c_parser_peek_token (parser)->value); quals = NULL_TREE; c_parser_consume_token (parser); } else quals = NULL_TREE; is_goto = false; if (c_parser_next_token_is_keyword (parser, RID_GOTO)) { c_parser_consume_token (parser); is_goto = true; } /* ??? Follow the C++ parser rather than using the lex_untranslated_string kludge. */ parser->lex_untranslated_string = true; ret = NULL; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) goto error; str = c_parser_asm_string_literal (parser); if (str == NULL_TREE) goto error_close_paren; simple = true; outputs = NULL_TREE; inputs = NULL_TREE; clobbers = NULL_TREE; labels = NULL_TREE; if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN) && !is_goto) goto done_asm; /* Parse each colon-delimited section of operands. */ nsections = 3 + is_goto; for (section = 0; section < nsections; ++section) { if (!c_parser_require (parser, CPP_COLON, is_goto ? "expected %<:%>" : "expected %<:%> or %<)%>")) goto error_close_paren; /* Once past any colon, we're no longer a simple asm. */ simple = false; if ((!c_parser_next_token_is (parser, CPP_COLON) && !c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) || section == 3) switch (section) { case 0: /* For asm goto, we don't allow output operands, but reserve the slot for a future extension that does allow them. */ if (!is_goto) outputs = c_parser_asm_operands (parser, false); break; case 1: inputs = c_parser_asm_operands (parser, true); break; case 2: clobbers = c_parser_asm_clobbers (parser); break; case 3: labels = c_parser_asm_goto_operands (parser); break; default: gcc_unreachable (); } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN) && !is_goto) goto done_asm; } done_asm: if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); goto error; } if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>")) c_parser_skip_to_end_of_block_or_statement (parser); ret = build_asm_stmt (quals, build_asm_expr (asm_loc, str, outputs, inputs, clobbers, labels, simple)); error: parser->lex_untranslated_string = false; return ret; error_close_paren: c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); goto error; } /* Parse asm operands, a GNU extension. If CONVERT_P (for inputs but not outputs), apply the default conversion of functions and arrays to pointers. 
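An operand may also carry a symbolic name in square brackets, e.g. [result] "=r" (res), which the code below turns into a build_string name paired with the constraint string and the fully folded expression.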
asm-operands: asm-operand asm-operands , asm-operand asm-operand: asm-string-literal ( expression ) [ identifier ] asm-string-literal ( expression ) */ static tree c_parser_asm_operands (c_parser *parser, bool convert_p) { tree list = NULL_TREE; location_t loc; while (true) { tree name, str; struct c_expr expr; if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE)) { c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { tree id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); name = build_string (IDENTIFIER_LENGTH (id), IDENTIFIER_POINTER (id)); } else { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL); return NULL_TREE; } c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); } else name = NULL_TREE; str = c_parser_asm_string_literal (parser); if (str == NULL_TREE) return NULL_TREE; parser->lex_untranslated_string = false; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { parser->lex_untranslated_string = true; return NULL_TREE; } loc = c_parser_peek_token (parser)->location; expr = c_parser_expression (parser); if (convert_p) expr = default_function_array_conversion (loc, expr); expr.value = c_fully_fold (expr.value, false, NULL); parser->lex_untranslated_string = true; if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return NULL_TREE; } list = chainon (list, build_tree_list (build_tree_list (name, str), expr.value)); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } return list; } /* Parse asm clobbers, a GNU extension. asm-clobbers: asm-string-literal asm-clobbers , asm-string-literal */ static tree c_parser_asm_clobbers (c_parser *parser) { tree list = NULL_TREE; while (true) { tree str = c_parser_asm_string_literal (parser); if (str) list = tree_cons (NULL_TREE, str, list); else return NULL_TREE; if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } return list; } /* Parse asm goto labels, a GNU extension. asm-goto-operands: identifier asm-goto-operands , identifier */ static tree c_parser_asm_goto_operands (c_parser *parser) { tree list = NULL_TREE; while (true) { tree name, label; if (c_parser_next_token_is (parser, CPP_NAME)) { c_token *tok = c_parser_peek_token (parser); name = tok->value; label = lookup_label_for_goto (tok->location, name); c_parser_consume_token (parser); TREE_USED (label) = 1; } else { c_parser_error (parser, "expected identifier"); return NULL_TREE; } name = build_string (IDENTIFIER_LENGTH (name), IDENTIFIER_POINTER (name)); list = tree_cons (name, label, list); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else return nreverse (list); } } /* Parse an expression other than a compound expression; that is, an assignment expression (C90 6.3.16, C99 6.5.16). If AFTER is not NULL then it is an Objective-C message expression which is the primary-expression starting the expression as an initializer. assignment-expression: conditional-expression unary-expression assignment-operator assignment-expression assignment-operator: one of = *= /= %= += -= <<= >>= &= ^= |= In GNU C we accept any conditional expression on the LHS and diagnose the invalid lvalue rather than producing a syntax error. 
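For instance, (flag ? a : b) = 1; is parsed here as an assignment and only afterwards rejected by build_modify_expr for lacking an lvalue on the left.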
*/ static struct c_expr c_parser_expr_no_commas (c_parser *parser, struct c_expr *after) { struct c_expr lhs, rhs, ret; enum tree_code code; location_t op_location, exp_location; gcc_assert (!after || c_dialect_objc ()); lhs = c_parser_conditional_expression (parser, after); op_location = c_parser_peek_token (parser)->location; switch (c_parser_peek_token (parser)->type) { case CPP_EQ: code = NOP_EXPR; break; case CPP_MULT_EQ: code = MULT_EXPR; break; case CPP_DIV_EQ: code = TRUNC_DIV_EXPR; break; case CPP_MOD_EQ: code = TRUNC_MOD_EXPR; break; case CPP_PLUS_EQ: code = PLUS_EXPR; break; case CPP_MINUS_EQ: code = MINUS_EXPR; break; case CPP_LSHIFT_EQ: code = LSHIFT_EXPR; break; case CPP_RSHIFT_EQ: code = RSHIFT_EXPR; break; case CPP_AND_EQ: code = BIT_AND_EXPR; break; case CPP_XOR_EQ: code = BIT_XOR_EXPR; break; case CPP_OR_EQ: code = BIT_IOR_EXPR; break; default: return lhs; } c_parser_consume_token (parser); exp_location = c_parser_peek_token (parser)->location; rhs = c_parser_expr_no_commas (parser, NULL); rhs = default_function_array_conversion (exp_location, rhs); ret.value = build_modify_expr (op_location, lhs.value, lhs.original_type, code, exp_location, rhs.value, rhs.original_type); if (code == NOP_EXPR) ret.original_code = MODIFY_EXPR; else { TREE_NO_WARNING (ret.value) = 1; ret.original_code = ERROR_MARK; } ret.original_type = NULL; return ret; } /* Parse a conditional expression (C90 6.3.15, C99 6.5.15). If AFTER is not NULL then it is an Objective-C message expression which is the primary-expression starting the expression as an initializer. conditional-expression: logical-OR-expression logical-OR-expression ? expression : conditional-expression GNU extensions: conditional-expression: logical-OR-expression ? : conditional-expression */ static struct c_expr c_parser_conditional_expression (c_parser *parser, struct c_expr *after) { struct c_expr cond, exp1, exp2, ret; location_t cond_loc, colon_loc; gcc_assert (!after || c_dialect_objc ()); cond = c_parser_binary_expression (parser, after); if (c_parser_next_token_is_not (parser, CPP_QUERY)) return cond; cond_loc = c_parser_peek_token (parser)->location; cond = default_function_array_conversion (cond_loc, cond); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COLON)) { tree eptype = NULL_TREE; pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic, "ISO C forbids omitting the middle term of a ?: expression"); if (TREE_CODE (cond.value) == EXCESS_PRECISION_EXPR) { eptype = TREE_TYPE (cond.value); cond.value = TREE_OPERAND (cond.value, 0); } /* Make sure first operand is calculated only once. 
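This implements the GNU omitted-middle-operand extension: in p = q ?: r; the expression q is wrapped in c_save_expr so it is evaluated only once, serving both as the condition and, when nonzero, as the result.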
*/ exp1.value = c_save_expr (default_conversion (cond.value)); if (eptype) exp1.value = build1 (EXCESS_PRECISION_EXPR, eptype, exp1.value); exp1.original_type = NULL; cond.value = c_objc_common_truthvalue_conversion (cond_loc, exp1.value); c_inhibit_evaluation_warnings += cond.value == truthvalue_true_node; } else { cond.value = c_objc_common_truthvalue_conversion (cond_loc, default_conversion (cond.value)); c_inhibit_evaluation_warnings += cond.value == truthvalue_false_node; exp1 = c_parser_expression_conv (parser); c_inhibit_evaluation_warnings += ((cond.value == truthvalue_true_node) - (cond.value == truthvalue_false_node)); } colon_loc = c_parser_peek_token (parser)->location; if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) { c_inhibit_evaluation_warnings -= cond.value == truthvalue_true_node; ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } { location_t exp2_loc = c_parser_peek_token (parser)->location; exp2 = c_parser_conditional_expression (parser, NULL); exp2 = default_function_array_conversion (exp2_loc, exp2); } c_inhibit_evaluation_warnings -= cond.value == truthvalue_true_node; ret.value = build_conditional_expr (colon_loc, cond.value, cond.original_code == C_MAYBE_CONST_EXPR, exp1.value, exp1.original_type, exp2.value, exp2.original_type); ret.original_code = ERROR_MARK; if (exp1.value == error_mark_node || exp2.value == error_mark_node) ret.original_type = NULL; else { tree t1, t2; /* If both sides are enum type, the default conversion will have made the type of the result be an integer type. We want to remember the enum types we started with. */ t1 = exp1.original_type ? exp1.original_type : TREE_TYPE (exp1.value); t2 = exp2.original_type ? exp2.original_type : TREE_TYPE (exp2.value); ret.original_type = ((t1 != error_mark_node && t2 != error_mark_node && (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))) ? t1 : NULL); } return ret; } /* Parse a binary expression; that is, a logical-OR-expression (C90 6.3.5-6.3.14, C99 6.5.5-6.5.14). If AFTER is not NULL then it is an Objective-C message expression which is the primary-expression starting the expression as an initializer. 
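As a worked illustration of the operator-precedence scheme used below: parsing a + b * c - d pushes a and +, then b; * binds tighter than +, so b becomes its LHS; the following -, binding no tighter than * or +, first reduces b * c and then a + (b * c), after which d completes (a + (b * c)) - d.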
multiplicative-expression: cast-expression multiplicative-expression * cast-expression multiplicative-expression / cast-expression multiplicative-expression % cast-expression additive-expression: multiplicative-expression additive-expression + multiplicative-expression additive-expression - multiplicative-expression shift-expression: additive-expression shift-expression << additive-expression shift-expression >> additive-expression relational-expression: shift-expression relational-expression < shift-expression relational-expression > shift-expression relational-expression <= shift-expression relational-expression >= shift-expression equality-expression: relational-expression equality-expression == relational-expression equality-expression != relational-expression AND-expression: equality-expression AND-expression & equality-expression exclusive-OR-expression: AND-expression exclusive-OR-expression ^ AND-expression inclusive-OR-expression: exclusive-OR-expression inclusive-OR-expression | exclusive-OR-expression logical-AND-expression: inclusive-OR-expression logical-AND-expression && inclusive-OR-expression logical-OR-expression: logical-AND-expression logical-OR-expression || logical-AND-expression */ static struct c_expr c_parser_binary_expression (c_parser *parser, struct c_expr *after) { /* A binary expression is parsed using operator-precedence parsing, with the operands being cast expressions. All the binary operators are left-associative. Thus a binary expression is of form: E0 op1 E1 op2 E2 ... which we represent on a stack. On the stack, the precedence levels are strictly increasing. When a new operator is encountered of higher precedence than that at the top of the stack, it is pushed; its LHS is the top expression, and its RHS is everything parsed until it is popped. When a new operator is encountered with precedence less than or equal to that at the top of the stack, triples E[i-1] op[i] E[i] are popped and replaced by the result of the operation until the operator at the top of the stack has lower precedence than the new operator or there is only one element on the stack; then the top expression is the LHS of the new operator. In the case of logical AND and OR expressions, we also need to adjust c_inhibit_evaluation_warnings as appropriate when the operators are pushed and popped. */ /* The precedence levels, where 0 is a dummy lowest level used for the bottom of the stack. */ enum prec { PREC_NONE, PREC_LOGOR, PREC_LOGAND, PREC_BITOR, PREC_BITXOR, PREC_BITAND, PREC_EQ, PREC_REL, PREC_SHIFT, PREC_ADD, PREC_MULT, NUM_PRECS }; struct { /* The expression at this stack level. */ struct c_expr expr; /* The precedence of the operator on its left, PREC_NONE at the bottom of the stack. */ enum prec prec; /* The operation on its left. */ enum tree_code op; /* The source location of this operation. */ location_t loc; } stack[NUM_PRECS]; int sp; /* Location of the binary operator. */ location_t binary_loc = UNKNOWN_LOCATION; /* Quiet warning. 
*/ #define POP \ do { \ switch (stack[sp].op) \ { \ case TRUTH_ANDIF_EXPR: \ c_inhibit_evaluation_warnings -= (stack[sp - 1].expr.value \ == truthvalue_false_node); \ break; \ case TRUTH_ORIF_EXPR: \ c_inhibit_evaluation_warnings -= (stack[sp - 1].expr.value \ == truthvalue_true_node); \ break; \ default: \ break; \ } \ stack[sp - 1].expr \ = default_function_array_conversion (stack[sp - 1].loc, \ stack[sp - 1].expr); \ stack[sp].expr \ = default_function_array_conversion (stack[sp].loc, stack[sp].expr); \ stack[sp - 1].expr = parser_build_binary_op (stack[sp].loc, \ stack[sp].op, \ stack[sp - 1].expr, \ stack[sp].expr); \ sp--; \ } while (0) gcc_assert (!after || c_dialect_objc ()); stack[0].loc = c_parser_peek_token (parser)->location; stack[0].expr = c_parser_cast_expression (parser, after); stack[0].prec = PREC_NONE; sp = 0; while (true) { enum prec oprec; enum tree_code ocode; if (parser->error) goto out; switch (c_parser_peek_token (parser)->type) { case CPP_MULT: oprec = PREC_MULT; ocode = MULT_EXPR; break; case CPP_DIV: oprec = PREC_MULT; ocode = TRUNC_DIV_EXPR; break; case CPP_MOD: oprec = PREC_MULT; ocode = TRUNC_MOD_EXPR; break; case CPP_PLUS: oprec = PREC_ADD; ocode = PLUS_EXPR; break; case CPP_MINUS: oprec = PREC_ADD; ocode = MINUS_EXPR; break; case CPP_LSHIFT: oprec = PREC_SHIFT; ocode = LSHIFT_EXPR; break; case CPP_RSHIFT: oprec = PREC_SHIFT; ocode = RSHIFT_EXPR; break; case CPP_LESS: oprec = PREC_REL; ocode = LT_EXPR; break; case CPP_GREATER: oprec = PREC_REL; ocode = GT_EXPR; break; case CPP_LESS_EQ: oprec = PREC_REL; ocode = LE_EXPR; break; case CPP_GREATER_EQ: oprec = PREC_REL; ocode = GE_EXPR; break; case CPP_EQ_EQ: oprec = PREC_EQ; ocode = EQ_EXPR; break; case CPP_NOT_EQ: oprec = PREC_EQ; ocode = NE_EXPR; break; case CPP_AND: oprec = PREC_BITAND; ocode = BIT_AND_EXPR; break; case CPP_XOR: oprec = PREC_BITXOR; ocode = BIT_XOR_EXPR; break; case CPP_OR: oprec = PREC_BITOR; ocode = BIT_IOR_EXPR; break; case CPP_AND_AND: oprec = PREC_LOGAND; ocode = TRUTH_ANDIF_EXPR; break; case CPP_OR_OR: oprec = PREC_LOGOR; ocode = TRUTH_ORIF_EXPR; break; default: /* Not a binary operator, so end of the binary expression. */ goto out; } binary_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); while (oprec <= stack[sp].prec) POP; switch (ocode) { case TRUTH_ANDIF_EXPR: stack[sp].expr = default_function_array_conversion (stack[sp].loc, stack[sp].expr); stack[sp].expr.value = c_objc_common_truthvalue_conversion (stack[sp].loc, default_conversion (stack[sp].expr.value)); c_inhibit_evaluation_warnings += (stack[sp].expr.value == truthvalue_false_node); break; case TRUTH_ORIF_EXPR: stack[sp].expr = default_function_array_conversion (stack[sp].loc, stack[sp].expr); stack[sp].expr.value = c_objc_common_truthvalue_conversion (stack[sp].loc, default_conversion (stack[sp].expr.value)); c_inhibit_evaluation_warnings += (stack[sp].expr.value == truthvalue_true_node); break; default: break; } sp++; stack[sp].loc = binary_loc; stack[sp].expr = c_parser_cast_expression (parser, NULL); stack[sp].prec = oprec; stack[sp].op = ocode; stack[sp].loc = binary_loc; } out: while (sp > 0) POP; return stack[0].expr; #undef POP } /* Parse a cast expression (C90 6.3.4, C99 6.5.4). If AFTER is not NULL then it is an Objective-C message expression which is the primary-expression starting the expression as an initializer. 
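For example, (int) x below is parsed as a cast, while (int) { 0 } is dispatched to c_parser_postfix_expression_after_paren_type as a compound literal; the two are distinguished only by the token that follows the closing parenthesis.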
cast-expression: unary-expression ( type-name ) unary-expression */ static struct c_expr c_parser_cast_expression (c_parser *parser, struct c_expr *after) { location_t cast_loc = c_parser_peek_token (parser)->location; gcc_assert (!after || c_dialect_objc ()); if (after) return c_parser_postfix_expression_after_primary (parser, cast_loc, *after); /* If the expression begins with a parenthesized type name, it may be either a cast or a compound literal; we need to see whether the next character is '{' to tell the difference. If not, it is a unary expression. */ if (c_parser_next_token_is (parser, CPP_OPEN_PAREN) && c_token_starts_typename (c_parser_peek_2nd_token (parser))) { struct c_type_name *type_name; struct c_expr ret; struct c_expr expr; c_parser_consume_token (parser); type_name = c_parser_type_name (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (type_name == NULL) { ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } /* Save the cast type in the function's used types hash table. */ used_types_insert (type_name->specs->type); if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) return c_parser_postfix_expression_after_paren_type (parser, type_name, cast_loc); { location_t expr_loc = c_parser_peek_token (parser)->location; expr = c_parser_cast_expression (parser, NULL); expr = default_function_array_conversion (expr_loc, expr); } ret.value = c_cast_expr (cast_loc, type_name, expr.value); ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } else return c_parser_unary_expression (parser); } /* Parse a unary expression (C90 6.3.3, C99 6.5.3). unary-expression: postfix-expression ++ unary-expression -- unary-expression unary-operator cast-expression sizeof unary-expression sizeof ( type-name ) unary-operator: one of & * + - ~ ! GNU extensions: unary-expression: __alignof__ unary-expression __alignof__ ( type-name ) && identifier unary-operator: one of __extension__ __real__ __imag__ In addition, the GNU syntax treats ++ and -- as unary operators, so they may be applied to cast expressions with errors for non-lvalues given later.
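Thus ++(int) x is accepted by the grammar here and diagnosed only during semantic analysis as lacking an lvalue operand.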
*/ static struct c_expr c_parser_unary_expression (c_parser *parser) { int ext; struct c_expr ret, op; location_t op_loc = c_parser_peek_token (parser)->location; location_t exp_loc; ret.original_code = ERROR_MARK; ret.original_type = NULL; switch (c_parser_peek_token (parser)->type) { case CPP_PLUS_PLUS: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, PREINCREMENT_EXPR, op); case CPP_MINUS_MINUS: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, PREDECREMENT_EXPR, op); case CPP_AND: c_parser_consume_token (parser); return parser_build_unary_op (op_loc, ADDR_EXPR, c_parser_cast_expression (parser, NULL)); case CPP_MULT: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); ret.value = build_indirect_ref (op_loc, op.value, RO_UNARY_STAR); return ret; case CPP_PLUS: if (!c_dialect_objc () && !in_system_header) warning_at (op_loc, OPT_Wtraditional, "traditional C rejects the unary plus operator"); c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, CONVERT_EXPR, op); case CPP_MINUS: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, NEGATE_EXPR, op); case CPP_COMPL: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, BIT_NOT_EXPR, op); case CPP_NOT: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, TRUTH_NOT_EXPR, op); case CPP_AND_AND: /* Refer to the address of a label as a pointer. 
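This is the GNU labels-as-values extension: for example, void *p = &&retry; yields a void * value that may later be used in a computed jump, goto *p;.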
*/ c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { ret.value = finish_label_address_expr (c_parser_peek_token (parser)->value, op_loc); c_parser_consume_token (parser); } else { c_parser_error (parser, "expected identifier"); ret.value = error_mark_node; } return ret; case CPP_KEYWORD: switch (c_parser_peek_token (parser)->keyword) { case RID_SIZEOF: return c_parser_sizeof_expression (parser); case RID_ALIGNOF: return c_parser_alignof_expression (parser); case RID_EXTENSION: c_parser_consume_token (parser); ext = disable_extension_diagnostics (); ret = c_parser_cast_expression (parser, NULL); restore_extension_diagnostics (ext); return ret; case RID_REALPART: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, REALPART_EXPR, op); case RID_IMAGPART: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, IMAGPART_EXPR, op); default: return c_parser_postfix_expression (parser); } default: return c_parser_postfix_expression (parser); } } /* Parse a sizeof expression. */ static struct c_expr c_parser_sizeof_expression (c_parser *parser) { struct c_expr expr; location_t expr_loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_SIZEOF)); c_parser_consume_token (parser); c_inhibit_evaluation_warnings++; in_sizeof++; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN) && c_token_starts_typename (c_parser_peek_2nd_token (parser))) { /* Either sizeof ( type-name ) or sizeof unary-expression starting with a compound literal. */ struct c_type_name *type_name; c_parser_consume_token (parser); expr_loc = c_parser_peek_token (parser)->location; type_name = c_parser_type_name (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (type_name == NULL) { struct c_expr ret; c_inhibit_evaluation_warnings--; in_sizeof--; ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { expr = c_parser_postfix_expression_after_paren_type (parser, type_name, expr_loc); goto sizeof_expr; } /* sizeof ( type-name ). */ c_inhibit_evaluation_warnings--; in_sizeof--; return c_expr_sizeof_type (expr_loc, type_name); } else { expr_loc = c_parser_peek_token (parser)->location; expr = c_parser_unary_expression (parser); sizeof_expr: c_inhibit_evaluation_warnings--; in_sizeof--; if (TREE_CODE (expr.value) == COMPONENT_REF && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1))) error_at (expr_loc, "%<sizeof%> applied to a bit-field"); return c_expr_sizeof_expr (expr_loc, expr); } } /* Parse an alignof expression. */ static struct c_expr c_parser_alignof_expression (c_parser *parser) { struct c_expr expr; location_t loc = c_parser_peek_token (parser)->location; gcc_assert (c_parser_next_token_is_keyword (parser, RID_ALIGNOF)); c_parser_consume_token (parser); c_inhibit_evaluation_warnings++; in_alignof++; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN) && c_token_starts_typename (c_parser_peek_2nd_token (parser))) { /* Either __alignof__ ( type-name ) or __alignof__ unary-expression starting with a compound literal. 
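e.g. __alignof__ (double) versus __alignof__ (struct s) { 0 }, distinguished, as for sizeof above, by whether a '{' follows the closing parenthesis.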
*/ location_t loc; struct c_type_name *type_name; struct c_expr ret; c_parser_consume_token (parser); loc = c_parser_peek_token (parser)->location; type_name = c_parser_type_name (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (type_name == NULL) { struct c_expr ret; c_inhibit_evaluation_warnings--; in_alignof--; ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { expr = c_parser_postfix_expression_after_paren_type (parser, type_name, loc); goto alignof_expr; } /* alignof ( type-name ). */ c_inhibit_evaluation_warnings--; in_alignof--; ret.value = c_alignof (loc, groktypename (type_name, NULL, NULL)); ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } else { struct c_expr ret; expr = c_parser_unary_expression (parser); alignof_expr: c_inhibit_evaluation_warnings--; in_alignof--; ret.value = c_alignof_expr (loc, expr.value); ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } } /* Parse a postfix expression (C90 6.3.1-6.3.2, C99 6.5.1-6.5.2). postfix-expression: primary-expression postfix-expression [ expression ] postfix-expression ( argument-expression-list[opt] ) postfix-expression . identifier postfix-expression -> identifier postfix-expression ++ postfix-expression -- ( type-name ) { initializer-list } ( type-name ) { initializer-list , } argument-expression-list: argument-expression argument-expression-list , argument-expression primary-expression: identifier constant string-literal ( expression ) GNU extensions: primary-expression: __func__ (treated as a keyword in GNU C) __FUNCTION__ __PRETTY_FUNCTION__ ( compound-statement ) __builtin_va_arg ( assignment-expression , type-name ) __builtin_offsetof ( type-name , offsetof-member-designator ) __builtin_choose_expr ( assignment-expression , assignment-expression , assignment-expression ) __builtin_types_compatible_p ( type-name , type-name ) offsetof-member-designator: identifier offsetof-member-designator . 
identifier offsetof-member-designator [ expression ] Objective-C: primary-expression: [ objc-receiver objc-message-args ] @selector ( objc-selector-arg ) @protocol ( identifier ) @encode ( type-name ) objc-string-literal */ static struct c_expr c_parser_postfix_expression (c_parser *parser) { struct c_expr expr, e1, e2, e3; struct c_type_name *t1, *t2; location_t loc = c_parser_peek_token (parser)->location; expr.original_code = ERROR_MARK; expr.original_type = NULL; switch (c_parser_peek_token (parser)->type) { case CPP_NUMBER: expr.value = c_parser_peek_token (parser)->value; loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); if (TREE_CODE (expr.value) == FIXED_CST && !targetm.fixed_point_supported_p ()) { error_at (loc, "fixed-point types not supported for this target"); expr.value = error_mark_node; } break; case CPP_CHAR: case CPP_CHAR16: case CPP_CHAR32: case CPP_WCHAR: expr.value = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); break; case CPP_STRING: case CPP_STRING16: case CPP_STRING32: case CPP_WSTRING: case CPP_UTF8STRING: expr.value = c_parser_peek_token (parser)->value; expr.original_code = STRING_CST; c_parser_consume_token (parser); break; case CPP_OBJC_STRING: gcc_assert (c_dialect_objc ()); expr.value = objc_build_string_object (c_parser_peek_token (parser)->value); c_parser_consume_token (parser); break; case CPP_NAME: if (c_parser_peek_token (parser)->id_kind != C_ID_ID) { c_parser_error (parser, "expected expression"); expr.value = error_mark_node; break; } { tree id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); expr.value = build_external_ref (loc, id, (c_parser_peek_token (parser)->type == CPP_OPEN_PAREN), &expr.original_type); } break; case CPP_OPEN_PAREN: /* A parenthesized expression, statement expression or compound literal. */ if (c_parser_peek_2nd_token (parser)->type == CPP_OPEN_BRACE) { /* A statement expression. */ tree stmt; location_t brace_loc; c_parser_consume_token (parser); brace_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); if (cur_stmt_list == NULL) { error_at (loc, "braced-group within expression allowed " "only inside a function"); parser->error = true; c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } stmt = c_begin_stmt_expr (); c_parser_compound_statement_nostart (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); pedwarn (loc, OPT_pedantic, "ISO C forbids braced-groups within expressions"); expr.value = c_finish_stmt_expr (brace_loc, stmt); } else if (c_token_starts_typename (c_parser_peek_2nd_token (parser))) { /* A compound literal. ??? Can we actually get here rather than going directly to c_parser_postfix_expression_after_paren_type from elsewhere? */ location_t loc; struct c_type_name *type_name; c_parser_consume_token (parser); loc = c_parser_peek_token (parser)->location; type_name = c_parser_type_name (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (type_name == NULL) { expr.value = error_mark_node; } else expr = c_parser_postfix_expression_after_paren_type (parser, type_name, loc); } else { /* A parenthesized expression.
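The MODIFY_EXPR case below sets TREE_NO_WARNING so that an assignment the user has explicitly parenthesized, e.g. if ((a = b)) ..., does not draw the usual suggest-parentheses warning.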
*/ c_parser_consume_token (parser); expr = c_parser_expression (parser); if (TREE_CODE (expr.value) == MODIFY_EXPR) TREE_NO_WARNING (expr.value) = 1; if (expr.original_code != C_MAYBE_CONST_EXPR) expr.original_code = ERROR_MARK; /* Don't change EXPR.ORIGINAL_TYPE. */ c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } break; case CPP_KEYWORD: switch (c_parser_peek_token (parser)->keyword) { case RID_FUNCTION_NAME: case RID_PRETTY_FUNCTION_NAME: case RID_C99_FUNCTION_NAME: expr.value = fname_decl (loc, c_parser_peek_token (parser)->keyword, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); break; case RID_VA_ARG: c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } e1 = c_parser_expr_no_commas (parser, NULL); e1.value = c_fully_fold (e1.value, false, NULL); if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } loc = c_parser_peek_token (parser)->location; t1 = c_parser_type_name (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (t1 == NULL) { expr.value = error_mark_node; } else { tree type_expr = NULL_TREE; expr.value = c_build_va_arg (loc, e1.value, groktypename (t1, &type_expr, NULL)); if (type_expr) { expr.value = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (expr.value), type_expr, expr.value); C_MAYBE_CONST_EXPR_NON_CONST (expr.value) = true; } } break; case RID_OFFSETOF: c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } t1 = c_parser_type_name (parser); if (t1 == NULL) { expr.value = error_mark_node; break; } if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } { tree type = groktypename (t1, NULL, NULL); tree offsetof_ref; if (type == error_mark_node) offsetof_ref = error_mark_node; else { offsetof_ref = build1 (INDIRECT_REF, type, null_pointer_node); SET_EXPR_LOCATION (offsetof_ref, loc); } /* Parse the second argument to __builtin_offsetof. We must have one identifier, and beyond that we want to accept sub structure and sub array references. 
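So, for example, __builtin_offsetof (struct s, a.b[2].c) is accepted: each '.' member and '[ ]' index is handled by the loop below, and '->' is rewritten as a zero-index array reference followed by '.'.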
*/ if (c_parser_next_token_is (parser, CPP_NAME)) { offsetof_ref = build_component_ref (loc, offsetof_ref, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); while (c_parser_next_token_is (parser, CPP_DOT) || c_parser_next_token_is (parser, CPP_OPEN_SQUARE) || c_parser_next_token_is (parser, CPP_DEREF)) { if (c_parser_next_token_is (parser, CPP_DEREF)) { loc = c_parser_peek_token (parser)->location; offsetof_ref = build_array_ref (loc, offsetof_ref, integer_zero_node); goto do_dot; } else if (c_parser_next_token_is (parser, CPP_DOT)) { do_dot: c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); break; } offsetof_ref = build_component_ref (loc, offsetof_ref, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); } else { tree idx; loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); idx = c_parser_expression (parser).value; idx = c_fully_fold (idx, false, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); offsetof_ref = build_array_ref (loc, offsetof_ref, idx); } } } else c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); expr.value = fold_offsetof (offsetof_ref, NULL_TREE); } break; case RID_CHOOSE_EXPR: c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } loc = c_parser_peek_token (parser)->location; e1 = c_parser_expr_no_commas (parser, NULL); if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } e2 = c_parser_expr_no_commas (parser, NULL); if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } e3 = c_parser_expr_no_commas (parser, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); { tree c; c = e1.value; if (TREE_CODE (c) != INTEGER_CST || !INTEGRAL_TYPE_P (TREE_TYPE (c))) error_at (loc, "first argument to %<__builtin_choose_expr%> not" " a constant"); constant_expression_warning (c); expr = integer_zerop (c) ? e3 : e2; } break; case RID_TYPES_COMPATIBLE_P: c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } t1 = c_parser_type_name (parser); if (t1 == NULL) { expr.value = error_mark_node; break; } if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } t2 = c_parser_type_name (parser); if (t2 == NULL) { expr.value = error_mark_node; break; } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); { tree e1, e2; e1 = TYPE_MAIN_VARIANT (groktypename (t1, NULL, NULL)); e2 = TYPE_MAIN_VARIANT (groktypename (t2, NULL, NULL)); expr.value = comptypes (e1, e2) ? 
build_int_cst (NULL_TREE, 1) : build_int_cst (NULL_TREE, 0); } break; case RID_AT_SELECTOR: gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } { tree sel = c_parser_objc_selector_arg (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); expr.value = objc_build_selector_expr (loc, sel); } break; case RID_AT_PROTOCOL: gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } { tree id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); expr.value = objc_build_protocol_expr (id); } break; case RID_AT_ENCODE: /* Extension to support C-structures in the archiver. */ gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } t1 = c_parser_type_name (parser); if (t1 == NULL) { expr.value = error_mark_node; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); break; } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); { tree type = groktypename (t1, NULL, NULL); expr.value = objc_build_encode_expr (type); } break; default: c_parser_error (parser, "expected expression"); expr.value = error_mark_node; break; } break; case CPP_OPEN_SQUARE: if (c_dialect_objc ()) { tree receiver, args; c_parser_consume_token (parser); receiver = c_parser_objc_receiver (parser); args = c_parser_objc_message_args (parser); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); expr.value = objc_build_message_expr (build_tree_list (receiver, args)); break; } /* Else fall through to report error. */ default: c_parser_error (parser, "expected expression"); expr.value = error_mark_node; break; } return c_parser_postfix_expression_after_primary (parser, loc, expr); } /* Parse a postfix expression after a parenthesized type name: the brace-enclosed initializer of a compound literal, possibly followed by some postfix operators. This is separate because it is not possible to tell until after the type name whether a cast expression has a cast or a compound literal, or whether the operand of sizeof is a parenthesized type name or starts with a compound literal. TYPE_LOC is the location where TYPE_NAME starts--the location of the first token after the parentheses around the type name. 
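For example, in (int []) { 1, 2, 3 } [1] the brace-enclosed initializer is parsed here and the trailing array reference is then handled by c_parser_postfix_expression_after_primary.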
*/ static struct c_expr c_parser_postfix_expression_after_paren_type (c_parser *parser, struct c_type_name *type_name, location_t type_loc) { tree type; struct c_expr init; bool non_const; struct c_expr expr; location_t start_loc; tree type_expr = NULL_TREE; bool type_expr_const = true; check_compound_literal_type (type_loc, type_name); start_init (NULL_TREE, NULL, 0); type = groktypename (type_name, &type_expr, &type_expr_const); start_loc = c_parser_peek_token (parser)->location; if (type != error_mark_node && C_TYPE_VARIABLE_SIZE (type)) { error_at (type_loc, "compound literal has variable size"); type = error_mark_node; } init = c_parser_braced_init (parser, type, false); finish_init (); maybe_warn_string_init (type, init); if (type != error_mark_node && !ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (type)) && current_function_decl) { error ("compound literal qualified by address-space qualifier"); type = error_mark_node; } if (!flag_isoc99) pedwarn (start_loc, OPT_pedantic, "ISO C90 forbids compound literals"); non_const = ((init.value && TREE_CODE (init.value) == CONSTRUCTOR) ? CONSTRUCTOR_NON_CONST (init.value) : init.original_code == C_MAYBE_CONST_EXPR); non_const |= !type_expr_const; expr.value = build_compound_literal (start_loc, type, init.value, non_const); expr.original_code = ERROR_MARK; expr.original_type = NULL; if (type_expr) { if (TREE_CODE (expr.value) == C_MAYBE_CONST_EXPR) { gcc_assert (C_MAYBE_CONST_EXPR_PRE (expr.value) == NULL_TREE); C_MAYBE_CONST_EXPR_PRE (expr.value) = type_expr; } else { gcc_assert (!non_const); expr.value = build2 (C_MAYBE_CONST_EXPR, type, type_expr, expr.value); } } return c_parser_postfix_expression_after_primary (parser, start_loc, expr); } /* Parse a postfix expression after the initial primary or compound literal; that is, parse a series of postfix operators. EXPR_LOC is the location of the primary expression. */ static struct c_expr c_parser_postfix_expression_after_primary (c_parser *parser, location_t expr_loc, struct c_expr expr) { struct c_expr orig_expr; tree ident, idx; VEC(tree,gc) *exprlist; VEC(tree,gc) *origtypes; while (true) { location_t op_loc = c_parser_peek_token (parser)->location; switch (c_parser_peek_token (parser)->type) { case CPP_OPEN_SQUARE: /* Array reference. */ c_parser_consume_token (parser); idx = c_parser_expression (parser).value; c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); expr.value = build_array_ref (op_loc, expr.value, idx); expr.original_code = ERROR_MARK; expr.original_type = NULL; break; case CPP_OPEN_PAREN: /* Function call. */ c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) exprlist = NULL; else exprlist = c_parser_expr_list (parser, true, false, &origtypes); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); orig_expr = expr; /* FIXME diagnostics: Ideally we want the FUNCNAME, not the "(" after the FUNCNAME, which is what we have now. */ expr.value = build_function_call_vec (op_loc, expr.value, exprlist, origtypes); expr.original_code = ERROR_MARK; if (TREE_CODE (expr.value) == INTEGER_CST && TREE_CODE (orig_expr.value) == FUNCTION_DECL && DECL_BUILT_IN_CLASS (orig_expr.value) == BUILT_IN_NORMAL && DECL_FUNCTION_CODE (orig_expr.value) == BUILT_IN_CONSTANT_P) expr.original_code = C_MAYBE_CONST_EXPR; expr.original_type = NULL; if (exprlist != NULL) { release_tree_vector (exprlist); release_tree_vector (origtypes); } break; case CPP_DOT: /* Structure element reference. 
*/ c_parser_consume_token (parser); expr = default_function_array_conversion (expr_loc, expr); if (c_parser_next_token_is (parser, CPP_NAME)) ident = c_parser_peek_token (parser)->value; else { c_parser_error (parser, "expected identifier"); expr.value = error_mark_node; expr.original_code = ERROR_MARK; expr.original_type = NULL; return expr; } c_parser_consume_token (parser); expr.value = build_component_ref (op_loc, expr.value, ident); expr.original_code = ERROR_MARK; if (TREE_CODE (expr.value) != COMPONENT_REF) expr.original_type = NULL; else { /* Remember the original type of a bitfield. */ tree field = TREE_OPERAND (expr.value, 1); if (TREE_CODE (field) != FIELD_DECL) expr.original_type = NULL; else expr.original_type = DECL_BIT_FIELD_TYPE (field); } break; case CPP_DEREF: /* Structure element reference. */ c_parser_consume_token (parser); expr = default_function_array_conversion (expr_loc, expr); if (c_parser_next_token_is (parser, CPP_NAME)) ident = c_parser_peek_token (parser)->value; else { c_parser_error (parser, "expected identifier"); expr.value = error_mark_node; expr.original_code = ERROR_MARK; expr.original_type = NULL; return expr; } c_parser_consume_token (parser); expr.value = build_component_ref (op_loc, build_indirect_ref (op_loc, expr.value, RO_ARROW), ident); expr.original_code = ERROR_MARK; if (TREE_CODE (expr.value) != COMPONENT_REF) expr.original_type = NULL; else { /* Remember the original type of a bitfield. */ tree field = TREE_OPERAND (expr.value, 1); if (TREE_CODE (field) != FIELD_DECL) expr.original_type = NULL; else expr.original_type = DECL_BIT_FIELD_TYPE (field); } break; case CPP_PLUS_PLUS: /* Postincrement. */ c_parser_consume_token (parser); expr = default_function_array_conversion (expr_loc, expr); expr.value = build_unary_op (op_loc, POSTINCREMENT_EXPR, expr.value, 0); expr.original_code = ERROR_MARK; expr.original_type = NULL; break; case CPP_MINUS_MINUS: /* Postdecrement. */ c_parser_consume_token (parser); expr = default_function_array_conversion (expr_loc, expr); expr.value = build_unary_op (op_loc, POSTDECREMENT_EXPR, expr.value, 0); expr.original_code = ERROR_MARK; expr.original_type = NULL; break; default: return expr; } } } /* Parse an expression (C90 6.3.17, C99 6.5.17). expression: assignment-expression expression , assignment-expression */ static struct c_expr c_parser_expression (c_parser *parser) { struct c_expr expr; expr = c_parser_expr_no_commas (parser, NULL); while (c_parser_next_token_is (parser, CPP_COMMA)) { struct c_expr next; location_t loc = c_parser_peek_token (parser)->location; location_t expr_loc; c_parser_consume_token (parser); expr_loc = c_parser_peek_token (parser)->location; next = c_parser_expr_no_commas (parser, NULL); next = default_function_array_conversion (expr_loc, next); expr.value = build_compound_expr (loc, expr.value, next.value); expr.original_code = COMPOUND_EXPR; expr.original_type = next.original_type; } return expr; } /* Parse an expression and convert functions or arrays to pointers. */ static struct c_expr c_parser_expression_conv (c_parser *parser) { struct c_expr expr; location_t loc = c_parser_peek_token (parser)->location; expr = c_parser_expression (parser); expr = default_function_array_conversion (loc, expr); return expr; } /* Parse a non-empty list of expressions. If CONVERT_P, convert functions and arrays to pointers. If FOLD_P, fold the expressions. 
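For example, the argument list of a call f (a, b) is parsed this way with CONVERT_P true, so that array and function arguments decay to pointers (see the CPP_OPEN_PAREN case above).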
nonempty-expr-list: assignment-expression nonempty-expr-list , assignment-expression */ static VEC(tree,gc) * c_parser_expr_list (c_parser *parser, bool convert_p, bool fold_p, VEC(tree,gc) **p_orig_types) { VEC(tree,gc) *ret; VEC(tree,gc) *orig_types; struct c_expr expr; location_t loc = c_parser_peek_token (parser)->location; ret = make_tree_vector (); if (p_orig_types == NULL) orig_types = NULL; else orig_types = make_tree_vector (); expr = c_parser_expr_no_commas (parser, NULL); if (convert_p) expr = default_function_array_conversion (loc, expr); if (fold_p) expr.value = c_fully_fold (expr.value, false, NULL); VEC_quick_push (tree, ret, expr.value); if (orig_types != NULL) VEC_quick_push (tree, orig_types, expr.original_type); while (c_parser_next_token_is (parser, CPP_COMMA)) { c_parser_consume_token (parser); loc = c_parser_peek_token (parser)->location; expr = c_parser_expr_no_commas (parser, NULL); if (convert_p) expr = default_function_array_conversion (loc, expr); if (fold_p) expr.value = c_fully_fold (expr.value, false, NULL); VEC_safe_push (tree, gc, ret, expr.value); if (orig_types != NULL) VEC_safe_push (tree, gc, orig_types, expr.original_type); } if (orig_types != NULL) *p_orig_types = orig_types; return ret; } /* Parse Objective-C-specific constructs. */ /* Parse an objc-class-definition. objc-class-definition: @interface identifier objc-superclass[opt] objc-protocol-refs[opt] objc-class-instance-variables[opt] objc-methodprotolist @end @implementation identifier objc-superclass[opt] objc-class-instance-variables[opt] @interface identifier ( identifier ) objc-protocol-refs[opt] objc-methodprotolist @end @implementation identifier ( identifier ) objc-superclass: : identifier "@interface identifier (" must start "@interface identifier ( identifier ) ...": objc-methodprotolist in the first production may not start with a parenthesized identifier as a declarator of a data definition with no declaration specifiers if the objc-superclass, objc-protocol-refs and objc-class-instance-variables are omitted. 
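For example, @interface Foo ( Printing ) introduces a category named Printing on class Foo, and @implementation Foo ( Printing ) begins the corresponding category implementation.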
*/ static void c_parser_objc_class_definition (c_parser *parser) { bool iface_p; tree id1; tree superclass; if (c_parser_next_token_is_keyword (parser, RID_AT_INTERFACE)) iface_p = true; else if (c_parser_next_token_is_keyword (parser, RID_AT_IMPLEMENTATION)) iface_p = false; else gcc_unreachable (); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); return; } id1 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { tree id2; tree proto = NULL_TREE; c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return; } id2 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (!iface_p) { objc_start_category_implementation (id1, id2); return; } if (c_parser_next_token_is (parser, CPP_LESS)) proto = c_parser_objc_protocol_refs (parser); objc_start_category_interface (id1, id2, proto); c_parser_objc_methodprotolist (parser); c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>"); objc_finish_interface (); return; } if (c_parser_next_token_is (parser, CPP_COLON)) { c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); return; } superclass = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else superclass = NULL_TREE; if (iface_p) { tree proto = NULL_TREE; if (c_parser_next_token_is (parser, CPP_LESS)) proto = c_parser_objc_protocol_refs (parser); objc_start_class_interface (id1, superclass, proto); } else objc_start_class_implementation (id1, superclass); if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) c_parser_objc_class_instance_variables (parser); if (iface_p) { objc_continue_interface (); c_parser_objc_methodprotolist (parser); c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>"); objc_finish_interface (); } else { objc_continue_implementation (); return; } } /* Parse objc-class-instance-variables. objc-class-instance-variables: { objc-instance-variable-decl-list[opt] } objc-instance-variable-decl-list: objc-visibility-spec objc-instance-variable-decl ; ; objc-instance-variable-decl-list objc-visibility-spec objc-instance-variable-decl-list objc-instance-variable-decl ; objc-instance-variable-decl-list ; objc-visibility-spec: @private @protected @public objc-instance-variable-decl: struct-declaration */ static void c_parser_objc_class_instance_variables (c_parser *parser) { gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE)); c_parser_consume_token (parser); while (c_parser_next_token_is_not (parser, CPP_EOF)) { tree decls; /* Parse any stray semicolon. */ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic, "extra semicolon in struct or union specified"); c_parser_consume_token (parser); continue; } /* Stop if at the end of the instance variables. */ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { c_parser_consume_token (parser); break; } /* Parse any objc-visibility-spec. 
*/ if (c_parser_next_token_is_keyword (parser, RID_PRIVATE)) { c_parser_consume_token (parser); objc_set_visibility (2); continue; } else if (c_parser_next_token_is_keyword (parser, RID_PROTECTED)) { c_parser_consume_token (parser); objc_set_visibility (0); continue; } else if (c_parser_next_token_is_keyword (parser, RID_PUBLIC)) { c_parser_consume_token (parser); objc_set_visibility (1); continue; } else if (c_parser_next_token_is (parser, CPP_PRAGMA)) { c_parser_pragma (parser, pragma_external); continue; } /* Parse some comma-separated declarations. */ decls = c_parser_struct_declaration (parser); { /* Comma-separated instance variables are chained together in reverse order; add them one by one. */ tree ivar = nreverse (decls); for (; ivar; ivar = TREE_CHAIN (ivar)) objc_add_instance_variable (copy_node (ivar)); } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } } /* Parse an objc-class-declaration. objc-class-declaration: @class identifier-list ; */ static void c_parser_objc_class_declaration (c_parser *parser) { tree list = NULL_TREE; gcc_assert (c_parser_next_token_is_keyword (parser, RID_CLASS)); c_parser_consume_token (parser); /* Any identifiers, including those declared as type names, are OK here. */ while (true) { tree id; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); break; } id = c_parser_peek_token (parser)->value; list = chainon (list, build_tree_list (NULL_TREE, id)); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); objc_declare_class (list); } /* Parse an objc-alias-declaration. objc-alias-declaration: @compatibility_alias identifier identifier ; */ static void c_parser_objc_alias_declaration (c_parser *parser) { tree id1, id2; gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_ALIAS)); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL); return; } id1 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL); return; } id2 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); objc_declare_alias (id1, id2); } /* Parse an objc-protocol-definition. objc-protocol-definition: @protocol identifier objc-protocol-refs[opt] objc-methodprotolist @end @protocol identifier-list ; "@protocol identifier ;" should be resolved as "@protocol identifier-list ;": objc-methodprotolist may not start with a semicolon in the first alternative if objc-protocol-refs are omitted. */ static void c_parser_objc_protocol_definition (c_parser *parser) { gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_PROTOCOL)); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); return; } if (c_parser_peek_2nd_token (parser)->type == CPP_COMMA || c_parser_peek_2nd_token (parser)->type == CPP_SEMICOLON) { tree list = NULL_TREE; /* Any identifiers, including those declared as type names, are OK here. 
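For example, @protocol P1, P2; merely forward-declares the protocols; this list form is selected by the comma or semicolon seen after the first identifier.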
*/ while (true) { tree id; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); break; } id = c_parser_peek_token (parser)->value; list = chainon (list, build_tree_list (NULL_TREE, id)); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); objc_declare_protocols (list); } else { tree id = c_parser_peek_token (parser)->value; tree proto = NULL_TREE; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_LESS)) proto = c_parser_objc_protocol_refs (parser); parser->objc_pq_context = true; objc_start_protocol (id, proto); c_parser_objc_methodprotolist (parser); c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>"); parser->objc_pq_context = false; objc_finish_interface (); } } /* Parse an objc-method-type. objc-method-type: + - */ static enum tree_code c_parser_objc_method_type (c_parser *parser) { switch (c_parser_peek_token (parser)->type) { case CPP_PLUS: c_parser_consume_token (parser); return PLUS_EXPR; case CPP_MINUS: c_parser_consume_token (parser); return MINUS_EXPR; default: gcc_unreachable (); } } /* Parse an objc-method-definition. objc-method-definition: objc-method-type objc-method-decl ;[opt] compound-statement */ static void c_parser_objc_method_definition (c_parser *parser) { enum tree_code type = c_parser_objc_method_type (parser); tree decl; objc_set_method_type (type); parser->objc_pq_context = true; decl = c_parser_objc_method_decl (parser); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { c_parser_consume_token (parser); pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic, "extra semicolon in method definition specified"); } if (!c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { c_parser_error (parser, "expected %<{%>"); return; } parser->objc_pq_context = false; objc_start_method_definition (decl); add_stmt (c_parser_compound_statement (parser)); objc_finish_method_definition (current_function_decl); } /* Parse an objc-methodprotolist. objc-methodprotolist: empty objc-methodprotolist objc-methodproto objc-methodprotolist declaration objc-methodprotolist ; The declaration is a data definition, which may be missing declaration specifiers under the same rules and diagnostics as other data definitions outside functions, and the stray semicolon is diagnosed the same way as a stray semicolon outside a function. */ static void c_parser_objc_methodprotolist (c_parser *parser) { while (true) { /* The list is terminated by @end. */ switch (c_parser_peek_token (parser)->type) { case CPP_SEMICOLON: pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic, "ISO C does not allow extra %<;%> outside of a function"); c_parser_consume_token (parser); break; case CPP_PLUS: case CPP_MINUS: c_parser_objc_methodproto (parser); break; case CPP_PRAGMA: c_parser_pragma (parser, pragma_external); break; case CPP_EOF: return; default: if (c_parser_next_token_is_keyword (parser, RID_AT_END)) return; c_parser_declaration_or_fndef (parser, false, true, false, true); break; } } } /* Parse an objc-methodproto. objc-methodproto: objc-method-type objc-method-decl ; */ static void c_parser_objc_methodproto (c_parser *parser) { enum tree_code type = c_parser_objc_method_type (parser); tree decl; objc_set_method_type (type); /* Remember protocol qualifiers in prototypes. 
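For example (illustrative), in the prototype '- (oneway void) release;' the protocol qualifier 'oneway' is only recognized because objc_pq_context is set around the call to c_parser_objc_method_decl below.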
*/ parser->objc_pq_context = true; decl = c_parser_objc_method_decl (parser); /* Forget protocol qualifiers here. */ parser->objc_pq_context = false; objc_add_method_declaration (decl); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } /* Parse an objc-method-decl. objc-method-decl: ( objc-type-name ) objc-selector objc-selector ( objc-type-name ) objc-keyword-selector objc-optparmlist objc-keyword-selector objc-optparmlist objc-keyword-selector: objc-keyword-decl objc-keyword-selector objc-keyword-decl objc-keyword-decl: objc-selector : ( objc-type-name ) identifier objc-selector : identifier : ( objc-type-name ) identifier : identifier objc-optparmlist: objc-optparms objc-optellipsis objc-optparms: empty objc-optparms , parameter-declaration objc-optellipsis: empty , ... */ static tree c_parser_objc_method_decl (c_parser *parser) { tree type = NULL_TREE; tree sel; tree parms = NULL_TREE; bool ellipsis = false; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { c_parser_consume_token (parser); type = c_parser_objc_type_name (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } sel = c_parser_objc_selector (parser); /* If there is no selector, or a colon follows, we have an objc-keyword-selector. If there is a selector, and a colon does not follow, that selector ends the objc-method-decl. */ if (!sel || c_parser_next_token_is (parser, CPP_COLON)) { tree tsel = sel; tree list = NULL_TREE; while (true) { tree atype = NULL_TREE, id, keyworddecl; if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) break; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { c_parser_consume_token (parser); atype = c_parser_objc_type_name (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); return error_mark_node; } id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); keyworddecl = objc_build_keyword_decl (tsel, atype, id); list = chainon (list, keyworddecl); tsel = c_parser_objc_selector (parser); if (!tsel && c_parser_next_token_is_not (parser, CPP_COLON)) break; } /* Parse the optional parameter list. Optional Objective-C method parameters follow the C syntax, and may include '...' to denote a variable number of arguments. */ parms = make_node (TREE_LIST); while (c_parser_next_token_is (parser, CPP_COMMA)) { struct c_parm *parm; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { ellipsis = true; c_parser_consume_token (parser); break; } parm = c_parser_parameter_declaration (parser, NULL_TREE); if (parm == NULL) break; parms = chainon (parms, build_tree_list (NULL_TREE, grokparm (parm))); } sel = list; } return objc_build_method_signature (type, sel, parms, ellipsis); } /* Parse an objc-type-name.
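For example, in the hypothetical declaration '- (void) setData: (bycopy NSData *) d;' the parenthesized 'bycopy NSData *' is an objc-type-name with a single qualifier.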
objc-type-name: objc-type-qualifiers[opt] type-name objc-type-qualifiers[opt] objc-type-qualifiers: objc-type-qualifier objc-type-qualifiers objc-type-qualifier objc-type-qualifier: one of in out inout bycopy byref oneway */ static tree c_parser_objc_type_name (c_parser *parser) { tree quals = NULL_TREE; struct c_type_name *type_name = NULL; tree type = NULL_TREE; while (true) { c_token *token = c_parser_peek_token (parser); if (token->type == CPP_KEYWORD && (token->keyword == RID_IN || token->keyword == RID_OUT || token->keyword == RID_INOUT || token->keyword == RID_BYCOPY || token->keyword == RID_BYREF || token->keyword == RID_ONEWAY)) { quals = chainon (quals, build_tree_list (NULL_TREE, token->value)); c_parser_consume_token (parser); } else break; } if (c_parser_next_token_starts_typename (parser)) type_name = c_parser_type_name (parser); if (type_name) type = groktypename (type_name, NULL, NULL); return build_tree_list (quals, type); } /* Parse objc-protocol-refs. objc-protocol-refs: < identifier-list > */ static tree c_parser_objc_protocol_refs (c_parser *parser) { tree list = NULL_TREE; gcc_assert (c_parser_next_token_is (parser, CPP_LESS)); c_parser_consume_token (parser); /* Any identifiers, including those declared as type names, are OK here. */ while (true) { tree id; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); break; } id = c_parser_peek_token (parser)->value; list = chainon (list, build_tree_list (NULL_TREE, id)); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } c_parser_require (parser, CPP_GREATER, "expected %<>%>"); return list; } /* Parse an objc-try-catch-statement. objc-try-catch-statement: @try compound-statement objc-catch-list[opt] @try compound-statement objc-catch-list[opt] @finally compound-statement objc-catch-list: @catch ( parameter-declaration ) compound-statement objc-catch-list @catch ( parameter-declaration ) compound-statement */ static void c_parser_objc_try_catch_statement (c_parser *parser) { location_t loc; tree stmt; gcc_assert (c_parser_next_token_is_keyword (parser, RID_TRY)); c_parser_consume_token (parser); loc = c_parser_peek_token (parser)->location; stmt = c_parser_compound_statement (parser); objc_begin_try_stmt (loc, stmt); while (c_parser_next_token_is_keyword (parser, RID_CATCH)) { struct c_parm *parm; c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) break; parm = c_parser_parameter_declaration (parser, NULL_TREE); if (parm == NULL) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); break; } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); objc_begin_catch_clause (grokparm (parm)); if (c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>")) c_parser_compound_statement_nostart (parser); objc_finish_catch_clause (); } if (c_parser_next_token_is_keyword (parser, RID_AT_FINALLY)) { location_t finloc; tree finstmt; c_parser_consume_token (parser); finloc = c_parser_peek_token (parser)->location; finstmt = c_parser_compound_statement (parser); objc_build_finally_clause (finloc, finstmt); } objc_finish_try_stmt (); } /* Parse an objc-synchronized-statement. 
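For example (illustrative): '@synchronized (self) { count++; }' evaluates the parenthesized expression to obtain the lock object and executes the compound statement while holding it.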
objc-synchronized-statement: @synchronized ( expression ) compound-statement */ static void c_parser_objc_synchronized_statement (c_parser *parser) { location_t loc; tree expr, stmt; gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_SYNCHRONIZED)); c_parser_consume_token (parser); loc = c_parser_peek_token (parser)->location; if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr = c_parser_expression (parser).value; expr = c_fully_fold (expr, false, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else expr = error_mark_node; stmt = c_parser_compound_statement (parser); objc_build_synchronized (loc, expr, stmt); } /* Parse an objc-selector; return NULL_TREE without an error if the next token is not an objc-selector. objc-selector: identifier one of enum struct union if else while do for switch case default break continue return goto asm sizeof typeof __alignof unsigned long const short volatile signed restrict _Complex in out inout bycopy byref oneway int char float double void _Bool ??? Why this selection of keywords but not, for example, storage class specifiers? */ static tree c_parser_objc_selector (c_parser *parser) { c_token *token = c_parser_peek_token (parser); tree value = token->value; if (token->type == CPP_NAME) { c_parser_consume_token (parser); return value; } if (token->type != CPP_KEYWORD) return NULL_TREE; switch (token->keyword) { case RID_ENUM: case RID_STRUCT: case RID_UNION: case RID_IF: case RID_ELSE: case RID_WHILE: case RID_DO: case RID_FOR: case RID_SWITCH: case RID_CASE: case RID_DEFAULT: case RID_BREAK: case RID_CONTINUE: case RID_RETURN: case RID_GOTO: case RID_ASM: case RID_SIZEOF: case RID_TYPEOF: case RID_ALIGNOF: case RID_UNSIGNED: case RID_LONG: case RID_CONST: case RID_SHORT: case RID_VOLATILE: case RID_SIGNED: case RID_RESTRICT: case RID_COMPLEX: case RID_IN: case RID_OUT: case RID_INOUT: case RID_BYCOPY: case RID_BYREF: case RID_ONEWAY: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_BOOL: c_parser_consume_token (parser); return value; default: return NULL_TREE; } } /* Parse an objc-selector-arg. objc-selector-arg: objc-selector objc-keywordname-list objc-keywordname-list: objc-keywordname objc-keywordname-list objc-keywordname objc-keywordname: objc-selector : : */ static tree c_parser_objc_selector_arg (c_parser *parser) { tree sel = c_parser_objc_selector (parser); tree list = NULL_TREE; if (sel && c_parser_next_token_is_not (parser, CPP_COLON)) return sel; while (true) { if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) return list; list = chainon (list, build_tree_list (sel, NULL_TREE)); sel = c_parser_objc_selector (parser); if (!sel && c_parser_next_token_is_not (parser, CPP_COLON)) break; } return list; } /* Parse an objc-receiver. objc-receiver: expression class-name type-name */ static tree c_parser_objc_receiver (c_parser *parser) { if (c_parser_peek_token (parser)->type == CPP_NAME && (c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME || c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME)) { tree id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); return objc_get_class_reference (id); } return c_fully_fold (c_parser_expression (parser).value, false, NULL); } /* Parse objc-message-args. 
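For example, in the hypothetical message expression '[obj setWidth: w height: h]' the objc-message-args are 'setWidth: w height: h', parsed below as a keywordarg list.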
objc-message-args: objc-selector objc-keywordarg-list objc-keywordarg-list: objc-keywordarg objc-keywordarg-list objc-keywordarg objc-keywordarg: objc-selector : objc-keywordexpr : objc-keywordexpr */ static tree c_parser_objc_message_args (c_parser *parser) { tree sel = c_parser_objc_selector (parser); tree list = NULL_TREE; if (sel && c_parser_next_token_is_not (parser, CPP_COLON)) return sel; while (true) { tree keywordexpr; if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) return error_mark_node; keywordexpr = c_parser_objc_keywordexpr (parser); list = chainon (list, build_tree_list (sel, keywordexpr)); sel = c_parser_objc_selector (parser); if (!sel && c_parser_next_token_is_not (parser, CPP_COLON)) break; } return list; } /* Parse an objc-keywordexpr. objc-keywordexpr: nonempty-expr-list */ static tree c_parser_objc_keywordexpr (c_parser *parser) { tree ret; VEC(tree,gc) *expr_list = c_parser_expr_list (parser, true, true, NULL); if (VEC_length (tree, expr_list) == 1) { /* Just return the expression, remove a level of indirection. */ ret = VEC_index (tree, expr_list, 0); } else { /* We have a comma expression, we will collapse later. */ ret = build_tree_list_vec (expr_list); } release_tree_vector (expr_list); return ret; } /* Handle pragmas. Some OpenMP pragmas are associated with, and therefore should be considered, statements. ALLOW_STMT is true if we're within the context of a function and such pragmas are to be allowed. Returns true if we actually parsed such a pragma. */ static bool c_parser_pragma (c_parser *parser, enum pragma_context context) { unsigned int id; id = c_parser_peek_token (parser)->pragma_kind; gcc_assert (id != PRAGMA_NONE); switch (id) { case PRAGMA_OMP_BARRIER: if (context != pragma_compound) { if (context == pragma_stmt) c_parser_error (parser, "%<#pragma omp barrier%> may only be " "used in compound statements"); goto bad_stmt; } c_parser_omp_barrier (parser); return false; case PRAGMA_OMP_FLUSH: if (context != pragma_compound) { if (context == pragma_stmt) c_parser_error (parser, "%<#pragma omp flush%> may only be " "used in compound statements"); goto bad_stmt; } c_parser_omp_flush (parser); return false; case PRAGMA_OMP_TASKWAIT: if (context != pragma_compound) { if (context == pragma_stmt) c_parser_error (parser, "%<#pragma omp taskwait%> may only be " "used in compound statements"); goto bad_stmt; } c_parser_omp_taskwait (parser); return false; case PRAGMA_OMP_THREADPRIVATE: c_parser_omp_threadprivate (parser); return false; case PRAGMA_OMP_SECTION: error_at (c_parser_peek_token (parser)->location, "%<#pragma omp section%> may only be used in " "%<#pragma omp sections%> construct"); c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL); return false; case PRAGMA_GCC_PCH_PREPROCESS: c_parser_error (parser, "%<#pragma GCC pch_preprocess%> must be first"); c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL); return false; default: if (id < PRAGMA_FIRST_EXTERNAL) { if (context == pragma_external) { bad_stmt: c_parser_error (parser, "expected declaration specifiers"); c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL); return false; } c_parser_omp_construct (parser); return true; } break; } c_parser_consume_pragma (parser); c_invoke_pragma_handler (id); /* Skip to EOL, but suppress any error message. Those will have been generated by the handler routine through calling error, as opposed to calling c_parser_error. 
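For example, a registered handler that rejects its operand has already reported the problem via error, so setting parser->error below merely suppresses the generic resynchronization message.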
*/ parser->error = true; c_parser_skip_to_pragma_eol (parser); return false; } /* The interface the pragma parsers have to the lexer. */ enum cpp_ttype pragma_lex (tree *value) { c_token *tok = c_parser_peek_token (the_parser); enum cpp_ttype ret = tok->type; *value = tok->value; if (ret == CPP_PRAGMA_EOL || ret == CPP_EOF) ret = CPP_EOF; else { if (ret == CPP_KEYWORD) ret = CPP_NAME; c_parser_consume_token (the_parser); } return ret; } static void c_parser_pragma_pch_preprocess (c_parser *parser) { tree name = NULL; c_parser_consume_pragma (parser); if (c_parser_next_token_is (parser, CPP_STRING)) { name = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else c_parser_error (parser, "expected string literal"); c_parser_skip_to_pragma_eol (parser); if (name) c_common_pch_pragma (parse_in, TREE_STRING_POINTER (name)); } /* OpenMP 2.5 parsing routines. */ /* Returns name of the next clause. If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and the token is not consumed. Otherwise appropriate pragma_omp_clause is returned and the token is consumed. */ static pragma_omp_clause c_parser_omp_clause_name (c_parser *parser) { pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE; if (c_parser_next_token_is_keyword (parser, RID_IF)) result = PRAGMA_OMP_CLAUSE_IF; else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT)) result = PRAGMA_OMP_CLAUSE_DEFAULT; else if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); switch (p[0]) { case 'c': if (!strcmp ("collapse", p)) result = PRAGMA_OMP_CLAUSE_COLLAPSE; else if (!strcmp ("copyin", p)) result = PRAGMA_OMP_CLAUSE_COPYIN; else if (!strcmp ("copyprivate", p)) result = PRAGMA_OMP_CLAUSE_COPYPRIVATE; break; case 'f': if (!strcmp ("firstprivate", p)) result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE; break; case 'l': if (!strcmp ("lastprivate", p)) result = PRAGMA_OMP_CLAUSE_LASTPRIVATE; break; case 'n': if (!strcmp ("nowait", p)) result = PRAGMA_OMP_CLAUSE_NOWAIT; else if (!strcmp ("num_threads", p)) result = PRAGMA_OMP_CLAUSE_NUM_THREADS; break; case 'o': if (!strcmp ("ordered", p)) result = PRAGMA_OMP_CLAUSE_ORDERED; break; case 'p': if (!strcmp ("private", p)) result = PRAGMA_OMP_CLAUSE_PRIVATE; break; case 'r': if (!strcmp ("reduction", p)) result = PRAGMA_OMP_CLAUSE_REDUCTION; break; case 's': if (!strcmp ("schedule", p)) result = PRAGMA_OMP_CLAUSE_SCHEDULE; else if (!strcmp ("shared", p)) result = PRAGMA_OMP_CLAUSE_SHARED; break; case 'u': if (!strcmp ("untied", p)) result = PRAGMA_OMP_CLAUSE_UNTIED; break; } } if (result != PRAGMA_OMP_CLAUSE_NONE) c_parser_consume_token (parser); return result; } /* Validate that a clause of the given type does not already exist. */ static void check_no_duplicate_clause (tree clauses, enum omp_clause_code code, const char *name) { tree c; for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == code) { location_t loc = OMP_CLAUSE_LOCATION (c); error_at (loc, "too many %qs clauses", name); break; } } /* OpenMP 2.5: variable-list: identifier variable-list , identifier If KIND is nonzero, create the appropriate node and install the decl in OMP_CLAUSE_DECL and add the node to the head of the list. If KIND is nonzero, CLAUSE_LOC is the location of the clause. If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE; return the list created. 
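For example, given the illustrative clause 'private (a, b)', KIND is OMP_CLAUSE_PRIVATE and each of 'a' and 'b' is wrapped in its own OMP_CLAUSE_PRIVATE node chained onto LIST.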
*/ static tree c_parser_omp_variable_list (c_parser *parser, location_t clause_loc, enum omp_clause_code kind, tree list) { if (c_parser_next_token_is_not (parser, CPP_NAME) || c_parser_peek_token (parser)->id_kind != C_ID_ID) c_parser_error (parser, "expected identifier"); while (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_ID) { tree t = lookup_name (c_parser_peek_token (parser)->value); if (t == NULL_TREE) undeclared_variable (c_parser_peek_token (parser)->location, c_parser_peek_token (parser)->value); else if (t == error_mark_node) ; else if (kind != 0) { tree u = build_omp_clause (clause_loc, kind); OMP_CLAUSE_DECL (u) = t; OMP_CLAUSE_CHAIN (u) = list; list = u; } else list = tree_cons (t, NULL_TREE, list); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_COMMA)) break; c_parser_consume_token (parser); } return list; } /* Similarly, but expect leading and trailing parenthesis. This is a very common case for omp clauses. */ static tree c_parser_omp_var_list_parens (c_parser *parser, enum omp_clause_code kind, tree list) { /* The clauses location. */ location_t loc = c_parser_peek_token (parser)->location; if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { list = c_parser_omp_variable_list (parser, loc, kind, list); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } return list; } /* OpenMP 3.0: collapse ( constant-expression ) */ static tree c_parser_omp_clause_collapse (c_parser *parser, tree list) { tree c, num = error_mark_node; HOST_WIDE_INT n; location_t loc; check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse"); loc = c_parser_peek_token (parser)->location; if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { num = c_parser_expr_no_commas (parser, NULL).value; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } if (num == error_mark_node) return list; if (!INTEGRAL_TYPE_P (TREE_TYPE (num)) || !host_integerp (num, 0) || (n = tree_low_cst (num, 0)) <= 0 || (int) n != n) { error_at (loc, "collapse argument needs positive constant integer expression"); return list; } c = build_omp_clause (loc, OMP_CLAUSE_COLLAPSE); OMP_CLAUSE_COLLAPSE_EXPR (c) = num; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: copyin ( variable-list ) */ static tree c_parser_omp_clause_copyin (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYIN, list); } /* OpenMP 2.5: copyprivate ( variable-list ) */ static tree c_parser_omp_clause_copyprivate (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYPRIVATE, list); } /* OpenMP 2.5: default ( shared | none ) */ static tree c_parser_omp_clause_default (c_parser *parser, tree list) { enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED; location_t loc = c_parser_peek_token (parser)->location; tree c; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) return list; if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); switch (p[0]) { case 'n': if (strcmp ("none", p) != 0) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_NONE; break; case 's': if (strcmp ("shared", p) != 0) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_SHARED; break; default: goto invalid_kind; } c_parser_consume_token (parser); } else { invalid_kind: c_parser_error (parser, "expected %<none%> or %<shared%>"); } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 
"expected %<)%>"); if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED) return list; check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default"); c = build_omp_clause (loc, OMP_CLAUSE_DEFAULT); OMP_CLAUSE_CHAIN (c) = list; OMP_CLAUSE_DEFAULT_KIND (c) = kind; return c; } /* OpenMP 2.5: firstprivate ( variable-list ) */ static tree c_parser_omp_clause_firstprivate (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_FIRSTPRIVATE, list); } /* OpenMP 2.5: if ( expression ) */ static tree c_parser_omp_clause_if (c_parser *parser, tree list) { location_t loc = c_parser_peek_token (parser)->location; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { tree t = c_parser_paren_condition (parser); tree c; check_no_duplicate_clause (list, OMP_CLAUSE_IF, "if"); c = build_omp_clause (loc, OMP_CLAUSE_IF); OMP_CLAUSE_IF_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; list = c; } else c_parser_error (parser, "expected %<(%>"); return list; } /* OpenMP 2.5: lastprivate ( variable-list ) */ static tree c_parser_omp_clause_lastprivate (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_LASTPRIVATE, list); } /* OpenMP 2.5: nowait */ static tree c_parser_omp_clause_nowait (c_parser *parser ATTRIBUTE_UNUSED, tree list) { tree c; location_t loc = c_parser_peek_token (parser)->location; check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait"); c = build_omp_clause (loc, OMP_CLAUSE_NOWAIT); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: num_threads ( expression ) */ static tree c_parser_omp_clause_num_threads (c_parser *parser, tree list) { location_t num_threads_loc = c_parser_peek_token (parser)->location; if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { location_t expr_loc = c_parser_peek_token (parser)->location; tree c, t = c_parser_expression (parser).value; t = c_fully_fold (t, false, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (!INTEGRAL_TYPE_P (TREE_TYPE (t))) { c_parser_error (parser, "expected integer expression"); return list; } /* Attempt to statically determine when the number isn't positive. 
*/ c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); if (CAN_HAVE_LOCATION_P (c)) SET_EXPR_LOCATION (c, expr_loc); if (c == boolean_true_node) { warning_at (expr_loc, 0, "%<num_threads%> value must be positive"); t = integer_one_node; } check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS, "num_threads"); c = build_omp_clause (num_threads_loc, OMP_CLAUSE_NUM_THREADS); OMP_CLAUSE_NUM_THREADS_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; list = c; } return list; } /* OpenMP 2.5: ordered */ static tree c_parser_omp_clause_ordered (c_parser *parser, tree list) { tree c; check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED, "ordered"); c = build_omp_clause (c_parser_peek_token (parser)->location, OMP_CLAUSE_ORDERED); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: private ( variable-list ) */ static tree c_parser_omp_clause_private (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_PRIVATE, list); } /* OpenMP 2.5: reduction ( reduction-operator : variable-list ) reduction-operator: One of: + * - & ^ | && || */ static tree c_parser_omp_clause_reduction (c_parser *parser, tree list) { location_t clause_loc = c_parser_peek_token (parser)->location; if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { enum tree_code code; switch (c_parser_peek_token (parser)->type) { case CPP_PLUS: code = PLUS_EXPR; break; case CPP_MULT: code = MULT_EXPR; break; case CPP_MINUS: code = MINUS_EXPR; break; case CPP_AND: code = BIT_AND_EXPR; break; case CPP_XOR: code = BIT_XOR_EXPR; break; case CPP_OR: code = BIT_IOR_EXPR; break; case CPP_AND_AND: code = TRUTH_ANDIF_EXPR; break; case CPP_OR_OR: code = TRUTH_ORIF_EXPR; break; default: c_parser_error (parser, "expected %<+%>, %<*%>, %<-%>, %<&%>, " "%<^%>, %<|%>, %<&&%>, or %<||%>"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0); return list; } c_parser_consume_token (parser); if (c_parser_require (parser, CPP_COLON, "expected %<:%>")) { tree nl, c; nl = c_parser_omp_variable_list (parser, clause_loc, OMP_CLAUSE_REDUCTION, list); for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c)) OMP_CLAUSE_REDUCTION_CODE (c) = code; list = nl; } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } return list; } /* OpenMP 2.5: schedule ( schedule-kind ) schedule ( schedule-kind , expression ) schedule-kind: static | dynamic | guided | runtime | auto */ static tree c_parser_omp_clause_schedule (c_parser *parser, tree list) { tree c, t; location_t loc = c_parser_peek_token (parser)->location; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) return list; c = build_omp_clause (loc, OMP_CLAUSE_SCHEDULE); if (c_parser_next_token_is (parser, CPP_NAME)) { tree kind = c_parser_peek_token (parser)->value; const char *p = IDENTIFIER_POINTER (kind); switch (p[0]) { case 'd': if (strcmp ("dynamic", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC; break; case 'g': if (strcmp ("guided", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED; break; case 'r': if (strcmp ("runtime", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME; break; default: goto invalid_kind; } } else if (c_parser_next_token_is_keyword (parser, RID_STATIC)) OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC; else if (c_parser_next_token_is_keyword (parser, RID_AUTO)) OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO; else goto invalid_kind; c_parser_consume_token 
(parser); if (c_parser_next_token_is (parser, CPP_COMMA)) { location_t here; c_parser_consume_token (parser); here = c_parser_peek_token (parser)->location; t = c_parser_expr_no_commas (parser, NULL).value; t = c_fully_fold (t, false, NULL); if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME) error_at (here, "schedule %<runtime%> does not take " "a %<chunk_size%> parameter"); else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_AUTO) error_at (here, "schedule %<auto%> does not take " "a %<chunk_size%> parameter"); else if (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE) OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t; else c_parser_error (parser, "expected integer expression"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<,%> or %<)%>"); check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule"); OMP_CLAUSE_CHAIN (c) = list; return c; invalid_kind: c_parser_error (parser, "invalid schedule kind"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0); return list; } /* OpenMP 2.5: shared ( variable-list ) */ static tree c_parser_omp_clause_shared (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_SHARED, list); } /* OpenMP 3.0: untied */ static tree c_parser_omp_clause_untied (c_parser *parser ATTRIBUTE_UNUSED, tree list) { tree c; /* FIXME: Should we allow duplicates? */ check_no_duplicate_clause (list, OMP_CLAUSE_UNTIED, "untied"); c = build_omp_clause (c_parser_peek_token (parser)->location, OMP_CLAUSE_UNTIED); OMP_CLAUSE_CHAIN (c) = list; return c; } /* Parse all OpenMP clauses. The set of clauses allowed by the directive is given as a bitmask in MASK. Return the list of clauses found; a default clause, if present, is chained into this list like any other.
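For example (illustrative), for '#pragma omp for private (i) schedule (static, 4) nowait' the loop below collects three clauses, each checked against MASK and named by C_NAME in diagnostics.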
*/ static tree c_parser_omp_all_clauses (c_parser *parser, unsigned int mask, const char *where) { tree clauses = NULL; bool first = true; while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL)) { location_t here; pragma_omp_clause c_kind; const char *c_name; tree prev = clauses; if (!first && c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); first = false; here = c_parser_peek_token (parser)->location; c_kind = c_parser_omp_clause_name (parser); switch (c_kind) { case PRAGMA_OMP_CLAUSE_COLLAPSE: clauses = c_parser_omp_clause_collapse (parser, clauses); c_name = "collapse"; break; case PRAGMA_OMP_CLAUSE_COPYIN: clauses = c_parser_omp_clause_copyin (parser, clauses); c_name = "copyin"; break; case PRAGMA_OMP_CLAUSE_COPYPRIVATE: clauses = c_parser_omp_clause_copyprivate (parser, clauses); c_name = "copyprivate"; break; case PRAGMA_OMP_CLAUSE_DEFAULT: clauses = c_parser_omp_clause_default (parser, clauses); c_name = "default"; break; case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE: clauses = c_parser_omp_clause_firstprivate (parser, clauses); c_name = "firstprivate"; break; case PRAGMA_OMP_CLAUSE_IF: clauses = c_parser_omp_clause_if (parser, clauses); c_name = "if"; break; case PRAGMA_OMP_CLAUSE_LASTPRIVATE: clauses = c_parser_omp_clause_lastprivate (parser, clauses); c_name = "lastprivate"; break; case PRAGMA_OMP_CLAUSE_NOWAIT: clauses = c_parser_omp_clause_nowait (parser, clauses); c_name = "nowait"; break; case PRAGMA_OMP_CLAUSE_NUM_THREADS: clauses = c_parser_omp_clause_num_threads (parser, clauses); c_name = "num_threads"; break; case PRAGMA_OMP_CLAUSE_ORDERED: clauses = c_parser_omp_clause_ordered (parser, clauses); c_name = "ordered"; break; case PRAGMA_OMP_CLAUSE_PRIVATE: clauses = c_parser_omp_clause_private (parser, clauses); c_name = "private"; break; case PRAGMA_OMP_CLAUSE_REDUCTION: clauses = c_parser_omp_clause_reduction (parser, clauses); c_name = "reduction"; break; case PRAGMA_OMP_CLAUSE_SCHEDULE: clauses = c_parser_omp_clause_schedule (parser, clauses); c_name = "schedule"; break; case PRAGMA_OMP_CLAUSE_SHARED: clauses = c_parser_omp_clause_shared (parser, clauses); c_name = "shared"; break; case PRAGMA_OMP_CLAUSE_UNTIED: clauses = c_parser_omp_clause_untied (parser, clauses); c_name = "untied"; break; default: c_parser_error (parser, "expected %<#pragma omp%> clause"); goto saw_error; } if (((mask >> c_kind) & 1) == 0 && !parser->error) { /* Remove the invalid clause(s) from the list to avoid confusing the rest of the compiler. */ clauses = prev; error_at (here, "%qs is not valid for %qs", c_name, where); } } saw_error: c_parser_skip_to_pragma_eol (parser); return c_finish_omp_clauses (clauses); } /* OpenMP 2.5: structured-block: statement In practice, we're also interested in adding the statement to an outer node. So it is convenient if we work around the fact that c_parser_statement calls add_stmt. */ static tree c_parser_omp_structured_block (c_parser *parser) { tree stmt = push_stmt_list (); c_parser_statement (parser); return pop_stmt_list (stmt); } /* OpenMP 2.5: # pragma omp atomic new-line expression-stmt expression-stmt: x binop= expr | x++ | ++x | x-- | --x binop: +, *, -, /, &, ^, |, <<, >> where x is an lvalue expression with scalar type. LOC is the location of the #pragma token. 
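For example (illustrative), '#pragma omp atomic' followed by 'sum += x[i];' is parsed below as code PLUS_EXPR with lhs 'sum' and rhs 'x[i]'.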
*/ static void c_parser_omp_atomic (location_t loc, c_parser *parser) { tree lhs, rhs; tree stmt; enum tree_code code; struct c_expr rhs_expr; c_parser_skip_to_pragma_eol (parser); lhs = c_parser_unary_expression (parser).value; lhs = c_fully_fold (lhs, false, NULL); switch (TREE_CODE (lhs)) { case ERROR_MARK: saw_error: c_parser_skip_to_end_of_block_or_statement (parser); return; case PREINCREMENT_EXPR: case POSTINCREMENT_EXPR: lhs = TREE_OPERAND (lhs, 0); code = PLUS_EXPR; rhs = integer_one_node; break; case PREDECREMENT_EXPR: case POSTDECREMENT_EXPR: lhs = TREE_OPERAND (lhs, 0); code = MINUS_EXPR; rhs = integer_one_node; break; case COMPOUND_EXPR: if (TREE_CODE (TREE_OPERAND (lhs, 0)) == SAVE_EXPR && TREE_CODE (TREE_OPERAND (lhs, 1)) == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0)) == MODIFY_EXPR && TREE_OPERAND (TREE_OPERAND (lhs, 1), 1) == TREE_OPERAND (lhs, 0) && TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0), 0))) == BOOLEAN_TYPE) /* Undo effects of boolean_increment for post {in,de}crement. */ lhs = TREE_OPERAND (TREE_OPERAND (lhs, 1), 0); /* FALLTHRU */ case MODIFY_EXPR: if (TREE_CODE (lhs) == MODIFY_EXPR && TREE_CODE (TREE_TYPE (TREE_OPERAND (lhs, 0))) == BOOLEAN_TYPE) { /* Undo effects of boolean_increment. */ if (integer_onep (TREE_OPERAND (lhs, 1))) { /* This is pre or post increment. */ rhs = TREE_OPERAND (lhs, 1); lhs = TREE_OPERAND (lhs, 0); code = NOP_EXPR; break; } if (TREE_CODE (TREE_OPERAND (lhs, 1)) == TRUTH_NOT_EXPR && TREE_OPERAND (lhs, 0) == TREE_OPERAND (TREE_OPERAND (lhs, 1), 0)) { /* This is pre or post decrement. */ rhs = TREE_OPERAND (lhs, 1); lhs = TREE_OPERAND (lhs, 0); code = NOP_EXPR; break; } } /* FALLTHRU */ default: switch (c_parser_peek_token (parser)->type) { case CPP_MULT_EQ: code = MULT_EXPR; break; case CPP_DIV_EQ: code = TRUNC_DIV_EXPR; break; case CPP_PLUS_EQ: code = PLUS_EXPR; break; case CPP_MINUS_EQ: code = MINUS_EXPR; break; case CPP_LSHIFT_EQ: code = LSHIFT_EXPR; break; case CPP_RSHIFT_EQ: code = RSHIFT_EXPR; break; case CPP_AND_EQ: code = BIT_AND_EXPR; break; case CPP_OR_EQ: code = BIT_IOR_EXPR; break; case CPP_XOR_EQ: code = BIT_XOR_EXPR; break; default: c_parser_error (parser, "invalid operator for %<#pragma omp atomic%>"); goto saw_error; } c_parser_consume_token (parser); { location_t rhs_loc = c_parser_peek_token (parser)->location; rhs_expr = c_parser_expression (parser); rhs_expr = default_function_array_conversion (rhs_loc, rhs_expr); } rhs = rhs_expr.value; rhs = c_fully_fold (rhs, false, NULL); break; } stmt = c_finish_omp_atomic (loc, code, lhs, rhs); if (stmt != error_mark_node) add_stmt (stmt); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } /* OpenMP 2.5: # pragma omp barrier new-line */ static void c_parser_omp_barrier (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_pragma (parser); c_parser_skip_to_pragma_eol (parser); c_finish_omp_barrier (loc); } /* OpenMP 2.5: # pragma omp critical [(name)] new-line structured-block LOC is the location of the #pragma itself. 
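For example (illustrative), '#pragma omp critical (queue_lock)' names the region, while a bare '#pragma omp critical' leaves NAME as NULL.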
*/ static tree c_parser_omp_critical (location_t loc, c_parser *parser) { tree stmt, name = NULL; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { name = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else c_parser_error (parser, "expected identifier"); } else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL)) c_parser_error (parser, "expected %<(%> or end of line"); c_parser_skip_to_pragma_eol (parser); stmt = c_parser_omp_structured_block (parser); return c_finish_omp_critical (loc, stmt, name); } /* OpenMP 2.5: # pragma omp flush flush-vars[opt] new-line flush-vars: ( variable-list ) */ static void c_parser_omp_flush (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_pragma (parser); if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL); else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL)) c_parser_error (parser, "expected %<(%> or end of line"); c_parser_skip_to_pragma_eol (parser); c_finish_omp_flush (loc); } /* Parse the restricted form of the for statement allowed by OpenMP. The real trick here is to determine the loop control variable early so that we can push a new decl if necessary to make it private. LOC is the location of the OMP in "#pragma omp". */ static tree c_parser_omp_for_loop (location_t loc, c_parser *parser, tree clauses, tree *par_clauses) { tree decl, cond, incr, save_break, save_cont, body, init, stmt, cl; tree declv, condv, incrv, initv, for_block = NULL, ret = NULL; bool fail = false, open_brace_parsed = false; int i, collapse = 1, nbraces = 0; location_t for_loc; for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl)) if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE) collapse = tree_low_cst (OMP_CLAUSE_COLLAPSE_EXPR (cl), 0); gcc_assert (collapse >= 1); declv = make_tree_vec (collapse); initv = make_tree_vec (collapse); condv = make_tree_vec (collapse); incrv = make_tree_vec (collapse); if (!c_parser_next_token_is_keyword (parser, RID_FOR)) { c_parser_error (parser, "for statement expected"); return NULL; } for_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); for (i = 0; i < collapse; i++) { int bracecount = 0; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) goto pop_scopes; /* Parse the initialization declaration or expression. 
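For example, either 'int i = 0' (a declaration, handled first) or a plain assignment 'i = 0' to an existing variable is accepted here; anything else is diagnosed via the error_init path.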
*/ if (c_parser_next_token_starts_declspecs (parser)) { if (i > 0) for_block = tree_cons (NULL, c_begin_compound_stmt (true), for_block); c_parser_declaration_or_fndef (parser, true, true, true, true); decl = check_for_loop_decls (for_loc); if (decl == NULL) goto error_init; if (DECL_INITIAL (decl) == error_mark_node) decl = error_mark_node; init = decl; } else if (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_EQ) { struct c_expr decl_exp; struct c_expr init_exp; location_t init_loc; decl_exp = c_parser_postfix_expression (parser); decl = decl_exp.value; c_parser_require (parser, CPP_EQ, "expected %<=%>"); init_loc = c_parser_peek_token (parser)->location; init_exp = c_parser_expr_no_commas (parser, NULL); init_exp = default_function_array_conversion (init_loc, init_exp); init = build_modify_expr (init_loc, decl, decl_exp.original_type, NOP_EXPR, init_loc, init_exp.value, init_exp.original_type); init = c_process_expr_stmt (init_loc, init); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } else { error_init: c_parser_error (parser, "expected iteration declaration or initialization"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); fail = true; goto parse_next; } /* Parse the loop condition. */ cond = NULL_TREE; if (c_parser_next_token_is_not (parser, CPP_SEMICOLON)) { location_t cond_loc = c_parser_peek_token (parser)->location; struct c_expr cond_expr = c_parser_binary_expression (parser, NULL); cond = cond_expr.value; cond = c_objc_common_truthvalue_conversion (cond_loc, cond); cond = c_fully_fold (cond, false, NULL); switch (cond_expr.original_code) { case GT_EXPR: case GE_EXPR: case LT_EXPR: case LE_EXPR: break; default: /* Can't be cond = error_mark_node, because we want to preserve the location until c_finish_omp_for. */ cond = build1 (NOP_EXPR, boolean_type_node, error_mark_node); break; } protected_set_expr_location (cond, cond_loc); } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); /* Parse the increment expression. */ incr = NULL_TREE; if (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN)) { location_t incr_loc = c_parser_peek_token (parser)->location; incr = c_process_expr_stmt (incr_loc, c_parser_expression (parser).value); } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (decl == NULL || decl == error_mark_node || init == error_mark_node) fail = true; else { TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (condv, i) = cond; TREE_VEC_ELT (incrv, i) = incr; } parse_next: if (i == collapse - 1) break; /* FIXME: OpenMP 3.0 draft isn't very clear on what exactly is allowed in between the collapsed for loops to be still considered perfectly nested. Hopefully the final version clarifies this. For now handle (multiple) {'s and empty statements. 
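For example, with 'collapse (2)' the illustrative input 'for (i = 0; i < n; i++) { for (j = 0; j < m; j++) body; }' is accepted; the brace between the loops is counted in bracecount and matched again after the body.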
*/ do { if (c_parser_next_token_is_keyword (parser, RID_FOR)) { c_parser_consume_token (parser); break; } else if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { c_parser_consume_token (parser); bracecount++; } else if (bracecount && c_parser_next_token_is (parser, CPP_SEMICOLON)) c_parser_consume_token (parser); else { c_parser_error (parser, "not enough perfectly nested loops"); if (bracecount) { open_brace_parsed = true; bracecount--; } fail = true; collapse = 0; break; } } while (1); nbraces += bracecount; } save_break = c_break_label; c_break_label = size_one_node; save_cont = c_cont_label; c_cont_label = NULL_TREE; body = push_stmt_list (); if (open_brace_parsed) { location_t here = c_parser_peek_token (parser)->location; stmt = c_begin_compound_stmt (true); c_parser_compound_statement_nostart (parser); add_stmt (c_end_compound_stmt (here, stmt, true)); } else add_stmt (c_parser_c99_block_statement (parser)); if (c_cont_label) { tree t = build1 (LABEL_EXPR, void_type_node, c_cont_label); SET_EXPR_LOCATION (t, loc); add_stmt (t); } body = pop_stmt_list (body); c_break_label = save_break; c_cont_label = save_cont; while (nbraces) { if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { c_parser_consume_token (parser); nbraces--; } else if (c_parser_next_token_is (parser, CPP_SEMICOLON)) c_parser_consume_token (parser); else { c_parser_error (parser, "collapsed loops not perfectly nested"); while (nbraces) { location_t here = c_parser_peek_token (parser)->location; stmt = c_begin_compound_stmt (true); add_stmt (body); c_parser_compound_statement_nostart (parser); body = c_end_compound_stmt (here, stmt, true); nbraces--; } goto pop_scopes; } } /* Only bother calling c_finish_omp_for if we haven't already generated an error from the initialization parsing. */ if (!fail) { stmt = c_finish_omp_for (loc, declv, initv, condv, incrv, body, NULL); if (stmt) { if (par_clauses != NULL) { tree *c; for (c = par_clauses; *c ; ) if (OMP_CLAUSE_CODE (*c) != OMP_CLAUSE_FIRSTPRIVATE && OMP_CLAUSE_CODE (*c) != OMP_CLAUSE_LASTPRIVATE) c = &OMP_CLAUSE_CHAIN (*c); else { for (i = 0; i < collapse; i++) if (TREE_VEC_ELT (declv, i) == OMP_CLAUSE_DECL (*c)) break; if (i == collapse) c = &OMP_CLAUSE_CHAIN (*c); else if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_FIRSTPRIVATE) { error_at (loc, "iteration variable %qD should not be firstprivate", OMP_CLAUSE_DECL (*c)); *c = OMP_CLAUSE_CHAIN (*c); } else { /* Copy lastprivate (decl) clause to OMP_FOR_CLAUSES, change it to shared (decl) in OMP_PARALLEL_CLAUSES. */ tree l = build_omp_clause (OMP_CLAUSE_LOCATION (*c), OMP_CLAUSE_LASTPRIVATE); OMP_CLAUSE_DECL (l) = OMP_CLAUSE_DECL (*c); OMP_CLAUSE_CHAIN (l) = clauses; clauses = l; OMP_CLAUSE_SET_CODE (*c, OMP_CLAUSE_SHARED); } } } OMP_FOR_CLAUSES (stmt) = clauses; } ret = stmt; } pop_scopes: while (for_block) { /* FIXME diagnostics: LOC below should be the actual location of this particular for block. We need to build a list of locations to go along with FOR_BLOCK. */ stmt = c_end_compound_stmt (loc, TREE_VALUE (for_block), true); add_stmt (stmt); for_block = TREE_CHAIN (for_block); } return ret; } /* OpenMP 2.5: #pragma omp for for-clause[optseq] new-line for-loop LOC is the location of the #pragma token. 
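A minimal illustrative use is '#pragma omp for nowait' immediately followed by 'for (i = 0; i < n; i++) a[i] = 0;'.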
*/ #define OMP_FOR_CLAUSE_MASK \ ( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (1u << PRAGMA_OMP_CLAUSE_ORDERED) \ | (1u << PRAGMA_OMP_CLAUSE_SCHEDULE) \ | (1u << PRAGMA_OMP_CLAUSE_COLLAPSE) \ | (1u << PRAGMA_OMP_CLAUSE_NOWAIT)) static tree c_parser_omp_for (location_t loc, c_parser *parser) { tree block, clauses, ret; clauses = c_parser_omp_all_clauses (parser, OMP_FOR_CLAUSE_MASK, "#pragma omp for"); block = c_begin_compound_stmt (true); ret = c_parser_omp_for_loop (loc, parser, clauses, NULL); block = c_end_compound_stmt (loc, block, true); add_stmt (block); return ret; } /* OpenMP 2.5: # pragma omp master new-line structured-block LOC is the location of the #pragma token. */ static tree c_parser_omp_master (location_t loc, c_parser *parser) { c_parser_skip_to_pragma_eol (parser); return c_finish_omp_master (loc, c_parser_omp_structured_block (parser)); } /* OpenMP 2.5: # pragma omp ordered new-line structured-block LOC is the location of the #pragma itself. */ static tree c_parser_omp_ordered (location_t loc, c_parser *parser) { c_parser_skip_to_pragma_eol (parser); return c_finish_omp_ordered (loc, c_parser_omp_structured_block (parser)); } /* OpenMP 2.5: section-scope: { section-sequence } section-sequence: section-directive[opt] structured-block section-sequence section-directive structured-block SECTIONS_LOC is the location of the #pragma omp sections. */ static tree c_parser_omp_sections_scope (location_t sections_loc, c_parser *parser) { tree stmt, substmt; bool error_suppress = false; location_t loc; loc = c_parser_peek_token (parser)->location; if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>")) { /* Avoid skipping until the end of the block. */ parser->error = false; return NULL_TREE; } stmt = push_stmt_list (); if (c_parser_peek_token (parser)->pragma_kind != PRAGMA_OMP_SECTION) { substmt = push_stmt_list (); while (1) { c_parser_statement (parser); if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SECTION) break; if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) break; if (c_parser_next_token_is (parser, CPP_EOF)) break; } substmt = pop_stmt_list (substmt); substmt = build1 (OMP_SECTION, void_type_node, substmt); SET_EXPR_LOCATION (substmt, loc); add_stmt (substmt); } while (1) { if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) break; if (c_parser_next_token_is (parser, CPP_EOF)) break; loc = c_parser_peek_token (parser)->location; if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SECTION) { c_parser_consume_pragma (parser); c_parser_skip_to_pragma_eol (parser); error_suppress = false; } else if (!error_suppress) { error_at (loc, "expected %<#pragma omp section%> or %<}%>"); error_suppress = true; } substmt = c_parser_omp_structured_block (parser); substmt = build1 (OMP_SECTION, void_type_node, substmt); SET_EXPR_LOCATION (substmt, loc); add_stmt (substmt); } c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, "expected %<#pragma omp section%> or %<}%>"); substmt = pop_stmt_list (stmt); stmt = make_node (OMP_SECTIONS); SET_EXPR_LOCATION (stmt, sections_loc); TREE_TYPE (stmt) = void_type_node; OMP_SECTIONS_BODY (stmt) = substmt; return add_stmt (stmt); } /* OpenMP 2.5: # pragma omp sections sections-clause[optseq] newline sections-scope LOC is the location of the #pragma token. 
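For example (illustrative), '#pragma omp sections' followed by '{ work_a (); #pragma omp section work_b (); }' yields two OMP_SECTION substatements, the first section directive being implicit.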
*/ #define OMP_SECTIONS_CLAUSE_MASK \ ( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (1u << PRAGMA_OMP_CLAUSE_NOWAIT)) static tree c_parser_omp_sections (location_t loc, c_parser *parser) { tree block, clauses, ret; clauses = c_parser_omp_all_clauses (parser, OMP_SECTIONS_CLAUSE_MASK, "#pragma omp sections"); block = c_begin_compound_stmt (true); ret = c_parser_omp_sections_scope (loc, parser); if (ret) OMP_SECTIONS_CLAUSES (ret) = clauses; block = c_end_compound_stmt (loc, block, true); add_stmt (block); return ret; } /* OpenMP 2.5: # pragma omp parallel parallel-clause new-line # pragma omp parallel for parallel-for-clause new-line # pragma omp parallel sections parallel-sections-clause new-line LOC is the location of the #pragma token. */ #define OMP_PARALLEL_CLAUSE_MASK \ ( (1u << PRAGMA_OMP_CLAUSE_IF) \ | (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_DEFAULT) \ | (1u << PRAGMA_OMP_CLAUSE_SHARED) \ | (1u << PRAGMA_OMP_CLAUSE_COPYIN) \ | (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (1u << PRAGMA_OMP_CLAUSE_NUM_THREADS)) static tree c_parser_omp_parallel (location_t loc, c_parser *parser) { enum pragma_kind p_kind = PRAGMA_OMP_PARALLEL; const char *p_name = "#pragma omp parallel"; tree stmt, clauses, par_clause, ws_clause, block; unsigned int mask = OMP_PARALLEL_CLAUSE_MASK; if (c_parser_next_token_is_keyword (parser, RID_FOR)) { c_parser_consume_token (parser); p_kind = PRAGMA_OMP_PARALLEL_FOR; p_name = "#pragma omp parallel for"; mask |= OMP_FOR_CLAUSE_MASK; mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT); } else if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "sections") == 0) { c_parser_consume_token (parser); p_kind = PRAGMA_OMP_PARALLEL_SECTIONS; p_name = "#pragma omp parallel sections"; mask |= OMP_SECTIONS_CLAUSE_MASK; mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT); } } clauses = c_parser_omp_all_clauses (parser, mask, p_name); switch (p_kind) { case PRAGMA_OMP_PARALLEL: block = c_begin_omp_parallel (); c_parser_statement (parser); stmt = c_finish_omp_parallel (loc, clauses, block); break; case PRAGMA_OMP_PARALLEL_FOR: block = c_begin_omp_parallel (); c_split_parallel_clauses (loc, clauses, &par_clause, &ws_clause); c_parser_omp_for_loop (loc, parser, ws_clause, &par_clause); stmt = c_finish_omp_parallel (loc, par_clause, block); OMP_PARALLEL_COMBINED (stmt) = 1; break; case PRAGMA_OMP_PARALLEL_SECTIONS: block = c_begin_omp_parallel (); c_split_parallel_clauses (loc, clauses, &par_clause, &ws_clause); stmt = c_parser_omp_sections_scope (loc, parser); if (stmt) OMP_SECTIONS_CLAUSES (stmt) = ws_clause; stmt = c_finish_omp_parallel (loc, par_clause, block); OMP_PARALLEL_COMBINED (stmt) = 1; break; default: gcc_unreachable (); } return stmt; } /* OpenMP 2.5: # pragma omp single single-clause[optseq] new-line structured-block LOC is the location of the #pragma.
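For example (illustrative), '#pragma omp single copyprivate (seed)' executes the following statement on a single thread and broadcasts the value of 'seed' to the other threads.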
*/ #define OMP_SINGLE_CLAUSE_MASK \ ( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_COPYPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_NOWAIT)) static tree c_parser_omp_single (location_t loc, c_parser *parser) { tree stmt = make_node (OMP_SINGLE); SET_EXPR_LOCATION (stmt, loc); TREE_TYPE (stmt) = void_type_node; OMP_SINGLE_CLAUSES (stmt) = c_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK, "#pragma omp single"); OMP_SINGLE_BODY (stmt) = c_parser_omp_structured_block (parser); return add_stmt (stmt); } /* OpenMP 3.0: # pragma omp task task-clause[optseq] new-line LOC is the location of the #pragma. */ #define OMP_TASK_CLAUSE_MASK \ ( (1u << PRAGMA_OMP_CLAUSE_IF) \ | (1u << PRAGMA_OMP_CLAUSE_UNTIED) \ | (1u << PRAGMA_OMP_CLAUSE_DEFAULT) \ | (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_SHARED)) static tree c_parser_omp_task (location_t loc, c_parser *parser) { tree clauses, block; clauses = c_parser_omp_all_clauses (parser, OMP_TASK_CLAUSE_MASK, "#pragma omp task"); block = c_begin_omp_task (); c_parser_statement (parser); return c_finish_omp_task (loc, clauses, block); } /* OpenMP 3.0: # pragma omp taskwait new-line */ static void c_parser_omp_taskwait (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_pragma (parser); c_parser_skip_to_pragma_eol (parser); c_finish_omp_taskwait (loc); } /* Main entry point to parsing most OpenMP pragmas. */ static void c_parser_omp_construct (c_parser *parser) { enum pragma_kind p_kind; location_t loc; tree stmt; loc = c_parser_peek_token (parser)->location; p_kind = c_parser_peek_token (parser)->pragma_kind; c_parser_consume_pragma (parser); switch (p_kind) { case PRAGMA_OMP_ATOMIC: c_parser_omp_atomic (loc, parser); return; case PRAGMA_OMP_CRITICAL: stmt = c_parser_omp_critical (loc, parser); break; case PRAGMA_OMP_FOR: stmt = c_parser_omp_for (loc, parser); break; case PRAGMA_OMP_MASTER: stmt = c_parser_omp_master (loc, parser); break; case PRAGMA_OMP_ORDERED: stmt = c_parser_omp_ordered (loc, parser); break; case PRAGMA_OMP_PARALLEL: stmt = c_parser_omp_parallel (loc, parser); break; case PRAGMA_OMP_SECTIONS: stmt = c_parser_omp_sections (loc, parser); break; case PRAGMA_OMP_SINGLE: stmt = c_parser_omp_single (loc, parser); break; case PRAGMA_OMP_TASK: stmt = c_parser_omp_task (loc, parser); break; default: gcc_unreachable (); } if (stmt) gcc_assert (EXPR_LOCATION (stmt) != UNKNOWN_LOCATION); } /* OpenMP 2.5: # pragma omp threadprivate (variable-list) */ static void c_parser_omp_threadprivate (c_parser *parser) { tree vars, t; location_t loc; c_parser_consume_pragma (parser); loc = c_parser_peek_token (parser)->location; vars = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL); /* Mark every variable in VARS to be assigned thread local storage. */ for (t = vars; t; t = TREE_CHAIN (t)) { tree v = TREE_PURPOSE (t); /* FIXME diagnostics: Ideally we should keep individual locations for all the variables in the var list to make the following errors more precise. Perhaps c_parser_omp_var_list_parens() should construct a list of locations to go along with the var list. */ /* If V had already been marked threadprivate, it doesn't matter whether it had been used prior to this point. 
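For example (illustrative), 'static int counter; #pragma omp threadprivate (counter)' gives each thread its own copy of 'counter'; the checks below reject automatic variables and variables already used without the directive.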
*/ if (TREE_CODE (v) != VAR_DECL) error_at (loc, "%qD is not a variable", v); else if (TREE_USED (v) && !C_DECL_THREADPRIVATE_P (v)) error_at (loc, "%qE declared %<threadprivate%> after first use", v); else if (! TREE_STATIC (v) && ! DECL_EXTERNAL (v)) error_at (loc, "automatic variable %qE cannot be %<threadprivate%>", v); else if (TREE_TYPE (v) == error_mark_node) ; else if (! COMPLETE_TYPE_P (TREE_TYPE (v))) error_at (loc, "%<threadprivate%> %qE has incomplete type", v); else { if (! DECL_THREAD_LOCAL_P (v)) { DECL_TLS_MODEL (v) = decl_default_tls_model (v); /* If rtl has been already set for this var, call make_decl_rtl once again, so that encode_section_info has a chance to look at the new decl flags. */ if (DECL_RTL_SET_P (v)) make_decl_rtl (v); } C_DECL_THREADPRIVATE_P (v) = 1; } } c_parser_skip_to_pragma_eol (parser); } /* Parse a single source file. */ void c_parse_file (void) { /* Use local storage to begin. If the first token is a pragma, parse it. If it is #pragma GCC pch_preprocess, then this will load a PCH file which will cause garbage collection. */ c_parser tparser; memset (&tparser, 0, sizeof tparser); the_parser = &tparser; if (c_parser_peek_token (&tparser)->pragma_kind == PRAGMA_GCC_PCH_PREPROCESS) c_parser_pragma_pch_preprocess (&tparser); the_parser = GGC_NEW (c_parser); *the_parser = tparser; /* Initialize EH, if we've been told to do so. */ if (flag_exceptions) using_eh_for_cleanups (); c_parser_translation_unit (the_parser); the_parser = NULL; } #include "gt-c-parser.h" #ifdef __cplusplus } /* extern "C" */ #endif
/* TODO: Make sure all relevant comments, and all relevant code from all actions, brought over from old parser. Verify exact correspondence of syntax accepted. Add testcases covering every input symbol in every state in old and new parsers. Include full syntax for GNU C, including erroneous cases accepted with error messages, in syntax productions in comments. Make more diagnostics in the front end generally take an explicit location rather than implicitly using input_location. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "rtl.h" #include "langhooks.h" #include "input.h" #include "cpplib.h" #include "timevar.h" #include "c-pragma.h" #include "c-tree.h" #include "flags.h" #include "output.h" #include "toplev.h" #include "ggc.h" #include "c-common.h" #include "vec.h" #include "target.h" #include "cgraph.h" #include "plugin.h" #include "except.h" #ifdef __cplusplus extern "C" { #endif /* Initialization routine for this file. */ void c_parse_init (void) { /* The only initialization required is of the reserved word identifiers. */ unsigned int i; tree id; int mask = 0; /* Make sure RID_MAX hasn't grown past the 8 bits used to hold the keyword in the c_token structure. */ gcc_assert (RID_MAX <= 255); mask |= D_CXXONLY; if (!flag_isoc99) mask |= D_C99; if (flag_no_asm) { mask |= D_ASM | D_EXT; if (!flag_isoc99) mask |= D_EXT89; } if (!c_dialect_objc ()) mask |= D_OBJC | D_CXX_OBJC; ridpointers = GGC_CNEWVEC (tree, (int) RID_MAX); for (i = 0; i < num_c_common_reswords; i++) { /* If a keyword is disabled, do not enter it into the table and so create a canonical spelling that isn't a keyword. */ if (c_common_reswords[i].disable & mask) { if (warn_cxx_compat && (c_common_reswords[i].disable & D_CXXWARN)) { id = get_identifier (c_common_reswords[i].word); C_SET_RID_CODE (id, RID_CXX_COMPAT_WARN); C_IS_RESERVED_WORD (id) = 1; } continue; } id = get_identifier (c_common_reswords[i].word); C_SET_RID_CODE (id, c_common_reswords[i].rid); C_IS_RESERVED_WORD (id) = 1; ridpointers [(int) c_common_reswords[i].rid] = id; } } /* The C lexer intermediates between the lexer in cpplib and c-lex.c and the C parser. Unlike the C++ lexer, the parser structure stores the lexer information instead of using a separate structure. Identifiers are separated into ordinary identifiers, type names, keywords and some other Objective-C types of identifiers, and some look-ahead is maintained. ??? It might be a good idea to lex the whole file up front (as for C++). It would then be possible to share more of the C and C++ lexer code, if desired. */ /* The following local token type is used. */ /* A keyword. */ #define CPP_KEYWORD ((enum cpp_ttype) (N_TTYPES + 1)) /* More information about the type of a CPP_NAME token. */ typedef enum c_id_kind { /* An ordinary identifier. */ C_ID_ID, /* An identifier declared as a typedef name. */ C_ID_TYPENAME, /* An identifier declared as an Objective-C class name. */ C_ID_CLASSNAME, /* An address space identifier. */ C_ID_ADDRSPACE, /* Not an identifier. */ C_ID_NONE } c_id_kind; /* A single C token after string literal concatenation and conversion of preprocessing tokens to tokens. */ typedef struct GTY (()) c_token { /* The kind of token. */ ENUM_BITFIELD (cpp_ttype, type, 8); /* If this token is a CPP_NAME, this value indicates whether also declared as some kind of type. Otherwise, it is C_ID_NONE. */ ENUM_BITFIELD (c_id_kind, id_kind, 8); /* If this token is a keyword, this value indicates which keyword. 
Otherwise, this value is RID_MAX. */ ENUM_BITFIELD (rid, keyword, 8); /* If this token is a CPP_PRAGMA, this indicates the pragma that was seen. Otherwise it is PRAGMA_NONE. */ ENUM_BITFIELD (pragma_kind, pragma_kind, 8); /* The value associated with this token, if any. */ tree value; /* The location at which this token was found. */ location_t location; } c_token; /* A parser structure recording information about the state and context of parsing. Includes lexer information with up to two tokens of look-ahead; more are not needed for C. */ typedef struct GTY(()) c_parser { /* The look-ahead tokens. */ c_token tokens[2]; /* How many look-ahead tokens are available (0, 1 or 2). */ short tokens_avail; /* True if a syntax error is being recovered from; false otherwise. c_parser_error sets this flag. It should clear this flag when enough tokens have been consumed to recover from the error. */ BOOL_BITFIELD error : 1; /* True if we're processing a pragma, and shouldn't automatically consume CPP_PRAGMA_EOL. */ BOOL_BITFIELD in_pragma : 1; /* True if we're parsing the outermost block of an if statement. */ BOOL_BITFIELD in_if_block : 1; /* True if we want to lex an untranslated string. */ BOOL_BITFIELD lex_untranslated_string : 1; /* Objective-C specific parser/lexer information. */ BOOL_BITFIELD objc_pq_context : 1; /* The following flag is needed to contextualize Objective-C lexical analysis. In some cases (e.g., 'int NSObject;'), it is undesirable to bind an identifier to an Objective-C class, even if a class with that name exists. */ BOOL_BITFIELD objc_need_raw_identifier : 1; } c_parser; /* The actual parser and external interface. ??? Does this need to be garbage-collected? */ static GTY (()) c_parser *the_parser; /* Read in and lex a single token, storing it in *TOKEN. */ static void c_lex_one_token (c_parser *parser, c_token *token) { timevar_push (TV_LEX); token->type = c_lex_with_flags (&token->value, &token->location, NULL, (parser->lex_untranslated_string ? C_LEX_STRING_NO_TRANSLATE : 0)); token->id_kind = C_ID_NONE; token->keyword = RID_MAX; token->pragma_kind = PRAGMA_NONE; switch (token->type) { case CPP_NAME: { tree decl; bool objc_force_identifier = parser->objc_need_raw_identifier; if (c_dialect_objc ()) parser->objc_need_raw_identifier = false; if (C_IS_RESERVED_WORD (token->value)) { enum rid rid_code = C_RID_CODE (token->value); if (rid_code == RID_CXX_COMPAT_WARN) { warning_at (token->location, OPT_Wc___compat, "identifier %qE conflicts with C++ keyword", token->value); } else if (rid_code >= RID_FIRST_ADDR_SPACE && rid_code <= RID_LAST_ADDR_SPACE) { token->id_kind = C_ID_ADDRSPACE; token->keyword = rid_code; break; } else if (c_dialect_objc ()) { if (!objc_is_reserved_word (token->value) && (!OBJC_IS_PQ_KEYWORD (rid_code) || parser->objc_pq_context)) { /* Return the canonical spelling for this keyword. */ token->value = ridpointers[(int) rid_code]; token->type = CPP_KEYWORD; token->keyword = rid_code; break; } } else { token->type = CPP_KEYWORD; token->keyword = rid_code; break; } } decl = lookup_name (token->value); if (decl) { if (TREE_CODE (decl) == TYPE_DECL) { token->id_kind = C_ID_TYPENAME; break; } } else if (c_dialect_objc ()) { tree objc_interface_decl = objc_is_class_name (token->value); /* Objective-C class names are in the same namespace as variables and typedefs, and hence are shadowed by local declarations. 
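Hence the lookup below treats the identifier as a class name only at file scope, or when the context has not demanded a raw identifier.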
*/ if (objc_interface_decl && (global_bindings_p () || (!objc_force_identifier && !decl))) { token->value = objc_interface_decl; token->id_kind = C_ID_CLASSNAME; break; } } token->id_kind = C_ID_ID; } break; case CPP_AT_NAME: /* This only happens in Objective-C; it must be a keyword. */ token->type = CPP_KEYWORD; token->keyword = C_RID_CODE (token->value); break; case CPP_COLON: case CPP_COMMA: case CPP_CLOSE_PAREN: case CPP_SEMICOLON: /* These tokens may affect the interpretation of any identifiers following, if doing Objective-C. */ if (c_dialect_objc ()) parser->objc_need_raw_identifier = false; break; case CPP_PRAGMA: /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST. */ token->pragma_kind = (enum pragma_kind) TREE_INT_CST_LOW (token->value); token->value = NULL; break; default: break; } timevar_pop (TV_LEX); } /* Return a pointer to the next token from PARSER, reading it in if necessary. */ static inline c_token * c_parser_peek_token (c_parser *parser) { if (parser->tokens_avail == 0) { c_lex_one_token (parser, &parser->tokens[0]); parser->tokens_avail = 1; } return &parser->tokens[0]; } /* Return true if the next token from PARSER has the indicated TYPE. */ static inline bool c_parser_next_token_is (c_parser *parser, enum cpp_ttype type) { return c_parser_peek_token (parser)->type == type; } /* Return true if the next token from PARSER does not have the indicated TYPE. */ static inline bool c_parser_next_token_is_not (c_parser *parser, enum cpp_ttype type) { return !c_parser_next_token_is (parser, type); } /* Return true if the next token from PARSER is the indicated KEYWORD. */ static inline bool c_parser_next_token_is_keyword (c_parser *parser, enum rid keyword) { return c_parser_peek_token (parser)->keyword == keyword; } /* Return true if TOKEN can start a type name, false otherwise. */ static bool c_token_starts_typename (c_token *token) { switch (token->type) { case CPP_NAME: switch (token->id_kind) { case C_ID_ID: return false; case C_ID_ADDRSPACE: return true; case C_ID_TYPENAME: return true; case C_ID_CLASSNAME: gcc_assert (c_dialect_objc ()); return true; default: gcc_unreachable (); } case CPP_KEYWORD: switch (token->keyword) { case RID_UNSIGNED: case RID_LONG: case RID_SHORT: case RID_SIGNED: case RID_COMPLEX: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: case RID_BOOL: case RID_ENUM: case RID_STRUCT: case RID_UNION: case RID_TYPEOF: case RID_CONST: case RID_VOLATILE: case RID_RESTRICT: case RID_ATTRIBUTE: case RID_FRACT: case RID_ACCUM: case RID_SAT: return true; default: return false; } case CPP_LESS: if (c_dialect_objc ()) return true; return false; default: return false; } } /* Return true if the next token from PARSER can start a type name, false otherwise. */ static inline bool c_parser_next_token_starts_typename (c_parser *parser) { c_token *token = c_parser_peek_token (parser); return c_token_starts_typename (token); } /* Return true if TOKEN can start declaration specifiers, false otherwise. 
*/ static bool c_token_starts_declspecs (c_token *token) { switch (token->type) { case CPP_NAME: switch (token->id_kind) { case C_ID_ID: return false; case C_ID_ADDRSPACE: return true; case C_ID_TYPENAME: return true; case C_ID_CLASSNAME: gcc_assert (c_dialect_objc ()); return true; default: gcc_unreachable (); } case CPP_KEYWORD: switch (token->keyword) { case RID_STATIC: case RID_EXTERN: case RID_REGISTER: case RID_TYPEDEF: case RID_INLINE: case RID_AUTO: case RID_THREAD: case RID_UNSIGNED: case RID_LONG: case RID_SHORT: case RID_SIGNED: case RID_COMPLEX: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: case RID_BOOL: case RID_ENUM: case RID_STRUCT: case RID_UNION: case RID_TYPEOF: case RID_CONST: case RID_VOLATILE: case RID_RESTRICT: case RID_ATTRIBUTE: case RID_FRACT: case RID_ACCUM: case RID_SAT: return true; default: return false; } case CPP_LESS: if (c_dialect_objc ()) return true; return false; default: return false; } } /* Return true if the next token from PARSER can start declaration specifiers, false otherwise. */ static inline bool c_parser_next_token_starts_declspecs (c_parser *parser) { c_token *token = c_parser_peek_token (parser); return c_token_starts_declspecs (token); } /* Return a pointer to the next-but-one token from PARSER, reading it in if necessary. The next token is already read in. */ static c_token * c_parser_peek_2nd_token (c_parser *parser) { if (parser->tokens_avail >= 2) return &parser->tokens[1]; gcc_assert (parser->tokens_avail == 1); gcc_assert (parser->tokens[0].type != CPP_EOF); gcc_assert (parser->tokens[0].type != CPP_PRAGMA_EOL); c_lex_one_token (parser, &parser->tokens[1]); parser->tokens_avail = 2; return &parser->tokens[1]; } /* Consume the next token from PARSER. */ static void c_parser_consume_token (c_parser *parser) { gcc_assert (parser->tokens_avail >= 1); gcc_assert (parser->tokens[0].type != CPP_EOF); gcc_assert (!parser->in_pragma || parser->tokens[0].type != CPP_PRAGMA_EOL); gcc_assert (parser->error || parser->tokens[0].type != CPP_PRAGMA); if (parser->tokens_avail == 2) parser->tokens[0] = parser->tokens[1]; parser->tokens_avail--; } /* Expect the current token to be a #pragma. Consume it and remember that we've begun parsing a pragma. */ static void c_parser_consume_pragma (c_parser *parser) { gcc_assert (!parser->in_pragma); gcc_assert (parser->tokens_avail >= 1); gcc_assert (parser->tokens[0].type == CPP_PRAGMA); if (parser->tokens_avail == 2) parser->tokens[0] = parser->tokens[1]; parser->tokens_avail--; parser->in_pragma = true; } /* Update the globals input_location and in_system_header from TOKEN. */ static inline void c_parser_set_source_position_from_token (c_token *token) { if (token->type != CPP_EOF) { input_location = token->location; } } /* Issue a diagnostic of the form FILE:LINE: MESSAGE before TOKEN where TOKEN is the next token in the input stream of PARSER. MESSAGE (specified by the caller) is usually of the form "expected OTHER-TOKEN". Do not issue a diagnostic if still recovering from an error. ??? This is taken from the C++ parser, but building up messages in this way is not i18n-friendly and some other approach should be used. */ static void c_parser_error (c_parser *parser, const char *gmsgid) { c_token *token = c_parser_peek_token (parser); if (parser->error) return; parser->error = true; if (!gmsgid) return; /* This diagnostic makes more sense if it is tagged to the line of the token we just peeked at. 
*/ c_parser_set_source_position_from_token (token); c_parse_error (gmsgid, /* Because c_parse_error does not understand CPP_KEYWORD, keywords are treated like identifiers. */ (token->type == CPP_KEYWORD ? CPP_NAME : token->type), /* ??? The C parser does not save the cpp flags of a token, we need to pass 0 here and we will not get the source spelling of some tokens but rather the canonical spelling. */ token->value, /*flags=*/0); } /* If the next token is of the indicated TYPE, consume it. Otherwise, issue the error MSGID. If MSGID is NULL then a message has already been produced and no message will be produced this time. Returns true if found, false otherwise. */ static bool c_parser_require (c_parser *parser, enum cpp_ttype type, const char *msgid) { if (c_parser_next_token_is (parser, type)) { c_parser_consume_token (parser); return true; } else { c_parser_error (parser, msgid); return false; } } /* If the next token is the indicated keyword, consume it. Otherwise, issue the error MSGID. Returns true if found, false otherwise. */ static bool c_parser_require_keyword (c_parser *parser, enum rid keyword, const char *msgid) { if (c_parser_next_token_is_keyword (parser, keyword)) { c_parser_consume_token (parser); return true; } else { c_parser_error (parser, msgid); return false; } } /* Like c_parser_require, except that tokens will be skipped until the desired token is found. An error message is still produced if the next token is not as expected. If MSGID is NULL then a message has already been produced and no message will be produced this time. */ static void c_parser_skip_until_found (c_parser *parser, enum cpp_ttype type, const char *msgid) { unsigned nesting_depth = 0; if (c_parser_require (parser, type, msgid)) return; /* Skip tokens until the desired token is found. */ while (true) { /* Peek at the next token. */ c_token *token = c_parser_peek_token (parser); /* If we've reached the token we want, consume it and stop. */ if (token->type == type && !nesting_depth) { c_parser_consume_token (parser); break; } /* If we've run out of tokens, stop. */ if (token->type == CPP_EOF) return; if (token->type == CPP_PRAGMA_EOL && parser->in_pragma) return; if (token->type == CPP_OPEN_BRACE || token->type == CPP_OPEN_PAREN || token->type == CPP_OPEN_SQUARE) ++nesting_depth; else if (token->type == CPP_CLOSE_BRACE || token->type == CPP_CLOSE_PAREN || token->type == CPP_CLOSE_SQUARE) { if (nesting_depth-- == 0) break; } /* Consume this token. */ c_parser_consume_token (parser); } parser->error = false; } /* Skip tokens until the end of a parameter is found, but do not consume the comma, semicolon or closing delimiter. */ static void c_parser_skip_to_end_of_parameter (c_parser *parser) { unsigned nesting_depth = 0; while (true) { c_token *token = c_parser_peek_token (parser); if ((token->type == CPP_COMMA || token->type == CPP_SEMICOLON) && !nesting_depth) break; /* If we've run out of tokens, stop. */ if (token->type == CPP_EOF) return; if (token->type == CPP_PRAGMA_EOL && parser->in_pragma) return; if (token->type == CPP_OPEN_BRACE || token->type == CPP_OPEN_PAREN || token->type == CPP_OPEN_SQUARE) ++nesting_depth; else if (token->type == CPP_CLOSE_BRACE || token->type == CPP_CLOSE_PAREN || token->type == CPP_CLOSE_SQUARE) { if (nesting_depth-- == 0) break; } /* Consume this token. */ c_parser_consume_token (parser); } parser->error = false; } /* Expect to be at the end of the pragma directive and consume an end of line marker. 
*/ static void c_parser_skip_to_pragma_eol (c_parser *parser) { gcc_assert (parser->in_pragma); parser->in_pragma = false; if (!c_parser_require (parser, CPP_PRAGMA_EOL, "expected end of line")) while (true) { c_token *token = c_parser_peek_token (parser); if (token->type == CPP_EOF) break; if (token->type == CPP_PRAGMA_EOL) { c_parser_consume_token (parser); break; } c_parser_consume_token (parser); } parser->error = false; } /* Skip tokens until we have consumed an entire block, or until we have consumed a non-nested ';'. */ static void c_parser_skip_to_end_of_block_or_statement (c_parser *parser) { unsigned nesting_depth = 0; bool save_error = parser->error; while (true) { c_token *token; /* Peek at the next token. */ token = c_parser_peek_token (parser); switch (token->type) { case CPP_EOF: return; case CPP_PRAGMA_EOL: if (parser->in_pragma) return; break; case CPP_SEMICOLON: /* If the next token is a ';', we have reached the end of the statement. */ if (!nesting_depth) { /* Consume the ';'. */ c_parser_consume_token (parser); goto finished; } break; case CPP_CLOSE_BRACE: /* If the next token is a non-nested '}', then we have reached the end of the current block. */ if (nesting_depth == 0 || --nesting_depth == 0) { c_parser_consume_token (parser); goto finished; } break; case CPP_OPEN_BRACE: /* If the next token is a '{', then we are entering a new block. Consume the entire block. */ ++nesting_depth; break; case CPP_PRAGMA: /* If we see a pragma, consume the whole thing at once. We have some safeguards against consuming pragmas willy-nilly. Normally, we'd expect to be here with parser->error set, which disables these safeguards. But it's possible to get here for secondary error recovery, after parser->error has been cleared. */ c_parser_consume_pragma (parser); c_parser_skip_to_pragma_eol (parser); parser->error = save_error; continue; default: break; } c_parser_consume_token (parser); } finished: parser->error = false; } /* CPP's options (initialized by c-opts.c). */ extern cpp_options *cpp_opts; /* Save the warning flags which are controlled by __extension__. */ static inline int disable_extension_diagnostics (void) { int ret = (pedantic | (warn_pointer_arith << 1) | (warn_traditional << 2) | (flag_iso << 3) | (warn_long_long << 4) | (warn_cxx_compat << 5)); cpp_opts->pedantic = pedantic = 0; warn_pointer_arith = 0; cpp_opts->warn_traditional = warn_traditional = 0; flag_iso = 0; cpp_opts->warn_long_long = warn_long_long = 0; warn_cxx_compat = 0; return ret; } /* Restore the warning flags which are controlled by __extension__. FLAGS is the return value from disable_extension_diagnostics. */ static inline void restore_extension_diagnostics (int flags) { cpp_opts->pedantic = pedantic = flags & 1; warn_pointer_arith = (flags >> 1) & 1; cpp_opts->warn_traditional = warn_traditional = (flags >> 2) & 1; flag_iso = (flags >> 3) & 1; cpp_opts->warn_long_long = warn_long_long = (flags >> 4) & 1; warn_cxx_compat = (flags >> 5) & 1; } /* Possible kinds of declarator to parse. */ typedef enum c_dtr_syn { /* A normal declarator with an identifier. */ C_DTR_NORMAL, /* An abstract declarator (maybe empty). */ C_DTR_ABSTRACT, /* A parameter declarator: may be either, but after a type name does not redeclare a typedef name as an identifier if it can alternatively be interpreted as a typedef name; see DR#009, applied in C90 TC1, omitted from C99 and reapplied in C99 TC2 following DR#249.
For example, given a typedef T, "int T" and "int *T" are valid parameter declarations redeclaring T, while "int (T)" and "int * (T)" and "int (T[])" and "int (T (int))" are abstract declarators rather than involving redundant parentheses; the same applies with attributes inside the parentheses before "T". */ C_DTR_PARM } c_dtr_syn; static void c_parser_external_declaration (c_parser *); static void c_parser_asm_definition (c_parser *); static void c_parser_declaration_or_fndef (c_parser *, bool, bool, bool, bool); static void c_parser_declspecs (c_parser *, struct c_declspecs *, bool, bool, bool); static struct c_typespec c_parser_enum_specifier (c_parser *); static struct c_typespec c_parser_struct_or_union_specifier (c_parser *); static tree c_parser_struct_declaration (c_parser *); static struct c_typespec c_parser_typeof_specifier (c_parser *); static struct c_declarator *c_parser_declarator (c_parser *, bool, c_dtr_syn, bool *); static struct c_declarator *c_parser_direct_declarator (c_parser *, bool, c_dtr_syn, bool *); static struct c_declarator *c_parser_direct_declarator_inner (c_parser *, bool, struct c_declarator *); static struct c_arg_info *c_parser_parms_declarator (c_parser *, bool, tree); static struct c_arg_info *c_parser_parms_list_declarator (c_parser *, tree); static struct c_parm *c_parser_parameter_declaration (c_parser *, tree); static tree c_parser_simple_asm_expr (c_parser *); static tree c_parser_attributes (c_parser *); static struct c_type_name *c_parser_type_name (c_parser *); static struct c_expr c_parser_initializer (c_parser *); static struct c_expr c_parser_braced_init (c_parser *, tree, bool); static void c_parser_initelt (c_parser *); static void c_parser_initval (c_parser *, struct c_expr *); static tree c_parser_compound_statement (c_parser *); static void c_parser_compound_statement_nostart (c_parser *); static void c_parser_label (c_parser *); static void c_parser_statement (c_parser *); static void c_parser_statement_after_labels (c_parser *); static void c_parser_if_statement (c_parser *); static void c_parser_switch_statement (c_parser *); static void c_parser_while_statement (c_parser *); static void c_parser_do_statement (c_parser *); static void c_parser_for_statement (c_parser *); static tree c_parser_asm_statement (c_parser *); static tree c_parser_asm_operands (c_parser *, bool); static tree c_parser_asm_goto_operands (c_parser *); static tree c_parser_asm_clobbers (c_parser *); static struct c_expr c_parser_expr_no_commas (c_parser *, struct c_expr *); static struct c_expr c_parser_conditional_expression (c_parser *, struct c_expr *); static struct c_expr c_parser_binary_expression (c_parser *, struct c_expr *); static struct c_expr c_parser_cast_expression (c_parser *, struct c_expr *); static struct c_expr c_parser_unary_expression (c_parser *); static struct c_expr c_parser_sizeof_expression (c_parser *); static struct c_expr c_parser_alignof_expression (c_parser *); static struct c_expr c_parser_postfix_expression (c_parser *); static struct c_expr c_parser_postfix_expression_after_paren_type (c_parser *, struct c_type_name *, location_t); static struct c_expr c_parser_postfix_expression_after_primary (c_parser *, location_t loc, struct c_expr); static struct c_expr c_parser_expression (c_parser *); static struct c_expr c_parser_expression_conv (c_parser *); static VEC(tree,gc) *c_parser_expr_list (c_parser *, bool, bool, VEC(tree,gc) **); static void c_parser_omp_construct (c_parser *); static void c_parser_omp_threadprivate (c_parser 
*); static void c_parser_omp_barrier (c_parser *); static void c_parser_omp_flush (c_parser *); static void c_parser_omp_taskwait (c_parser *); enum pragma_context { pragma_external, pragma_stmt, pragma_compound }; static bool c_parser_pragma (c_parser *, enum pragma_context); /* These Objective-C parser functions are only ever called when compiling Objective-C. */ static void c_parser_objc_class_definition (c_parser *); static void c_parser_objc_class_instance_variables (c_parser *); static void c_parser_objc_class_declaration (c_parser *); static void c_parser_objc_alias_declaration (c_parser *); static void c_parser_objc_protocol_definition (c_parser *); static enum tree_code c_parser_objc_method_type (c_parser *); static void c_parser_objc_method_definition (c_parser *); static void c_parser_objc_methodprotolist (c_parser *); static void c_parser_objc_methodproto (c_parser *); static tree c_parser_objc_method_decl (c_parser *); static tree c_parser_objc_type_name (c_parser *); static tree c_parser_objc_protocol_refs (c_parser *); static void c_parser_objc_try_catch_statement (c_parser *); static void c_parser_objc_synchronized_statement (c_parser *); static tree c_parser_objc_selector (c_parser *); static tree c_parser_objc_selector_arg (c_parser *); static tree c_parser_objc_receiver (c_parser *); static tree c_parser_objc_message_args (c_parser *); static tree c_parser_objc_keywordexpr (c_parser *); /* Parse a translation unit (C90 6.7, C99 6.9). translation-unit: external-declarations external-declarations: external-declaration external-declarations external-declaration GNU extensions: translation-unit: empty */ static void c_parser_translation_unit (c_parser *parser) { if (c_parser_next_token_is (parser, CPP_EOF)) { pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic, "ISO C forbids an empty translation unit"); } else { void *obstack_position = obstack_alloc (&parser_obstack, 0); mark_valid_location_for_stdc_pragma (false); do { ggc_collect (); c_parser_external_declaration (parser); obstack_free (&parser_obstack, obstack_position); } while (c_parser_next_token_is_not (parser, CPP_EOF)); } } /* Parse an external declaration (C90 6.7, C99 6.9). 
external-declaration: function-definition declaration GNU extensions: external-declaration: asm-definition ; __extension__ external-declaration Objective-C: external-declaration: objc-class-definition objc-class-declaration objc-alias-declaration objc-protocol-definition objc-method-definition @end */ static void c_parser_external_declaration (c_parser *parser) { int ext; switch (c_parser_peek_token (parser)->type) { case CPP_KEYWORD: switch (c_parser_peek_token (parser)->keyword) { case RID_EXTENSION: ext = disable_extension_diagnostics (); c_parser_consume_token (parser); c_parser_external_declaration (parser); restore_extension_diagnostics (ext); break; case RID_ASM: c_parser_asm_definition (parser); break; case RID_AT_INTERFACE: case RID_AT_IMPLEMENTATION: gcc_assert (c_dialect_objc ()); c_parser_objc_class_definition (parser); break; case RID_CLASS: gcc_assert (c_dialect_objc ()); c_parser_objc_class_declaration (parser); break; case RID_AT_ALIAS: gcc_assert (c_dialect_objc ()); c_parser_objc_alias_declaration (parser); break; case RID_AT_PROTOCOL: gcc_assert (c_dialect_objc ()); c_parser_objc_protocol_definition (parser); break; case RID_AT_END: gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); objc_finish_implementation (); break; default: goto decl_or_fndef; } break; case CPP_SEMICOLON: pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic, "ISO C does not allow extra %<;%> outside of a function"); c_parser_consume_token (parser); break; case CPP_PRAGMA: mark_valid_location_for_stdc_pragma (true); c_parser_pragma (parser, pragma_external); mark_valid_location_for_stdc_pragma (false); break; case CPP_PLUS: case CPP_MINUS: if (c_dialect_objc ()) { c_parser_objc_method_definition (parser); break; } /* Else fall through, and yield a syntax error trying to parse as a declaration or function definition. */ default: decl_or_fndef: /* A declaration or a function definition. We can only tell which after parsing the declaration specifiers, if any, and the first declarator. */ c_parser_declaration_or_fndef (parser, true, true, false, true); break; } } /* Parse a declaration or function definition (C90 6.5, 6.7.1, C99 6.7, 6.9.1). If FNDEF_OK is true, a function definition is accepted; otherwise (old-style parameter declarations) only other declarations are accepted. If NESTED is true, we are inside a function or parsing old-style parameter declarations; any functions encountered are nested functions and declaration specifiers are required; otherwise we are at top level and functions are normal functions and declaration specifiers may be optional. If EMPTY_OK is true, empty declarations are OK (subject to all other constraints); otherwise (old-style parameter declarations) they are diagnosed. If START_ATTR_OK is true, the declaration specifiers may start with attributes; otherwise they may not. declaration: declaration-specifiers init-declarator-list[opt] ; function-definition: declaration-specifiers[opt] declarator declaration-list[opt] compound-statement declaration-list: declaration declaration-list declaration init-declarator-list: init-declarator init-declarator-list , init-declarator init-declarator: declarator simple-asm-expr[opt] attributes[opt] declarator simple-asm-expr[opt] attributes[opt] = initializer GNU extensions: nested-function-definition: declaration-specifiers declarator declaration-list[opt] compound-statement The simple-asm-expr and attributes are GNU extensions. This function does not handle __extension__; that is handled in its callers. ??? 
Following the old parser, __extension__ may start external declarations, declarations in functions and declarations at the start of "for" loops, but not old-style parameter declarations. C99 requires declaration specifiers in a function definition; the absence is diagnosed through the diagnosis of implicit int. In GNU C we also allow but diagnose declarations without declaration specifiers, but only at top level (elsewhere they conflict with other syntax). OpenMP: declaration: threadprivate-directive */ static void c_parser_declaration_or_fndef (c_parser *parser, bool fndef_ok, bool empty_ok, bool nested, bool start_attr_ok) { struct c_declspecs *specs; tree prefix_attrs; tree all_prefix_attrs; bool diagnosed_no_specs = false; location_t here = c_parser_peek_token (parser)->location; specs = build_null_declspecs (); c_parser_declspecs (parser, specs, true, true, start_attr_ok); if (parser->error) { c_parser_skip_to_end_of_block_or_statement (parser); return; } if (nested && !specs->declspecs_seen_p) { c_parser_error (parser, "expected declaration specifiers"); c_parser_skip_to_end_of_block_or_statement (parser); return; } finish_declspecs (specs); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { if (empty_ok) shadow_tag (specs); else { shadow_tag_warned (specs, 1); pedwarn (here, 0, "empty declaration"); } c_parser_consume_token (parser); return; } pending_xref_error (); prefix_attrs = specs->attrs; all_prefix_attrs = prefix_attrs; specs->attrs = NULL_TREE; while (true) { struct c_declarator *declarator; bool dummy = false; tree fnbody; /* Declaring either one or more declarators (in which case we should diagnose if there were no declaration specifiers) or a function definition (in which case the diagnostic for implicit int suffices). */ declarator = c_parser_declarator (parser, specs->type_seen_p, C_DTR_NORMAL, &dummy); if (declarator == NULL) { c_parser_skip_to_end_of_block_or_statement (parser); return; } if (c_parser_next_token_is (parser, CPP_EQ) || c_parser_next_token_is (parser, CPP_COMMA) || c_parser_next_token_is (parser, CPP_SEMICOLON) || c_parser_next_token_is_keyword (parser, RID_ASM) || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) { tree asm_name = NULL_TREE; tree postfix_attrs = NULL_TREE; if (!diagnosed_no_specs && !specs->declspecs_seen_p) { diagnosed_no_specs = true; pedwarn (here, 0, "data definition has no type or storage class"); } /* Having seen a data definition, there cannot now be a function definition. */ fndef_ok = false; if (c_parser_next_token_is_keyword (parser, RID_ASM)) asm_name = c_parser_simple_asm_expr (parser); if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) postfix_attrs = c_parser_attributes (parser); if (c_parser_next_token_is (parser, CPP_EQ)) { tree d; struct c_expr init; location_t init_loc; c_parser_consume_token (parser); /* The declaration of the variable is in effect while its initializer is parsed. 
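This lets the initializer refer to the variable itself, as in 'int x = sizeof (x);'.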
*/ d = start_decl (declarator, specs, true, chainon (postfix_attrs, all_prefix_attrs)); if (!d) d = error_mark_node; start_init (d, asm_name, global_bindings_p ()); init_loc = c_parser_peek_token (parser)->location; init = c_parser_initializer (parser); finish_init (); if (d != error_mark_node) { maybe_warn_string_init (TREE_TYPE (d), init); finish_decl (d, init_loc, init.value, init.original_type, asm_name); } } else { tree d = start_decl (declarator, specs, false, chainon (postfix_attrs, all_prefix_attrs)); if (d) finish_decl (d, UNKNOWN_LOCATION, NULL_TREE, NULL_TREE, asm_name); } if (c_parser_next_token_is (parser, CPP_COMMA)) { c_parser_consume_token (parser); if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) all_prefix_attrs = chainon (c_parser_attributes (parser), prefix_attrs); else all_prefix_attrs = prefix_attrs; continue; } else if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { c_parser_consume_token (parser); return; } else { c_parser_error (parser, "expected %<,%> or %<;%>"); c_parser_skip_to_end_of_block_or_statement (parser); return; } } else if (!fndef_ok) { c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, " "%<asm%> or %<__attribute__%>"); c_parser_skip_to_end_of_block_or_statement (parser); return; } /* Function definition (nested or otherwise). */ if (nested) { pedwarn (here, OPT_pedantic, "ISO C forbids nested functions"); c_push_function_context (); } if (!start_function (specs, declarator, all_prefix_attrs)) { /* This can appear in many cases looking nothing like a function definition, so we don't give a more specific error suggesting there was one. */ c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, %<asm%> " "or %<__attribute__%>"); if (nested) c_pop_function_context (); break; } /* Parse old-style parameter declarations. ??? Attributes are not allowed to start declaration specifiers here because of a syntax conflict between a function declaration with attribute suffix and a function definition with an attribute prefix on first old-style parameter declaration. Following the old parser, they are not accepted on subsequent old-style parameter declarations either. However, there is no ambiguity after the first declaration, nor indeed on the first as long as we don't allow postfix attributes after a declarator with a nonempty identifier list in a definition; and postfix attributes have never been accepted here in function definitions either. */ while (c_parser_next_token_is_not (parser, CPP_EOF) && c_parser_next_token_is_not (parser, CPP_OPEN_BRACE)) c_parser_declaration_or_fndef (parser, false, false, true, false); store_parm_decls (); DECL_STRUCT_FUNCTION (current_function_decl)->function_start_locus = c_parser_peek_token (parser)->location; fnbody = c_parser_compound_statement (parser); if (nested) { tree decl = current_function_decl; /* Mark nested functions as needing static-chain initially. lower_nested_functions will recompute it but the DECL_STATIC_CHAIN flag is also used before that happens, by initializer_constant_valid_p. See gcc.dg/nested-fn-2.c. */ DECL_STATIC_CHAIN (decl) = 1; add_stmt (fnbody); finish_function (); c_pop_function_context (); add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl)); } else { add_stmt (fnbody); finish_function (); } break; } } /* Parse an asm-definition (asm() outside a function body). This is a GNU extension. 
asm-definition: simple-asm-expr ; */ static void c_parser_asm_definition (c_parser *parser) { tree asm_str = c_parser_simple_asm_expr (parser); if (asm_str) cgraph_add_asm_node (asm_str); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } /* Parse some declaration specifiers (possibly none) (C90 6.5, C99 6.7), adding them to SPECS (which may already include some). Storage class specifiers are accepted iff SCSPEC_OK; type specifiers are accepted iff TYPESPEC_OK; attributes are accepted at the start iff START_ATTR_OK. declaration-specifiers: storage-class-specifier declaration-specifiers[opt] type-specifier declaration-specifiers[opt] type-qualifier declaration-specifiers[opt] function-specifier declaration-specifiers[opt] Function specifiers (inline) are from C99, and are currently handled as storage class specifiers, as is __thread. C90 6.5.1, C99 6.7.1: storage-class-specifier: typedef extern static auto register C99 6.7.4: function-specifier: inline C90 6.5.2, C99 6.7.2: type-specifier: void char short int long float double signed unsigned _Bool _Complex [_Imaginary removed in C99 TC2] struct-or-union-specifier enum-specifier typedef-name (_Bool and _Complex are new in C99.) C90 6.5.3, C99 6.7.3: type-qualifier: const restrict volatile address-space-qualifier (restrict is new in C99.) GNU extensions: declaration-specifiers: attributes declaration-specifiers[opt] type-qualifier: address-space address-space: identifier recognized by the target storage-class-specifier: __thread type-specifier: typeof-specifier _Decimal32 _Decimal64 _Decimal128 _Fract _Accum _Sat (_Fract, _Accum, and _Sat are new from ISO/IEC DTR 18037: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1169.pdf) Objective-C: type-specifier: class-name objc-protocol-refs[opt] typedef-name objc-protocol-refs objc-protocol-refs */ static void c_parser_declspecs (c_parser *parser, struct c_declspecs *specs, bool scspec_ok, bool typespec_ok, bool start_attr_ok) { bool attrs_ok = start_attr_ok; bool seen_type = specs->type_seen_p; while (c_parser_next_token_is (parser, CPP_NAME) || c_parser_next_token_is (parser, CPP_KEYWORD) || (c_dialect_objc () && c_parser_next_token_is (parser, CPP_LESS))) { struct c_typespec t; tree attrs; location_t loc = c_parser_peek_token (parser)->location; if (c_parser_next_token_is (parser, CPP_NAME)) { tree value = c_parser_peek_token (parser)->value; c_id_kind kind = c_parser_peek_token (parser)->id_kind; if (kind == C_ID_ADDRSPACE) { addr_space_t as = c_parser_peek_token (parser)->keyword - RID_FIRST_ADDR_SPACE; declspecs_add_addrspace (specs, as); c_parser_consume_token (parser); attrs_ok = true; continue; } /* This finishes the specifiers unless a type name is OK, it is declared as a type name and a type name hasn't yet been seen. */ if (!typespec_ok || seen_type || (kind != C_ID_TYPENAME && kind != C_ID_CLASSNAME)) break; c_parser_consume_token (parser); seen_type = true; attrs_ok = true; if (kind == C_ID_TYPENAME && (!c_dialect_objc () || c_parser_next_token_is_not (parser, CPP_LESS))) { t.kind = ctsk_typedef; /* For a typedef name, record the meaning, not the name. In case of 'foo foo, bar;'. 
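There the first declarator redeclares 'foo' as a variable, so by the time 'bar' is processed a lookup of the name would no longer find the type.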
*/ t.spec = lookup_name (value); t.expr = NULL_TREE; t.expr_const_operands = true; } else { tree proto = NULL_TREE; gcc_assert (c_dialect_objc ()); t.kind = ctsk_objc; if (c_parser_next_token_is (parser, CPP_LESS)) proto = c_parser_objc_protocol_refs (parser); t.spec = objc_get_protocol_qualified_type (value, proto); t.expr = NULL_TREE; t.expr_const_operands = true; } declspecs_add_type (loc, specs, t); continue; } if (c_parser_next_token_is (parser, CPP_LESS)) { /* Make "<SomeProtocol>" equivalent to "id <SomeProtocol>" - nisse@lysator.liu.se. */ tree proto; gcc_assert (c_dialect_objc ()); if (!typespec_ok || seen_type) break; proto = c_parser_objc_protocol_refs (parser); t.kind = ctsk_objc; t.spec = objc_get_protocol_qualified_type (NULL_TREE, proto); t.expr = NULL_TREE; t.expr_const_operands = true; declspecs_add_type (loc, specs, t); continue; } gcc_assert (c_parser_next_token_is (parser, CPP_KEYWORD)); switch (c_parser_peek_token (parser)->keyword) { case RID_STATIC: case RID_EXTERN: case RID_REGISTER: case RID_TYPEDEF: case RID_INLINE: case RID_AUTO: case RID_THREAD: if (!scspec_ok) goto out; attrs_ok = true; /* TODO: Distinguish between function specifiers (inline) and storage class specifiers, either here or in declspecs_add_scspec. */ declspecs_add_scspec (specs, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); break; case RID_UNSIGNED: case RID_LONG: case RID_SHORT: case RID_SIGNED: case RID_COMPLEX: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: case RID_BOOL: case RID_FRACT: case RID_ACCUM: case RID_SAT: if (!typespec_ok) goto out; attrs_ok = true; seen_type = true; if (c_dialect_objc ()) parser->objc_need_raw_identifier = true; t.kind = ctsk_resword; t.spec = c_parser_peek_token (parser)->value; t.expr = NULL_TREE; t.expr_const_operands = true; declspecs_add_type (loc, specs, t); c_parser_consume_token (parser); break; case RID_ENUM: if (!typespec_ok) goto out; attrs_ok = true; seen_type = true; t = c_parser_enum_specifier (parser); declspecs_add_type (loc, specs, t); break; case RID_STRUCT: case RID_UNION: if (!typespec_ok) goto out; attrs_ok = true; seen_type = true; t = c_parser_struct_or_union_specifier (parser); invoke_plugin_callbacks (PLUGIN_FINISH_TYPE, t.spec); declspecs_add_type (loc, specs, t); break; case RID_TYPEOF: /* ??? The old parser rejected typeof after other type specifiers, but is a syntax error the best way of handling this? */ if (!typespec_ok || seen_type) goto out; attrs_ok = true; seen_type = true; t = c_parser_typeof_specifier (parser); declspecs_add_type (loc, specs, t); break; case RID_CONST: case RID_VOLATILE: case RID_RESTRICT: attrs_ok = true; declspecs_add_qual (specs, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); break; case RID_ATTRIBUTE: if (!attrs_ok) goto out; attrs = c_parser_attributes (parser); declspecs_add_attrs (specs, attrs); break; default: goto out; } } out: ; } /* Parse an enum specifier (C90 6.5.2.2, C99 6.7.2.2). enum-specifier: enum attributes[opt] identifier[opt] { enumerator-list } attributes[opt] enum attributes[opt] identifier[opt] { enumerator-list , } attributes[opt] enum attributes[opt] identifier The form with trailing comma is new in C99. The forms with attributes are GNU extensions. In GNU C, we accept any expression without commas in the syntax (assignment expressions, not just conditional expressions); assignment expressions will be diagnosed as non-constant. 
enumerator-list: enumerator enumerator-list , enumerator enumerator: enumeration-constant enumeration-constant = constant-expression */ static struct c_typespec c_parser_enum_specifier (c_parser *parser) { struct c_typespec ret; tree attrs; tree ident = NULL_TREE; location_t enum_loc; location_t ident_loc = UNKNOWN_LOCATION; /* Quiet warning. */ gcc_assert (c_parser_next_token_is_keyword (parser, RID_ENUM)); enum_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); attrs = c_parser_attributes (parser); enum_loc = c_parser_peek_token (parser)->location; /* Set the location in case we create a decl now. */ c_parser_set_source_position_from_token (c_parser_peek_token (parser)); if (c_parser_next_token_is (parser, CPP_NAME)) { ident = c_parser_peek_token (parser)->value; ident_loc = c_parser_peek_token (parser)->location; enum_loc = ident_loc; c_parser_consume_token (parser); } if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { /* Parse an enum definition. */ struct c_enum_contents the_enum; tree type = start_enum (enum_loc, &the_enum, ident); tree postfix_attrs; /* We chain the enumerators in reverse order, then put them in forward order at the end. */ tree values = NULL_TREE; c_parser_consume_token (parser); while (true) { tree enum_id; tree enum_value; tree enum_decl; bool seen_comma; c_token *token; location_t comma_loc = UNKNOWN_LOCATION; /* Quiet warning. */ location_t value_loc; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL); values = error_mark_node; break; } token = c_parser_peek_token (parser); enum_id = token->value; /* Set the location in case we create a decl now. */ c_parser_set_source_position_from_token (token); value_loc = token->location; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_EQ)) { c_parser_consume_token (parser); value_loc = c_parser_peek_token (parser)->location; enum_value = c_parser_expr_no_commas (parser, NULL).value; } else enum_value = NULL_TREE; enum_decl = build_enumerator (value_loc, &the_enum, enum_id, enum_value); TREE_CHAIN (enum_decl) = values; values = enum_decl; seen_comma = false; if (c_parser_next_token_is (parser, CPP_COMMA)) { comma_loc = c_parser_peek_token (parser)->location; seen_comma = true; c_parser_consume_token (parser); } if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { if (seen_comma && !flag_isoc99) pedwarn (comma_loc, OPT_pedantic, "comma at end of enumerator list"); c_parser_consume_token (parser); break; } if (!seen_comma) { c_parser_error (parser, "expected %<,%> or %<}%>"); c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL); values = error_mark_node; break; } } postfix_attrs = c_parser_attributes (parser); ret.spec = finish_enum (type, nreverse (values), chainon (attrs, postfix_attrs)); ret.kind = ctsk_tagdef; ret.expr = NULL_TREE; ret.expr_const_operands = true; return ret; } else if (!ident) { c_parser_error (parser, "expected %<{%>"); ret.spec = error_mark_node; ret.kind = ctsk_tagref; ret.expr = NULL_TREE; ret.expr_const_operands = true; return ret; } ret = parser_xref_tag (ident_loc, ENUMERAL_TYPE, ident); /* In ISO C, enumerated types can be referred to only if already defined. */ if (pedantic && !COMPLETE_TYPE_P (ret.spec)) { gcc_assert (ident); pedwarn (enum_loc, OPT_pedantic, "ISO C forbids forward references to %<enum%> types"); } return ret; } /* Parse a struct or union specifier (C90 6.5.2.1, C99 6.7.2.1). 
struct-or-union-specifier: struct-or-union attributes[opt] identifier[opt] { struct-contents } attributes[opt] struct-or-union attributes[opt] identifier struct-contents: struct-declaration-list struct-declaration-list: struct-declaration ; struct-declaration-list struct-declaration ; GNU extensions: struct-contents: empty struct-declaration struct-declaration-list struct-declaration struct-declaration-list: struct-declaration-list ; ; (Note that in the syntax here, unlike that in ISO C, the semicolons are included here rather than in struct-declaration, in order to describe the syntax with extra semicolons and missing semicolon at end.) Objective-C: struct-declaration-list: @defs ( class-name ) (Note this does not include a trailing semicolon, but can be followed by further declarations, and gets a pedwarn-if-pedantic when followed by a semicolon.) */ static struct c_typespec c_parser_struct_or_union_specifier (c_parser *parser) { struct c_typespec ret; tree attrs; tree ident = NULL_TREE; location_t struct_loc; location_t ident_loc = UNKNOWN_LOCATION; enum tree_code code; switch (c_parser_peek_token (parser)->keyword) { case RID_STRUCT: code = RECORD_TYPE; break; case RID_UNION: code = UNION_TYPE; break; default: gcc_unreachable (); } struct_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); attrs = c_parser_attributes (parser); /* Set the location in case we create a decl now. */ c_parser_set_source_position_from_token (c_parser_peek_token (parser)); if (c_parser_next_token_is (parser, CPP_NAME)) { ident = c_parser_peek_token (parser)->value; ident_loc = c_parser_peek_token (parser)->location; struct_loc = ident_loc; c_parser_consume_token (parser); } if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { /* Parse a struct or union definition. Start the scope of the tag before parsing components. */ struct c_struct_parse_info *struct_info; tree type = start_struct (struct_loc, code, ident, &struct_info); tree postfix_attrs; /* We chain the components in reverse order, then put them in forward order at the end. Each struct-declaration may declare multiple components (comma-separated), so we must use chainon to join them, although when parsing each struct-declaration we can use TREE_CHAIN directly. The theory behind all this is that there will be more semicolon separated fields than comma separated fields, and so we'll be minimizing the number of node traversals required by chainon. */ tree contents = NULL_TREE; c_parser_consume_token (parser); /* Handle the Objective-C @defs construct, e.g. foo(sizeof(struct{ @defs(ClassName) }));. */ if (c_parser_next_token_is_keyword (parser, RID_AT_DEFS)) { tree name; gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) goto end_at_defs; if (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME) { name = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else { c_parser_error (parser, "expected class name"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); goto end_at_defs; } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); contents = nreverse (objc_get_class_ivars (name)); } end_at_defs: /* Parse the struct-declarations and semicolons. Problems with semicolons are diagnosed here; empty structures are diagnosed elsewhere. */ while (true) { tree decls; /* Parse any stray semicolon. 
*/ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic, "extra semicolon in struct or union specified"); c_parser_consume_token (parser); continue; } /* Stop if at the end of the struct or union contents. */ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { c_parser_consume_token (parser); break; } /* Accept #pragmas at struct scope. */ if (c_parser_next_token_is (parser, CPP_PRAGMA)) { c_parser_pragma (parser, pragma_external); continue; } /* Parse some comma-separated declarations, but not the trailing semicolon if any. */ decls = c_parser_struct_declaration (parser); contents = chainon (decls, contents); /* If no semicolon follows, either we have a parse error or are at the end of the struct or union and should pedwarn. */ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) c_parser_consume_token (parser); else { if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) pedwarn (c_parser_peek_token (parser)->location, 0, "no semicolon at end of struct or union"); else { c_parser_error (parser, "expected %<;%>"); c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL); break; } } } postfix_attrs = c_parser_attributes (parser); ret.spec = finish_struct (struct_loc, type, nreverse (contents), chainon (attrs, postfix_attrs), struct_info); ret.kind = ctsk_tagdef; ret.expr = NULL_TREE; ret.expr_const_operands = true; return ret; } else if (!ident) { c_parser_error (parser, "expected %<{%>"); ret.spec = error_mark_node; ret.kind = ctsk_tagref; ret.expr = NULL_TREE; ret.expr_const_operands = true; return ret; } ret = parser_xref_tag (ident_loc, code, ident); return ret; } /* Parse a struct-declaration (C90 6.5.2.1, C99 6.7.2.1), *without* the trailing semicolon. struct-declaration: specifier-qualifier-list struct-declarator-list specifier-qualifier-list: type-specifier specifier-qualifier-list[opt] type-qualifier specifier-qualifier-list[opt] attributes specifier-qualifier-list[opt] struct-declarator-list: struct-declarator struct-declarator-list , attributes[opt] struct-declarator struct-declarator: declarator attributes[opt] declarator[opt] : constant-expression attributes[opt] GNU extensions: struct-declaration: __extension__ struct-declaration specifier-qualifier-list Unlike the ISO C syntax, semicolons are handled elsewhere. The use of attributes where shown is a GNU extension. In GNU C, we accept any expression without commas in the syntax (assignment expressions, not just conditional expressions); assignment expressions will be diagnosed as non-constant. 
*/ static tree c_parser_struct_declaration (c_parser *parser) { struct c_declspecs *specs; tree prefix_attrs; tree all_prefix_attrs; tree decls; location_t decl_loc; if (c_parser_next_token_is_keyword (parser, RID_EXTENSION)) { int ext; tree decl; ext = disable_extension_diagnostics (); c_parser_consume_token (parser); decl = c_parser_struct_declaration (parser); restore_extension_diagnostics (ext); return decl; } specs = build_null_declspecs (); decl_loc = c_parser_peek_token (parser)->location; c_parser_declspecs (parser, specs, false, true, true); if (parser->error) return NULL_TREE; if (!specs->declspecs_seen_p) { c_parser_error (parser, "expected specifier-qualifier-list"); return NULL_TREE; } finish_declspecs (specs); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { tree ret; if (!specs->type_seen_p) { pedwarn (decl_loc, OPT_pedantic, "ISO C forbids member declarations with no members"); shadow_tag_warned (specs, pedantic); ret = NULL_TREE; } else { /* Support for unnamed structs or unions as members of structs or unions (which is [a] useful and [b] supports MS P-SDK). */ tree attrs = NULL; ret = grokfield (c_parser_peek_token (parser)->location, build_id_declarator (NULL_TREE), specs, NULL_TREE, &attrs); if (ret) decl_attributes (&ret, attrs, 0); } return ret; } pending_xref_error (); prefix_attrs = specs->attrs; all_prefix_attrs = prefix_attrs; specs->attrs = NULL_TREE; decls = NULL_TREE; while (true) { /* Declaring one or more declarators or un-named bit-fields. */ struct c_declarator *declarator; bool dummy = false; if (c_parser_next_token_is (parser, CPP_COLON)) declarator = build_id_declarator (NULL_TREE); else declarator = c_parser_declarator (parser, specs->type_seen_p, C_DTR_NORMAL, &dummy); if (declarator == NULL) { c_parser_skip_to_end_of_block_or_statement (parser); break; } if (c_parser_next_token_is (parser, CPP_COLON) || c_parser_next_token_is (parser, CPP_COMMA) || c_parser_next_token_is (parser, CPP_SEMICOLON) || c_parser_next_token_is (parser, CPP_CLOSE_BRACE) || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) { tree postfix_attrs = NULL_TREE; tree width = NULL_TREE; tree d; if (c_parser_next_token_is (parser, CPP_COLON)) { c_parser_consume_token (parser); width = c_parser_expr_no_commas (parser, NULL).value; } if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) postfix_attrs = c_parser_attributes (parser); d = grokfield (c_parser_peek_token (parser)->location, declarator, specs, width, &all_prefix_attrs); decl_attributes (&d, chainon (postfix_attrs, all_prefix_attrs), 0); TREE_CHAIN (d) = decls; decls = d; if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) all_prefix_attrs = chainon (c_parser_attributes (parser), prefix_attrs); else all_prefix_attrs = prefix_attrs; if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else if (c_parser_next_token_is (parser, CPP_SEMICOLON) || c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { /* Semicolon consumed in caller. */ break; } else { c_parser_error (parser, "expected %<,%>, %<;%> or %<}%>"); break; } } else { c_parser_error (parser, "expected %<:%>, %<,%>, %<;%>, %<}%> or " "%<__attribute__%>"); break; } } return decls; } /* Parse a typeof specifier (a GNU extension). 
typeof-specifier: typeof ( expression ) typeof ( type-name ) */ static struct c_typespec c_parser_typeof_specifier (c_parser *parser) { struct c_typespec ret; ret.kind = ctsk_typeof; ret.spec = error_mark_node; ret.expr = NULL_TREE; ret.expr_const_operands = true; gcc_assert (c_parser_next_token_is_keyword (parser, RID_TYPEOF)); c_parser_consume_token (parser); c_inhibit_evaluation_warnings++; in_typeof++; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { c_inhibit_evaluation_warnings--; in_typeof--; return ret; } if (c_parser_next_token_starts_typename (parser)) { struct c_type_name *type = c_parser_type_name (parser); c_inhibit_evaluation_warnings--; in_typeof--; if (type != NULL) { ret.spec = groktypename (type, &ret.expr, &ret.expr_const_operands); pop_maybe_used (variably_modified_type_p (ret.spec, NULL_TREE)); } } else { bool was_vm; location_t here = c_parser_peek_token (parser)->location; struct c_expr expr = c_parser_expression (parser); c_inhibit_evaluation_warnings--; in_typeof--; if (TREE_CODE (expr.value) == COMPONENT_REF && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1))) error_at (here, "%<typeof%> applied to a bit-field"); ret.spec = TREE_TYPE (expr.value); was_vm = variably_modified_type_p (ret.spec, NULL_TREE); /* This is returned with the type so that when the type is evaluated, this can be evaluated. */ if (was_vm) ret.expr = c_fully_fold (expr.value, false, &ret.expr_const_operands); pop_maybe_used (was_vm); } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return ret; } /* Parse a declarator, possibly an abstract declarator (C90 6.5.4, 6.5.5, C99 6.7.5, 6.7.6). If TYPE_SEEN_P then a typedef name may be redeclared; otherwise it may not. KIND indicates which kind of declarator is wanted. Returns a valid declarator except in the case of a syntax error in which case NULL is returned. *SEEN_ID is set to true if an identifier being declared is seen; this is used to diagnose bad forms of abstract array declarators and to determine whether an identifier list is syntactically permitted. declarator: pointer[opt] direct-declarator direct-declarator: identifier ( attributes[opt] declarator ) direct-declarator array-declarator direct-declarator ( parameter-type-list ) direct-declarator ( identifier-list[opt] ) pointer: * type-qualifier-list[opt] * type-qualifier-list[opt] pointer type-qualifier-list: type-qualifier attributes type-qualifier-list type-qualifier type-qualifier-list attributes parameter-type-list: parameter-list parameter-list , ... parameter-list: parameter-declaration parameter-list , parameter-declaration parameter-declaration: declaration-specifiers declarator attributes[opt] declaration-specifiers abstract-declarator[opt] attributes[opt] identifier-list: identifier identifier-list , identifier abstract-declarator: pointer pointer[opt] direct-abstract-declarator direct-abstract-declarator: ( attributes[opt] abstract-declarator ) direct-abstract-declarator[opt] array-declarator direct-abstract-declarator[opt] ( parameter-type-list[opt] ) GNU extensions: direct-declarator: direct-declarator ( parameter-forward-declarations parameter-type-list[opt] ) direct-abstract-declarator: direct-abstract-declarator[opt] ( parameter-forward-declarations parameter-type-list[opt] ) parameter-forward-declarations: parameter-list ; parameter-forward-declarations parameter-list ; The uses of attributes shown above are GNU extensions. 
Some forms of array declarator are not included in C99 in the syntax for abstract declarators; these are disallowed elsewhere. This may be a defect (DR#289). This function also accepts an omitted abstract declarator as being an abstract declarator, although not part of the formal syntax. */ static struct c_declarator * c_parser_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind, bool *seen_id) { /* Parse any initial pointer part. */ if (c_parser_next_token_is (parser, CPP_MULT)) { struct c_declspecs *quals_attrs = build_null_declspecs (); struct c_declarator *inner; c_parser_consume_token (parser); c_parser_declspecs (parser, quals_attrs, false, false, true); inner = c_parser_declarator (parser, type_seen_p, kind, seen_id); if (inner == NULL) return NULL; else return make_pointer_declarator (quals_attrs, inner); } /* Now we have a direct declarator, direct abstract declarator or nothing (which counts as a direct abstract declarator here). */ return c_parser_direct_declarator (parser, type_seen_p, kind, seen_id); } /* Parse a direct declarator or direct abstract declarator; arguments as c_parser_declarator. */ static struct c_declarator * c_parser_direct_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind, bool *seen_id) { /* The direct declarator must start with an identifier (possibly omitted) or a parenthesized declarator (possibly abstract). In an ordinary declarator, initial parentheses must start a parenthesized declarator. In an abstract declarator or parameter declarator, they could start a parenthesized declarator or a parameter list. To tell which, the open parenthesis and any following attributes must be read. If a declaration specifier follows, then it is a parameter list; if the specifier is a typedef name, there might be an ambiguity about redeclaring it, which is resolved in the direction of treating it as a typedef name. If a close parenthesis follows, it is also an empty parameter list, as the syntax does not permit empty abstract declarators. Otherwise, it is a parenthesized declarator (in which case the analysis may be repeated inside it, recursively). ??? There is an ambiguity in a parameter declaration "int (__attribute__((foo)) x)", where x is not a typedef name: it could be an abstract declarator for a function, or declare x with parentheses. The proper resolution of this ambiguity needs documenting. At present we follow an accident of the old parser's implementation, whereby the first parameter must have some declaration specifiers other than just attributes. Thus as a parameter declaration it is treated as a parenthesized parameter named x, and as an abstract declarator it is rejected. ??? Also following the old parser, attributes inside an empty parameter list are ignored, making it a list not yielding a prototype, rather than giving an error or making it have one parameter with implicit type int. ??? Also following the old parser, typedef names may be redeclared in declarators, but not Objective-C class names. 
*/ if (kind != C_DTR_ABSTRACT && c_parser_next_token_is (parser, CPP_NAME) && ((type_seen_p && c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME) || c_parser_peek_token (parser)->id_kind == C_ID_ID)) { struct c_declarator *inner = build_id_declarator (c_parser_peek_token (parser)->value); *seen_id = true; inner->id_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); return c_parser_direct_declarator_inner (parser, *seen_id, inner); } if (kind != C_DTR_NORMAL && c_parser_next_token_is (parser, CPP_OPEN_SQUARE)) { struct c_declarator *inner = build_id_declarator (NULL_TREE); return c_parser_direct_declarator_inner (parser, *seen_id, inner); } /* Either we are at the end of an abstract declarator, or we have parentheses. */ if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { tree attrs; struct c_declarator *inner; c_parser_consume_token (parser); attrs = c_parser_attributes (parser); if (kind != C_DTR_NORMAL && (c_parser_next_token_starts_declspecs (parser) || c_parser_next_token_is (parser, CPP_CLOSE_PAREN))) { struct c_arg_info *args = c_parser_parms_declarator (parser, kind == C_DTR_NORMAL, attrs); if (args == NULL) return NULL; else { inner = build_function_declarator (args, build_id_declarator (NULL_TREE)); return c_parser_direct_declarator_inner (parser, *seen_id, inner); } } /* A parenthesized declarator. */ inner = c_parser_declarator (parser, type_seen_p, kind, seen_id); if (inner != NULL && attrs != NULL) inner = build_attrs_declarator (attrs, inner); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); if (inner == NULL) return NULL; else return c_parser_direct_declarator_inner (parser, *seen_id, inner); } else { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return NULL; } } else { if (kind == C_DTR_NORMAL) { c_parser_error (parser, "expected identifier or %<(%>"); return NULL; } else return build_id_declarator (NULL_TREE); } } /* Parse part of a direct declarator or direct abstract declarator, given that some (in INNER) has already been parsed; ID_PRESENT is true if an identifier is present, false for an abstract declarator. */ static struct c_declarator * c_parser_direct_declarator_inner (c_parser *parser, bool id_present, struct c_declarator *inner) { /* Parse a sequence of array declarators and parameter lists. */ if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE)) { location_t brace_loc = c_parser_peek_token (parser)->location; struct c_declarator *declarator; struct c_declspecs *quals_attrs = build_null_declspecs (); bool static_seen; bool star_seen; tree dimen; c_parser_consume_token (parser); c_parser_declspecs (parser, quals_attrs, false, false, true); static_seen = c_parser_next_token_is_keyword (parser, RID_STATIC); if (static_seen) c_parser_consume_token (parser); if (static_seen && !quals_attrs->declspecs_seen_p) c_parser_declspecs (parser, quals_attrs, false, false, true); if (!quals_attrs->declspecs_seen_p) quals_attrs = NULL; /* If "static" is present, there must be an array dimension. Otherwise, there may be a dimension, "*", or no dimension. 
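For example, within a parameter declaration "int a[static 10]" requires the dimension and asserts at least ten accessible elements, "int a[*]" stands for an unspecified variable dimension (valid only in declarations that are not definitions), and "int a[]" omits the dimension altogether.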
*/ if (static_seen) { star_seen = false; dimen = c_parser_expr_no_commas (parser, NULL).value; } else { if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) { dimen = NULL_TREE; star_seen = false; } else if (c_parser_next_token_is (parser, CPP_MULT)) { if (c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_SQUARE) { dimen = NULL_TREE; star_seen = true; c_parser_consume_token (parser); } else { star_seen = false; dimen = c_parser_expr_no_commas (parser, NULL).value; } } else { star_seen = false; dimen = c_parser_expr_no_commas (parser, NULL).value; } } if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) c_parser_consume_token (parser); else { c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); return NULL; } declarator = build_array_declarator (brace_loc, dimen, quals_attrs, static_seen, star_seen); if (declarator == NULL) return NULL; inner = set_array_declarator_inner (declarator, inner); return c_parser_direct_declarator_inner (parser, id_present, inner); } else if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { tree attrs; struct c_arg_info *args; c_parser_consume_token (parser); attrs = c_parser_attributes (parser); args = c_parser_parms_declarator (parser, id_present, attrs); if (args == NULL) return NULL; else { inner = build_function_declarator (args, inner); return c_parser_direct_declarator_inner (parser, id_present, inner); } } return inner; } /* Parse a parameter list or identifier list, including the closing parenthesis but not the opening one. ATTRS are the attributes at the start of the list. ID_LIST_OK is true if an identifier list is acceptable; such a list must not have attributes at the start. */ static struct c_arg_info * c_parser_parms_declarator (c_parser *parser, bool id_list_ok, tree attrs) { push_scope (); declare_parm_level (); /* If the list starts with an identifier, it is an identifier list. Otherwise, it is either a prototype list or an empty list. */ if (id_list_ok && !attrs && c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_ID) { tree list = NULL_TREE, *nextp = &list; while (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_ID) { *nextp = build_tree_list (NULL_TREE, c_parser_peek_token (parser)->value); nextp = & TREE_CHAIN (*nextp); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_COMMA)) break; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_error (parser, "expected identifier"); break; } } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info); ret->parms = 0; ret->tags = 0; ret->types = list; ret->others = 0; ret->pending_sizes = 0; ret->had_vla_unspec = 0; c_parser_consume_token (parser); pop_scope (); return ret; } else { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); pop_scope (); return NULL; } } else { struct c_arg_info *ret = c_parser_parms_list_declarator (parser, attrs); pop_scope (); return ret; } } /* Parse a parameter list (possibly empty), including the closing parenthesis but not the opening one. ATTRS are the attributes at the start of the list. */ static struct c_arg_info * c_parser_parms_list_declarator (c_parser *parser, tree attrs) { bool good_parm = false; /* ??? Following the old parser, forward parameter declarations may use abstract declarators, and if no real parameter declarations follow the forward declarations then this is not diagnosed. 
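For example, the GNU syntax "int f (int n; int a[n], int n);" forward-declares n so that the size of the array parameter a can refer to the parameter n declared after it.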
Also note as above that attributes are ignored as the only contents of the parentheses, or as the only contents after forward declarations. */ if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info); ret->parms = 0; ret->tags = 0; ret->types = 0; ret->others = 0; ret->pending_sizes = 0; ret->had_vla_unspec = 0; c_parser_consume_token (parser); return ret; } if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info); ret->parms = 0; ret->tags = 0; ret->others = 0; ret->pending_sizes = 0; ret->had_vla_unspec = 0; /* Suppress -Wold-style-definition for this case. */ ret->types = error_mark_node; error_at (c_parser_peek_token (parser)->location, "ISO C requires a named argument before %<...%>"); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); return ret; } else { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return NULL; } } /* Nonempty list of parameters, either terminated with semicolon (forward declarations; recurse) or with close parenthesis (normal function) or with ", ... )" (variadic function). */ while (true) { /* Parse a parameter. */ struct c_parm *parm = c_parser_parameter_declaration (parser, attrs); attrs = NULL_TREE; if (parm != NULL) { good_parm = true; push_parm_decl (parm); } if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { tree new_attrs; c_parser_consume_token (parser); mark_forward_parm_decls (); new_attrs = c_parser_attributes (parser); return c_parser_parms_list_declarator (parser, new_attrs); } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); if (good_parm) return get_parm_info (false); else { struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info); ret->parms = 0; ret->tags = 0; ret->types = 0; ret->others = 0; ret->pending_sizes = 0; ret->had_vla_unspec = 0; return ret; } } if (!c_parser_require (parser, CPP_COMMA, "expected %<;%>, %<,%> or %<)%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); get_pending_sizes (); return NULL; } if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) { c_parser_consume_token (parser); if (good_parm) return get_parm_info (true); else { struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info); ret->parms = 0; ret->tags = 0; ret->types = 0; ret->others = 0; ret->pending_sizes = 0; ret->had_vla_unspec = 0; return ret; } } else { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); get_pending_sizes (); return NULL; } } } } /* Parse a parameter declaration. ATTRS are the attributes at the start of the declaration if it is the first parameter. */ static struct c_parm * c_parser_parameter_declaration (c_parser *parser, tree attrs) { struct c_declspecs *specs; struct c_declarator *declarator; tree prefix_attrs; tree postfix_attrs = NULL_TREE; bool dummy = false; if (!c_parser_next_token_starts_declspecs (parser)) { /* ??? In some Objective-C cases '...' isn't applicable so there should be a different message. 
*/ c_parser_error (parser, "expected declaration specifiers or %<...%>"); c_parser_skip_to_end_of_parameter (parser); return NULL; } specs = build_null_declspecs (); if (attrs) { declspecs_add_attrs (specs, attrs); attrs = NULL_TREE; } c_parser_declspecs (parser, specs, true, true, true); finish_declspecs (specs); pending_xref_error (); prefix_attrs = specs->attrs; specs->attrs = NULL_TREE; declarator = c_parser_declarator (parser, specs->type_seen_p, C_DTR_PARM, &dummy); if (declarator == NULL) { c_parser_skip_until_found (parser, CPP_COMMA, NULL); return NULL; } if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) postfix_attrs = c_parser_attributes (parser); return build_c_parm (specs, chainon (postfix_attrs, prefix_attrs), declarator); } /* Parse a string literal in an asm expression. It should not be translated, and wide string literals are an error although permitted by the syntax. This is a GNU extension. asm-string-literal: string-literal ??? At present, following the old parser, the caller needs to have set lex_untranslated_string to 1. It would be better to follow the C++ parser rather than using this kludge. */ static tree c_parser_asm_string_literal (c_parser *parser) { tree str; if (c_parser_next_token_is (parser, CPP_STRING)) { str = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else if (c_parser_next_token_is (parser, CPP_WSTRING)) { error_at (c_parser_peek_token (parser)->location, "wide string literal in %<asm%>"); str = build_string (1, ""); c_parser_consume_token (parser); } else { c_parser_error (parser, "expected string literal"); str = NULL_TREE; } return str; } /* Parse a simple asm expression. This is used in restricted contexts, where a full expression with inputs and outputs does not make sense. This is a GNU extension. simple-asm-expr: asm ( asm-string-literal ) */ static tree c_parser_simple_asm_expr (c_parser *parser) { tree str; gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM)); /* ??? Follow the C++ parser rather than using the lex_untranslated_string kludge. */ parser->lex_untranslated_string = true; c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { parser->lex_untranslated_string = false; return NULL_TREE; } str = c_parser_asm_string_literal (parser); parser->lex_untranslated_string = false; if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return NULL_TREE; } return str; } /* Parse (possibly empty) attributes. This is a GNU extension. attributes: empty attributes attribute attribute: __attribute__ ( ( attribute-list ) ) attribute-list: attrib attribute_list , attrib attrib: empty any-word any-word ( identifier ) any-word ( identifier , nonempty-expr-list ) any-word ( expr-list ) where the "identifier" must not be declared as a type, and "any-word" may be any identifier (including one declared as a type), a reserved word storage class specifier, type specifier or type qualifier. ??? This still leaves out most reserved keywords (following the old parser), shouldn't we include them, and why not allow identifiers declared as types to start the arguments? */ static tree c_parser_attributes (c_parser *parser) { tree attrs = NULL_TREE; while (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)) { /* ??? Follow the C++ parser rather than using the lex_untranslated_string kludge. 
*/ parser->lex_untranslated_string = true; c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { parser->lex_untranslated_string = false; return attrs; } if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { parser->lex_untranslated_string = false; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return attrs; } /* Parse the attribute list. */ while (c_parser_next_token_is (parser, CPP_COMMA) || c_parser_next_token_is (parser, CPP_NAME) || c_parser_next_token_is (parser, CPP_KEYWORD)) { tree attr, attr_name, attr_args; VEC(tree,gc) *expr_list; if (c_parser_next_token_is (parser, CPP_COMMA)) { c_parser_consume_token (parser); continue; } if (c_parser_next_token_is (parser, CPP_KEYWORD)) { /* ??? See comment above about what keywords are accepted here. */ bool ok; switch (c_parser_peek_token (parser)->keyword) { case RID_STATIC: case RID_UNSIGNED: case RID_LONG: case RID_CONST: case RID_EXTERN: case RID_REGISTER: case RID_TYPEDEF: case RID_SHORT: case RID_INLINE: case RID_VOLATILE: case RID_SIGNED: case RID_AUTO: case RID_RESTRICT: case RID_COMPLEX: case RID_THREAD: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: case RID_BOOL: case RID_FRACT: case RID_ACCUM: case RID_SAT: ok = true; break; default: ok = false; break; } if (!ok) break; /* Accept __attribute__((__const)) as __attribute__((const)) etc. */ attr_name = ridpointers[(int) c_parser_peek_token (parser)->keyword]; } else attr_name = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_OPEN_PAREN)) { attr = build_tree_list (attr_name, NULL_TREE); attrs = chainon (attrs, attr); continue; } c_parser_consume_token (parser); /* Parse the attribute contents. If they start with an identifier which is followed by a comma or close parenthesis, then the arguments start with that identifier; otherwise they are an expression list. 
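For example, in "__attribute__ ((format (printf, 1, 2)))" the arguments begin with the identifier "printf", which is taken purely as an identifier rather than evaluated as an expression, while in "__attribute__ ((aligned (16)))" they are an ordinary expression list.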
*/ if (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_ID && ((c_parser_peek_2nd_token (parser)->type == CPP_COMMA) || (c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_PAREN))) { tree arg1 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) attr_args = build_tree_list (NULL_TREE, arg1); else { tree tree_list; c_parser_consume_token (parser); expr_list = c_parser_expr_list (parser, false, true, NULL); tree_list = build_tree_list_vec (expr_list); attr_args = tree_cons (NULL_TREE, arg1, tree_list); release_tree_vector (expr_list); } } else { if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) attr_args = NULL_TREE; else { expr_list = c_parser_expr_list (parser, false, true, NULL); attr_args = build_tree_list_vec (expr_list); release_tree_vector (expr_list); } } attr = build_tree_list (attr_name, attr_args); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) c_parser_consume_token (parser); else { parser->lex_untranslated_string = false; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } attrs = chainon (attrs, attr); } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) c_parser_consume_token (parser); else { parser->lex_untranslated_string = false; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) c_parser_consume_token (parser); else { parser->lex_untranslated_string = false; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } parser->lex_untranslated_string = false; } return attrs; } /* Parse a type name (C90 6.5.5, C99 6.7.6). type-name: specifier-qualifier-list abstract-declarator[opt] */ static struct c_type_name * c_parser_type_name (c_parser *parser) { struct c_declspecs *specs = build_null_declspecs (); struct c_declarator *declarator; struct c_type_name *ret; bool dummy = false; c_parser_declspecs (parser, specs, false, true, true); if (!specs->declspecs_seen_p) { c_parser_error (parser, "expected specifier-qualifier-list"); return NULL; } pending_xref_error (); finish_declspecs (specs); declarator = c_parser_declarator (parser, specs->type_seen_p, C_DTR_ABSTRACT, &dummy); if (declarator == NULL) return NULL; ret = XOBNEW (&parser_obstack, struct c_type_name); ret->specs = specs; ret->declarator = declarator; return ret; } /* Parse an initializer (C90 6.5.7, C99 6.7.8). initializer: assignment-expression { initializer-list } { initializer-list , } initializer-list: designation[opt] initializer initializer-list , designation[opt] initializer designation: designator-list = designator-list: designator designator-list designator designator: array-designator . identifier array-designator: [ constant-expression ] GNU extensions: initializer: { } designation: array-designator identifier : array-designator: [ constant-expression ... constant-expression ] Any expression without commas is accepted in the syntax for the constant-expressions, with non-constant expressions rejected later. This function is only used for top-level initializers; for nested ones, see c_parser_initval. 
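For example (with an assumed "struct point" having members x and y), "struct point p = { .x = 1, .y = 2 };" uses member designators, "int a[3] = { [2] = 5 };" an array designator, and the GNU range form "int b[10] = { [0 ... 9] = 1 };" designates a whole span of elements at once.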
*/ static struct c_expr c_parser_initializer (c_parser *parser) { if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) return c_parser_braced_init (parser, NULL_TREE, false); else { struct c_expr ret; location_t loc = c_parser_peek_token (parser)->location; ret = c_parser_expr_no_commas (parser, NULL); if (TREE_CODE (ret.value) != STRING_CST && TREE_CODE (ret.value) != COMPOUND_LITERAL_EXPR) ret = default_function_array_conversion (loc, ret); return ret; } } /* Parse a braced initializer list. TYPE is the type specified for a compound literal, and NULL_TREE for other initializers and for nested braced lists. NESTED_P is true for nested braced lists, false for the list of a compound literal or the list that is the top-level initializer in a declaration. */ static struct c_expr c_parser_braced_init (c_parser *parser, tree type, bool nested_p) { location_t brace_loc = c_parser_peek_token (parser)->location; gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE)); c_parser_consume_token (parser); if (nested_p) push_init_level (0); else really_start_incremental_init (type); if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { pedwarn (brace_loc, OPT_pedantic, "ISO C forbids empty initializer braces"); } else { /* Parse a non-empty initializer list, possibly with a trailing comma. */ while (true) { c_parser_initelt (parser); if (parser->error) break; if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) break; } } if (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE)) { struct c_expr ret; ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, "expected %<}%>"); pop_init_level (0); return ret; } c_parser_consume_token (parser); return pop_init_level (0); } /* Parse a nested initializer, including designators. */ static void c_parser_initelt (c_parser *parser) { /* Parse any designator or designator list. A single array designator may have the subsequent "=" omitted in GNU C, but a longer list or a structure member designator may not. */ if (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON) { /* Old-style structure member designator. */ set_init_label (c_parser_peek_token (parser)->value); /* Use the colon as the error location. */ pedwarn (c_parser_peek_2nd_token (parser)->location, OPT_pedantic, "obsolete use of designated initializer with %<:%>"); c_parser_consume_token (parser); c_parser_consume_token (parser); } else { /* des_seen is 0 if there have been no designators, 1 if there has been a single array designator and 2 otherwise. */ int des_seen = 0; /* Location of a designator. */ location_t des_loc = UNKNOWN_LOCATION; /* Quiet warning. 
*/ while (c_parser_next_token_is (parser, CPP_OPEN_SQUARE) || c_parser_next_token_is (parser, CPP_DOT)) { int des_prev = des_seen; if (!des_seen) des_loc = c_parser_peek_token (parser)->location; if (des_seen < 2) des_seen++; if (c_parser_next_token_is (parser, CPP_DOT)) { des_seen = 2; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { set_init_label (c_parser_peek_token (parser)->value); c_parser_consume_token (parser); } else { struct c_expr init; init.value = error_mark_node; init.original_code = ERROR_MARK; init.original_type = NULL; c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_COMMA, NULL); process_init_element (init, false); return; } } else { tree first, second; location_t ellipsis_loc = UNKNOWN_LOCATION; /* Quiet warning. */ /* ??? Following the old parser, [ objc-receiver objc-message-args ] is accepted as an initializer, being distinguished from a designator by what follows the first assignment expression inside the square brackets, but after a first array designator a subsequent square bracket is for Objective-C taken to start an expression, using the obsolete form of designated initializer without '=', rather than possibly being a second level of designation: in LALR terms, the '[' is shifted rather than reducing designator to designator-list. */ if (des_prev == 1 && c_dialect_objc ()) { des_seen = des_prev; break; } if (des_prev == 0 && c_dialect_objc ()) { /* This might be an array designator or an Objective-C message expression. If the former, continue parsing here; if the latter, parse the remainder of the initializer given the starting primary-expression. ??? It might make sense to distinguish when des_prev == 1 as well; see previous comment. */ tree rec, args; struct c_expr mexpr; c_parser_consume_token (parser); if (c_parser_peek_token (parser)->type == CPP_NAME && ((c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME) || (c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME))) { /* Type name receiver. */ tree id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); rec = objc_get_class_reference (id); goto parse_message_args; } first = c_parser_expr_no_commas (parser, NULL).value; if (c_parser_next_token_is (parser, CPP_ELLIPSIS) || c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) goto array_desig_after_first; /* Expression receiver. So far only one part without commas has been parsed; there might be more of the expression. */ rec = first; while (c_parser_next_token_is (parser, CPP_COMMA)) { struct c_expr next; location_t comma_loc, exp_loc; comma_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; next = c_parser_expr_no_commas (parser, NULL); next = default_function_array_conversion (exp_loc, next); rec = build_compound_expr (comma_loc, rec, next.value); } parse_message_args: /* Now parse the objc-message-args. */ args = c_parser_objc_message_args (parser); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); mexpr.value = objc_build_message_expr (build_tree_list (rec, args)); mexpr.original_code = ERROR_MARK; mexpr.original_type = NULL; /* Now parse and process the remainder of the initializer, starting with this message expression as a primary-expression. 
*/ c_parser_initval (parser, &mexpr); return; } c_parser_consume_token (parser); first = c_parser_expr_no_commas (parser, NULL).value; array_desig_after_first: if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { ellipsis_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); second = c_parser_expr_no_commas (parser, NULL).value; } else second = NULL_TREE; if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE)) { c_parser_consume_token (parser); set_init_index (first, second); if (second) pedwarn (ellipsis_loc, OPT_pedantic, "ISO C forbids specifying range of elements to initialize"); } else c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); } } if (des_seen >= 1) { if (c_parser_next_token_is (parser, CPP_EQ)) { if (!flag_isoc99) pedwarn (des_loc, OPT_pedantic, "ISO C90 forbids specifying subobject to initialize"); c_parser_consume_token (parser); } else { if (des_seen == 1) pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic, "obsolete use of designated initializer without %<=%>"); else { struct c_expr init; init.value = error_mark_node; init.original_code = ERROR_MARK; init.original_type = NULL; c_parser_error (parser, "expected %<=%>"); c_parser_skip_until_found (parser, CPP_COMMA, NULL); process_init_element (init, false); return; } } } } c_parser_initval (parser, NULL); } /* Parse a nested initializer; as c_parser_initializer but parses initializers within braced lists, after any designators have been applied. If AFTER is not NULL then it is an Objective-C message expression which is the primary-expression starting the initializer. */ static void c_parser_initval (c_parser *parser, struct c_expr *after) { struct c_expr init; gcc_assert (!after || c_dialect_objc ()); if (c_parser_next_token_is (parser, CPP_OPEN_BRACE) && !after) init = c_parser_braced_init (parser, NULL_TREE, true); else { location_t loc = c_parser_peek_token (parser)->location; init = c_parser_expr_no_commas (parser, after); if (init.value != NULL_TREE && TREE_CODE (init.value) != STRING_CST && TREE_CODE (init.value) != COMPOUND_LITERAL_EXPR) init = default_function_array_conversion (loc, init); } process_init_element (init, false); } /* Parse a compound statement (possibly a function body) (C90 6.6.2, C99 6.8.2). compound-statement: { block-item-list[opt] } { label-declarations block-item-list } block-item-list: block-item block-item-list block-item block-item: nested-declaration statement nested-declaration: declaration GNU extensions: compound-statement: { label-declarations block-item-list } nested-declaration: __extension__ nested-declaration nested-function-definition label-declarations: label-declaration label-declarations label-declaration label-declaration: __label__ identifier-list ; Allowing the mixing of declarations and code is new in C99. The GNU syntax also permits (not shown above) labels at the end of compound statements, which yield an error. We don't allow labels on declarations; this might seem like a natural extension, but there would be a conflict between attributes on the label and prefix attributes on the declaration. ??? The syntax follows the old parser in requiring something after label declarations. Although they are erroneous if the labels declared aren't defined, is it useful for the syntax to be this way? 
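For example, a block may begin "{ __label__ done; ... done: ; }" so that a nested function defined inside the block can jump to the local label done.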
OpenMP: block-item: openmp-directive openmp-directive: barrier-directive flush-directive */ static tree c_parser_compound_statement (c_parser *parser) { tree stmt; location_t brace_loc; brace_loc = c_parser_peek_token (parser)->location; if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>")) { /* Ensure a scope is entered and left anyway to avoid confusion if we have just prepared to enter a function body. */ stmt = c_begin_compound_stmt (true); c_end_compound_stmt (brace_loc, stmt, true); return error_mark_node; } stmt = c_begin_compound_stmt (true); c_parser_compound_statement_nostart (parser); return c_end_compound_stmt (brace_loc, stmt, true); } /* Parse a compound statement except for the opening brace. This is used for parsing both compound statements and statement expressions (which follow different paths to handling the opening). */ static void c_parser_compound_statement_nostart (c_parser *parser) { bool last_stmt = false; bool last_label = false; bool save_valid_for_pragma = valid_location_for_stdc_pragma_p (); location_t label_loc = UNKNOWN_LOCATION; /* Quiet warning. */ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { c_parser_consume_token (parser); return; } mark_valid_location_for_stdc_pragma (true); if (c_parser_next_token_is_keyword (parser, RID_LABEL)) { /* Read zero or more forward-declarations for labels that nested functions can jump to. */ mark_valid_location_for_stdc_pragma (false); while (c_parser_next_token_is_keyword (parser, RID_LABEL)) { label_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); /* Any identifiers, including those declared as type names, are OK here. */ while (true) { tree label; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); break; } label = declare_label (c_parser_peek_token (parser)->value); C_DECLARED_LABEL_FLAG (label) = 1; add_stmt (build_stmt (label_loc, DECL_EXPR, label)); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } pedwarn (label_loc, OPT_pedantic, "ISO C forbids label declarations"); } /* We must now have at least one statement, label or declaration. */ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { mark_valid_location_for_stdc_pragma (save_valid_for_pragma); c_parser_error (parser, "expected declaration or statement"); c_parser_consume_token (parser); return; } while (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE)) { location_t loc = c_parser_peek_token (parser)->location; if (c_parser_next_token_is_keyword (parser, RID_CASE) || c_parser_next_token_is_keyword (parser, RID_DEFAULT) || (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON)) { if (c_parser_next_token_is_keyword (parser, RID_CASE)) label_loc = c_parser_peek_2nd_token (parser)->location; else label_loc = c_parser_peek_token (parser)->location; last_label = true; last_stmt = false; mark_valid_location_for_stdc_pragma (false); c_parser_label (parser); } else if (!last_label && c_parser_next_token_starts_declspecs (parser)) { last_label = false; mark_valid_location_for_stdc_pragma (false); c_parser_declaration_or_fndef (parser, true, true, true, true); if (last_stmt) pedwarn_c90 (loc, (pedantic && !flag_isoc99) ? 
OPT_pedantic : OPT_Wdeclaration_after_statement, "ISO C90 forbids mixed declarations and code"); last_stmt = false; } else if (!last_label && c_parser_next_token_is_keyword (parser, RID_EXTENSION)) { /* __extension__ can start a declaration, but is also a unary operator that can start an expression. Consume all but the last of a possible series of __extension__ to determine which. */ while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD && (c_parser_peek_2nd_token (parser)->keyword == RID_EXTENSION)) c_parser_consume_token (parser); if (c_token_starts_declspecs (c_parser_peek_2nd_token (parser))) { int ext; ext = disable_extension_diagnostics (); c_parser_consume_token (parser); last_label = false; mark_valid_location_for_stdc_pragma (false); c_parser_declaration_or_fndef (parser, true, true, true, true); /* Following the old parser, __extension__ does not disable this diagnostic. */ restore_extension_diagnostics (ext); if (last_stmt) pedwarn_c90 (loc, (pedantic && !flag_isoc99) ? OPT_pedantic : OPT_Wdeclaration_after_statement, "ISO C90 forbids mixed declarations and code"); last_stmt = false; } else goto statement; } else if (c_parser_next_token_is (parser, CPP_PRAGMA)) { /* External pragmas, and some omp pragmas, are not associated with regular c code, and so are not to be considered statements syntactically. This ensures that the user doesn't put them places that would turn into syntax errors if the directive were ignored. */ if (c_parser_pragma (parser, pragma_compound)) last_label = false, last_stmt = true; } else if (c_parser_next_token_is (parser, CPP_EOF)) { mark_valid_location_for_stdc_pragma (save_valid_for_pragma); c_parser_error (parser, "expected declaration or statement"); return; } else if (c_parser_next_token_is_keyword (parser, RID_ELSE)) { if (parser->in_if_block) { mark_valid_location_for_stdc_pragma (save_valid_for_pragma); error_at (loc, "expected %<}%> before %<else%>"); return; } else { error_at (loc, "%<else%> without a previous %<if%>"); c_parser_consume_token (parser); continue; } } else { statement: last_label = false; last_stmt = true; mark_valid_location_for_stdc_pragma (false); c_parser_statement_after_labels (parser); } parser->error = false; } if (last_label) error_at (label_loc, "label at end of compound statement"); c_parser_consume_token (parser); /* Restore the value we started with. */ mark_valid_location_for_stdc_pragma (save_valid_for_pragma); } /* Parse a label (C90 6.6.1, C99 6.8.1). label: identifier : attributes[opt] case constant-expression : default : GNU extensions: label: case constant-expression ... constant-expression : The use of attributes on labels is a GNU extension. The syntax in GNU C accepts any expressions without commas, non-constant expressions being rejected later.
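For example, the GNU range form "case 1 ... 5:" covers five consecutive values; below, both bounds are parsed with c_parser_expr_no_commas and handed to do_case.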
*/ static void c_parser_label (c_parser *parser) { location_t loc1 = c_parser_peek_token (parser)->location; tree label = NULL_TREE; if (c_parser_next_token_is_keyword (parser, RID_CASE)) { tree exp1, exp2; c_parser_consume_token (parser); exp1 = c_parser_expr_no_commas (parser, NULL).value; if (c_parser_next_token_is (parser, CPP_COLON)) { c_parser_consume_token (parser); label = do_case (loc1, exp1, NULL_TREE); } else if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { c_parser_consume_token (parser); exp2 = c_parser_expr_no_commas (parser, NULL).value; if (c_parser_require (parser, CPP_COLON, "expected %<:%>")) label = do_case (loc1, exp1, exp2); } else c_parser_error (parser, "expected %<:%> or %<...%>"); } else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT)) { c_parser_consume_token (parser); if (c_parser_require (parser, CPP_COLON, "expected %<:%>")) label = do_case (loc1, NULL_TREE, NULL_TREE); } else { tree name = c_parser_peek_token (parser)->value; tree tlab; tree attrs; location_t loc2 = c_parser_peek_token (parser)->location; gcc_assert (c_parser_next_token_is (parser, CPP_NAME)); c_parser_consume_token (parser); gcc_assert (c_parser_next_token_is (parser, CPP_COLON)); c_parser_consume_token (parser); attrs = c_parser_attributes (parser); tlab = define_label (loc2, name); if (tlab) { decl_attributes (&tlab, attrs, 0); label = add_stmt (build_stmt (loc1, LABEL_EXPR, tlab)); } } if (label) { if (c_parser_next_token_starts_declspecs (parser) && !(c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON)) { error_at (c_parser_peek_token (parser)->location, "a label can only be part of a statement and " "a declaration is not a statement"); c_parser_declaration_or_fndef (parser, /*fndef_ok*/ false, /*nested*/ true, /*empty_ok*/ false, /*start_attr_ok*/ true); } } } /* Parse a statement (C90 6.6, C99 6.8). 
statement: labeled-statement compound-statement expression-statement selection-statement iteration-statement jump-statement labeled-statement: label statement expression-statement: expression[opt] ; selection-statement: if-statement switch-statement iteration-statement: while-statement do-statement for-statement jump-statement: goto identifier ; continue ; break ; return expression[opt] ; GNU extensions: statement: asm-statement jump-statement: goto * expression ; Objective-C: statement: objc-throw-statement objc-try-catch-statement objc-synchronized-statement objc-throw-statement: @throw expression ; @throw ; OpenMP: statement: openmp-construct openmp-construct: parallel-construct for-construct sections-construct single-construct parallel-for-construct parallel-sections-construct master-construct critical-construct atomic-construct ordered-construct parallel-construct: parallel-directive structured-block for-construct: for-directive iteration-statement sections-construct: sections-directive section-scope single-construct: single-directive structured-block parallel-for-construct: parallel-for-directive iteration-statement parallel-sections-construct: parallel-sections-directive section-scope master-construct: master-directive structured-block critical-construct: critical-directive structured-block atomic-construct: atomic-directive expression-statement ordered-construct: ordered-directive structured-block */ static void c_parser_statement (c_parser *parser) { while (c_parser_next_token_is_keyword (parser, RID_CASE) || c_parser_next_token_is_keyword (parser, RID_DEFAULT) || (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON)) c_parser_label (parser); c_parser_statement_after_labels (parser); } /* Parse a statement, other than a labeled statement. 
*/ static void c_parser_statement_after_labels (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; tree stmt = NULL_TREE; bool in_if_block = parser->in_if_block; parser->in_if_block = false; switch (c_parser_peek_token (parser)->type) { case CPP_OPEN_BRACE: add_stmt (c_parser_compound_statement (parser)); break; case CPP_KEYWORD: switch (c_parser_peek_token (parser)->keyword) { case RID_IF: c_parser_if_statement (parser); break; case RID_SWITCH: c_parser_switch_statement (parser); break; case RID_WHILE: c_parser_while_statement (parser); break; case RID_DO: c_parser_do_statement (parser); break; case RID_FOR: c_parser_for_statement (parser); break; case RID_GOTO: c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { stmt = c_finish_goto_label (loc, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); } else if (c_parser_next_token_is (parser, CPP_MULT)) { c_parser_consume_token (parser); stmt = c_finish_goto_ptr (loc, c_parser_expression (parser).value); } else c_parser_error (parser, "expected identifier or %<*%>"); goto expect_semicolon; case RID_CONTINUE: c_parser_consume_token (parser); stmt = c_finish_bc_stmt (loc, &c_cont_label, false); goto expect_semicolon; case RID_BREAK: c_parser_consume_token (parser); stmt = c_finish_bc_stmt (loc, &c_break_label, true); goto expect_semicolon; case RID_RETURN: c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { stmt = c_finish_return (loc, NULL_TREE, NULL_TREE); c_parser_consume_token (parser); } else { struct c_expr expr = c_parser_expression_conv (parser); stmt = c_finish_return (loc, expr.value, expr.original_type); goto expect_semicolon; } break; case RID_ASM: stmt = c_parser_asm_statement (parser); break; case RID_THROW: gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { stmt = objc_build_throw_stmt (loc, NULL_TREE); c_parser_consume_token (parser); } else { tree expr = c_parser_expression (parser).value; expr = c_fully_fold (expr, false, NULL); stmt = objc_build_throw_stmt (loc, expr); goto expect_semicolon; } break; case RID_TRY: gcc_assert (c_dialect_objc ()); c_parser_objc_try_catch_statement (parser); break; case RID_AT_SYNCHRONIZED: gcc_assert (c_dialect_objc ()); c_parser_objc_synchronized_statement (parser); break; default: goto expr_stmt; } break; case CPP_SEMICOLON: c_parser_consume_token (parser); break; case CPP_CLOSE_PAREN: case CPP_CLOSE_SQUARE: /* Avoid infinite loop in error recovery: c_parser_skip_until_found stops at a closing nesting delimiter without consuming it, but here we need to consume it to proceed further. */ c_parser_error (parser, "expected statement"); c_parser_consume_token (parser); break; case CPP_PRAGMA: c_parser_pragma (parser, pragma_stmt); break; default: expr_stmt: stmt = c_finish_expr_stmt (loc, c_parser_expression_conv (parser).value); expect_semicolon: c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); break; } /* Two cases cannot and do not have line numbers associated: If stmt is degenerate, such as "2;", then stmt is an INTEGER_CST, which cannot hold line numbers. But that's OK because the statement will either be changed to a MODIFY_EXPR during gimplification of the statement expr, or discarded. If stmt was compound, but without new variables, we will have skipped the creation of a BIND and will have a bare STATEMENT_LIST. 
But that's OK because (recursively) all of the component statements should already have line numbers assigned. ??? Can we discard no-op statements earlier? */ if (CAN_HAVE_LOCATION_P (stmt) && EXPR_LOCATION (stmt) == UNKNOWN_LOCATION) SET_EXPR_LOCATION (stmt, loc); parser->in_if_block = in_if_block; } /* Parse the condition from an if, do, while or for statements. */ static tree c_parser_condition (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; tree cond; cond = c_parser_expression_conv (parser).value; cond = c_objc_common_truthvalue_conversion (loc, cond); cond = c_fully_fold (cond, false, NULL); if (warn_sequence_point) verify_sequence_points (cond); return cond; } /* Parse a parenthesized condition from an if, do or while statement. condition: ( expression ) */ static tree c_parser_paren_condition (c_parser *parser) { tree cond; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) return error_mark_node; cond = c_parser_condition (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); return cond; } /* Parse a statement which is a block in C99. */ static tree c_parser_c99_block_statement (c_parser *parser) { tree block = c_begin_compound_stmt (flag_isoc99); location_t loc = c_parser_peek_token (parser)->location; c_parser_statement (parser); return c_end_compound_stmt (loc, block, flag_isoc99); } /* Parse the body of an if statement. This is just parsing a statement but (a) it is a block in C99, (b) we track whether the body is an if statement for the sake of -Wparentheses warnings, (c) we handle an empty body specially for the sake of -Wempty-body warnings, and (d) we call parser_compound_statement directly because c_parser_statement_after_labels resets parser->in_if_block. */ static tree c_parser_if_body (c_parser *parser, bool *if_p) { tree block = c_begin_compound_stmt (flag_isoc99); location_t body_loc = c_parser_peek_token (parser)->location; while (c_parser_next_token_is_keyword (parser, RID_CASE) || c_parser_next_token_is_keyword (parser, RID_DEFAULT) || (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON)) c_parser_label (parser); *if_p = c_parser_next_token_is_keyword (parser, RID_IF); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { location_t loc = c_parser_peek_token (parser)->location; add_stmt (build_empty_stmt (loc)); c_parser_consume_token (parser); if (!c_parser_next_token_is_keyword (parser, RID_ELSE)) warning_at (loc, OPT_Wempty_body, "suggest braces around empty body in an %<if%> statement"); } else if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) add_stmt (c_parser_compound_statement (parser)); else c_parser_statement_after_labels (parser); return c_end_compound_stmt (body_loc, block, flag_isoc99); } /* Parse the else body of an if statement. This is just parsing a statement but (a) it is a block in C99, (b) we handle an empty body specially for the sake of -Wempty-body warnings. 
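For example, "if (x) ;" with nothing following draws the suggestion to brace the empty body, while in "if (x) ; else ;" only the empty else branch is warned about, the empty if body being exempted because an else follows it directly.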
*/ static tree c_parser_else_body (c_parser *parser) { location_t else_loc = c_parser_peek_token (parser)->location; tree block = c_begin_compound_stmt (flag_isoc99); while (c_parser_next_token_is_keyword (parser, RID_CASE) || c_parser_next_token_is_keyword (parser, RID_DEFAULT) || (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_COLON)) c_parser_label (parser); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { location_t loc = c_parser_peek_token (parser)->location; warning_at (loc, OPT_Wempty_body, "suggest braces around empty body in an %<else%> statement"); add_stmt (build_empty_stmt (loc)); c_parser_consume_token (parser); } else c_parser_statement_after_labels (parser); return c_end_compound_stmt (else_loc, block, flag_isoc99); } /* Parse an if statement (C90 6.6.4, C99 6.8.4). if-statement: if ( expression ) statement if ( expression ) statement else statement */ static void c_parser_if_statement (c_parser *parser) { tree block; location_t loc; tree cond; bool first_if = false; tree first_body, second_body; bool in_if_block; gcc_assert (c_parser_next_token_is_keyword (parser, RID_IF)); c_parser_consume_token (parser); block = c_begin_compound_stmt (flag_isoc99); loc = c_parser_peek_token (parser)->location; cond = c_parser_paren_condition (parser); in_if_block = parser->in_if_block; parser->in_if_block = true; first_body = c_parser_if_body (parser, &first_if); parser->in_if_block = in_if_block; if (c_parser_next_token_is_keyword (parser, RID_ELSE)) { c_parser_consume_token (parser); second_body = c_parser_else_body (parser); } else second_body = NULL_TREE; c_finish_if_stmt (loc, cond, first_body, second_body, first_if); add_stmt (c_end_compound_stmt (loc, block, flag_isoc99)); } /* Parse a switch statement (C90 6.6.4, C99 6.8.4). switch-statement: switch (expression) statement */ static void c_parser_switch_statement (c_parser *parser) { tree block, expr, body, save_break; location_t switch_loc = c_parser_peek_token (parser)->location; location_t switch_cond_loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_SWITCH)); c_parser_consume_token (parser); block = c_begin_compound_stmt (flag_isoc99); if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { switch_cond_loc = c_parser_peek_token (parser)->location; expr = c_parser_expression (parser).value; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else { switch_cond_loc = UNKNOWN_LOCATION; expr = error_mark_node; } c_start_case (switch_loc, switch_cond_loc, expr); save_break = c_break_label; c_break_label = NULL_TREE; body = c_parser_c99_block_statement (parser); c_finish_case (body); if (c_break_label) { location_t here = c_parser_peek_token (parser)->location; tree t = build1 (LABEL_EXPR, void_type_node, c_break_label); SET_EXPR_LOCATION (t, here); add_stmt (t); } c_break_label = save_break; add_stmt (c_end_compound_stmt (switch_loc, block, flag_isoc99)); } /* Parse a while statement (C90 6.6.5, C99 6.8.5). 
while-statement: while (expression) statement */ static void c_parser_while_statement (c_parser *parser) { tree block, cond, body, save_break, save_cont; location_t loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_WHILE)); c_parser_consume_token (parser); block = c_begin_compound_stmt (flag_isoc99); loc = c_parser_peek_token (parser)->location; cond = c_parser_paren_condition (parser); save_break = c_break_label; c_break_label = NULL_TREE; save_cont = c_cont_label; c_cont_label = NULL_TREE; body = c_parser_c99_block_statement (parser); c_finish_loop (loc, cond, NULL, body, c_break_label, c_cont_label, true); add_stmt (c_end_compound_stmt (loc, block, flag_isoc99)); c_break_label = save_break; c_cont_label = save_cont; } /* Parse a do statement (C90 6.6.5, C99 6.8.5). do-statement: do statement while ( expression ) ; */ static void c_parser_do_statement (c_parser *parser) { tree block, cond, body, save_break, save_cont, new_break, new_cont; location_t loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_DO)); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) warning_at (c_parser_peek_token (parser)->location, OPT_Wempty_body, "suggest braces around empty body in %<do%> statement"); block = c_begin_compound_stmt (flag_isoc99); loc = c_parser_peek_token (parser)->location; save_break = c_break_label; c_break_label = NULL_TREE; save_cont = c_cont_label; c_cont_label = NULL_TREE; body = c_parser_c99_block_statement (parser); c_parser_require_keyword (parser, RID_WHILE, "expected %<while%>"); new_break = c_break_label; c_break_label = save_break; new_cont = c_cont_label; c_cont_label = save_cont; cond = c_parser_paren_condition (parser); if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>")) c_parser_skip_to_end_of_block_or_statement (parser); c_finish_loop (loc, cond, NULL, body, new_break, new_cont, false); add_stmt (c_end_compound_stmt (loc, block, flag_isoc99)); } /* Parse a for statement (C90 6.6.5, C99 6.8.5). for-statement: for ( expression[opt] ; expression[opt] ; expression[opt] ) statement for ( nested-declaration expression[opt] ; expression[opt] ) statement The form with a declaration is new in C99. ??? In accordance with the old parser, the declaration may be a nested function, which is then rejected in check_for_loop_decls, but does it make any sense for this to be included in the grammar? Note in particular that the nested function does not include a trailing ';', whereas the "declaration" production includes one. Also, can we reject bad declarations earlier and cheaper than check_for_loop_decls? */ static void c_parser_for_statement (c_parser *parser) { tree block, cond, incr, save_break, save_cont, body; location_t loc = c_parser_peek_token (parser)->location; location_t for_loc = c_parser_peek_token (parser)->location; gcc_assert (c_parser_next_token_is_keyword (parser, RID_FOR)); c_parser_consume_token (parser); block = c_begin_compound_stmt (flag_isoc99); if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { /* Parse the initialization declaration or expression. 
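For example, "for (int i = 0; i < 10; i++)" enters the declaration branch below (new in C99), while "for (i = 0; i < 10; i++)" takes the expression branch.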
*/ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { c_parser_consume_token (parser); c_finish_expr_stmt (loc, NULL_TREE); } else if (c_parser_next_token_starts_declspecs (parser)) { c_parser_declaration_or_fndef (parser, true, true, true, true); check_for_loop_decls (for_loc); } else if (c_parser_next_token_is_keyword (parser, RID_EXTENSION)) { /* __extension__ can start a declaration, but is also an unary operator that can start an expression. Consume all but the last of a possible series of __extension__ to determine which. */ while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD && (c_parser_peek_2nd_token (parser)->keyword == RID_EXTENSION)) c_parser_consume_token (parser); if (c_token_starts_declspecs (c_parser_peek_2nd_token (parser))) { int ext; ext = disable_extension_diagnostics (); c_parser_consume_token (parser); c_parser_declaration_or_fndef (parser, true, true, true, true); restore_extension_diagnostics (ext); check_for_loop_decls (for_loc); } else goto init_expr; } else { init_expr: c_finish_expr_stmt (loc, c_parser_expression (parser).value); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } /* Parse the loop condition. */ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { c_parser_consume_token (parser); cond = NULL_TREE; } else { cond = c_parser_condition (parser); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } /* Parse the increment expression. */ if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) incr = c_process_expr_stmt (loc, NULL_TREE); else incr = c_process_expr_stmt (loc, c_parser_expression (parser).value); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else { cond = error_mark_node; incr = error_mark_node; } save_break = c_break_label; c_break_label = NULL_TREE; save_cont = c_cont_label; c_cont_label = NULL_TREE; body = c_parser_c99_block_statement (parser); c_finish_loop (loc, cond, incr, body, c_break_label, c_cont_label, true); add_stmt (c_end_compound_stmt (loc, block, flag_isoc99)); c_break_label = save_break; c_cont_label = save_cont; } /* Parse an asm statement, a GNU extension. This is a full-blown asm statement with inputs, outputs, clobbers, and volatile tag allowed. asm-statement: asm type-qualifier[opt] ( asm-argument ) ; asm type-qualifier[opt] goto ( asm-goto-argument ) ; asm-argument: asm-string-literal asm-string-literal : asm-operands[opt] asm-string-literal : asm-operands[opt] : asm-operands[opt] asm-string-literal : asm-operands[opt] : asm-operands[opt] : asm-clobbers[opt] asm-goto-argument: asm-string-literal : : asm-operands[opt] : asm-clobbers[opt] \ : asm-goto-operands Qualifiers other than volatile are accepted in the syntax but warned for. 
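For example: asm volatile ("rep; nop" : : : "memory"); has empty output and input sections and clobbers memory, and asm goto ("jmp %l0" : : : : failed); names the label failed as a possible transfer target (the output section is left empty, since outputs are not allowed for asm goto, but its colon is still present).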
*/ static tree c_parser_asm_statement (c_parser *parser) { tree quals, str, outputs, inputs, clobbers, labels, ret; bool simple, is_goto; location_t asm_loc = c_parser_peek_token (parser)->location; int section, nsections; gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM)); c_parser_consume_token (parser); if (c_parser_next_token_is_keyword (parser, RID_VOLATILE)) { quals = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else if (c_parser_next_token_is_keyword (parser, RID_CONST) || c_parser_next_token_is_keyword (parser, RID_RESTRICT)) { warning_at (c_parser_peek_token (parser)->location, 0, "%E qualifier ignored on asm", c_parser_peek_token (parser)->value); quals = NULL_TREE; c_parser_consume_token (parser); } else quals = NULL_TREE; is_goto = false; if (c_parser_next_token_is_keyword (parser, RID_GOTO)) { c_parser_consume_token (parser); is_goto = true; } /* ??? Follow the C++ parser rather than using the lex_untranslated_string kludge. */ parser->lex_untranslated_string = true; ret = NULL; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) goto error; str = c_parser_asm_string_literal (parser); if (str == NULL_TREE) goto error_close_paren; simple = true; outputs = NULL_TREE; inputs = NULL_TREE; clobbers = NULL_TREE; labels = NULL_TREE; if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN) && !is_goto) goto done_asm; /* Parse each colon-delimited section of operands. */ nsections = 3 + is_goto; for (section = 0; section < nsections; ++section) { if (!c_parser_require (parser, CPP_COLON, is_goto ? "expected %<:%>" : "expected %<:%> or %<)%>")) goto error_close_paren; /* Once past any colon, we're no longer a simple asm. */ simple = false; if ((!c_parser_next_token_is (parser, CPP_COLON) && !c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) || section == 3) switch (section) { case 0: /* For asm goto, we don't allow output operands, but reserve the slot for a future extension that does allow them. */ if (!is_goto) outputs = c_parser_asm_operands (parser, false); break; case 1: inputs = c_parser_asm_operands (parser, true); break; case 2: clobbers = c_parser_asm_clobbers (parser); break; case 3: labels = c_parser_asm_goto_operands (parser); break; default: gcc_unreachable (); } if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN) && !is_goto) goto done_asm; } done_asm: if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); goto error; } if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>")) c_parser_skip_to_end_of_block_or_statement (parser); ret = build_asm_stmt (quals, build_asm_expr (asm_loc, str, outputs, inputs, clobbers, labels, simple)); error: parser->lex_untranslated_string = false; return ret; error_close_paren: c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); goto error; } /* Parse asm operands, a GNU extension. If CONVERT_P (for inputs but not outputs), apply the default conversion of functions and arrays to pointers. 
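For example, "[res] "=r" (sum)" declares an output operand that the template can reference as %[res], whereas ""r" (count)" is a plain unnamed input matched by position.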
asm-operands: asm-operand asm-operands , asm-operand asm-operand: asm-string-literal ( expression ) [ identifier ] asm-string-literal ( expression ) */ static tree c_parser_asm_operands (c_parser *parser, bool convert_p) { tree list = NULL_TREE; location_t loc; while (true) { tree name, str; struct c_expr expr; if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE)) { c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { tree id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); name = build_string (IDENTIFIER_LENGTH (id), IDENTIFIER_POINTER (id)); } else { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL); return NULL_TREE; } c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); } else name = NULL_TREE; str = c_parser_asm_string_literal (parser); if (str == NULL_TREE) return NULL_TREE; parser->lex_untranslated_string = false; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { parser->lex_untranslated_string = true; return NULL_TREE; } loc = c_parser_peek_token (parser)->location; expr = c_parser_expression (parser); if (convert_p) expr = default_function_array_conversion (loc, expr); expr.value = c_fully_fold (expr.value, false, NULL); parser->lex_untranslated_string = true; if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return NULL_TREE; } list = chainon (list, build_tree_list (build_tree_list (name, str), expr.value)); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } return list; } /* Parse asm clobbers, a GNU extension. asm-clobbers: asm-string-literal asm-clobbers , asm-string-literal */ static tree c_parser_asm_clobbers (c_parser *parser) { tree list = NULL_TREE; while (true) { tree str = c_parser_asm_string_literal (parser); if (str) list = tree_cons (NULL_TREE, str, list); else return NULL_TREE; if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } return list; } /* Parse asm goto labels, a GNU extension. asm-goto-operands: identifier asm-goto-operands , identifier */ static tree c_parser_asm_goto_operands (c_parser *parser) { tree list = NULL_TREE; while (true) { tree name, label; if (c_parser_next_token_is (parser, CPP_NAME)) { c_token *tok = c_parser_peek_token (parser); name = tok->value; label = lookup_label_for_goto (tok->location, name); c_parser_consume_token (parser); TREE_USED (label) = 1; } else { c_parser_error (parser, "expected identifier"); return NULL_TREE; } name = build_string (IDENTIFIER_LENGTH (name), IDENTIFIER_POINTER (name)); list = tree_cons (name, label, list); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else return nreverse (list); } } /* Parse an expression other than a compound expression; that is, an assignment expression (C90 6.3.16, C99 6.5.16). If AFTER is not NULL then it is an Objective-C message expression which is the primary-expression starting the expression as an initializer. assignment-expression: conditional-expression unary-expression assignment-operator assignment-expression assignment-operator: one of = *= /= %= += -= <<= >>= &= ^= |= In GNU C we accept any conditional expression on the LHS and diagnose the invalid lvalue rather than producing a syntax error. 
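
   For example, given "int a, b;", the input

     (a + 1) = b;

   is parsed as an assignment expression here, and the non-lvalue
   left-hand side is diagnosed afterwards rather than being rejected
   as a syntax error.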
*/ static struct c_expr c_parser_expr_no_commas (c_parser *parser, struct c_expr *after) { struct c_expr lhs, rhs, ret; enum tree_code code; location_t op_location, exp_location; gcc_assert (!after || c_dialect_objc ()); lhs = c_parser_conditional_expression (parser, after); op_location = c_parser_peek_token (parser)->location; switch (c_parser_peek_token (parser)->type) { case CPP_EQ: code = NOP_EXPR; break; case CPP_MULT_EQ: code = MULT_EXPR; break; case CPP_DIV_EQ: code = TRUNC_DIV_EXPR; break; case CPP_MOD_EQ: code = TRUNC_MOD_EXPR; break; case CPP_PLUS_EQ: code = PLUS_EXPR; break; case CPP_MINUS_EQ: code = MINUS_EXPR; break; case CPP_LSHIFT_EQ: code = LSHIFT_EXPR; break; case CPP_RSHIFT_EQ: code = RSHIFT_EXPR; break; case CPP_AND_EQ: code = BIT_AND_EXPR; break; case CPP_XOR_EQ: code = BIT_XOR_EXPR; break; case CPP_OR_EQ: code = BIT_IOR_EXPR; break; default: return lhs; } c_parser_consume_token (parser); exp_location = c_parser_peek_token (parser)->location; rhs = c_parser_expr_no_commas (parser, NULL); rhs = default_function_array_conversion (exp_location, rhs); ret.value = build_modify_expr (op_location, lhs.value, lhs.original_type, code, exp_location, rhs.value, rhs.original_type); if (code == NOP_EXPR) ret.original_code = MODIFY_EXPR; else { TREE_NO_WARNING (ret.value) = 1; ret.original_code = ERROR_MARK; } ret.original_type = NULL; return ret; } /* Parse a conditional expression (C90 6.3.15, C99 6.5.15). If AFTER is not NULL then it is an Objective-C message expression which is the primary-expression starting the expression as an initializer. conditional-expression: logical-OR-expression logical-OR-expression ? expression : conditional-expression GNU extensions: conditional-expression: logical-OR-expression ? : conditional-expression */ static struct c_expr c_parser_conditional_expression (c_parser *parser, struct c_expr *after) { struct c_expr cond, exp1, exp2, ret; location_t cond_loc, colon_loc; gcc_assert (!after || c_dialect_objc ()); cond = c_parser_binary_expression (parser, after); if (c_parser_next_token_is_not (parser, CPP_QUERY)) return cond; cond_loc = c_parser_peek_token (parser)->location; cond = default_function_array_conversion (cond_loc, cond); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COLON)) { tree eptype = NULL_TREE; pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic, "ISO C forbids omitting the middle term of a ?: expression"); if (TREE_CODE (cond.value) == EXCESS_PRECISION_EXPR) { eptype = TREE_TYPE (cond.value); cond.value = TREE_OPERAND (cond.value, 0); } /* Make sure first operand is calculated only once. 
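	 In the GNU "x ?: y" form the condition also supplies the
	 middle term, e.g.

	   p = q ?: default_q;

	 behaves like "p = q ? q : default_q;" except that q is
	 evaluated exactly once; hence the c_save_expr wrapping
	 below.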
*/ exp1.value = c_save_expr (default_conversion (cond.value)); if (eptype) exp1.value = build1 (EXCESS_PRECISION_EXPR, eptype, exp1.value); exp1.original_type = NULL; cond.value = c_objc_common_truthvalue_conversion (cond_loc, exp1.value); c_inhibit_evaluation_warnings += cond.value == truthvalue_true_node; } else { cond.value = c_objc_common_truthvalue_conversion (cond_loc, default_conversion (cond.value)); c_inhibit_evaluation_warnings += cond.value == truthvalue_false_node; exp1 = c_parser_expression_conv (parser); c_inhibit_evaluation_warnings += ((cond.value == truthvalue_true_node) - (cond.value == truthvalue_false_node)); } colon_loc = c_parser_peek_token (parser)->location; if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) { c_inhibit_evaluation_warnings -= cond.value == truthvalue_true_node; ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } { location_t exp2_loc = c_parser_peek_token (parser)->location; exp2 = c_parser_conditional_expression (parser, NULL); exp2 = default_function_array_conversion (exp2_loc, exp2); } c_inhibit_evaluation_warnings -= cond.value == truthvalue_true_node; ret.value = build_conditional_expr (colon_loc, cond.value, cond.original_code == C_MAYBE_CONST_EXPR, exp1.value, exp1.original_type, exp2.value, exp2.original_type); ret.original_code = ERROR_MARK; if (exp1.value == error_mark_node || exp2.value == error_mark_node) ret.original_type = NULL; else { tree t1, t2; /* If both sides are enum type, the default conversion will have made the type of the result be an integer type. We want to remember the enum types we started with. */ t1 = exp1.original_type ? exp1.original_type : TREE_TYPE (exp1.value); t2 = exp2.original_type ? exp2.original_type : TREE_TYPE (exp2.value); ret.original_type = ((t1 != error_mark_node && t2 != error_mark_node && (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))) ? t1 : NULL); } return ret; } /* Parse a binary expression; that is, a logical-OR-expression (C90 6.3.5-6.3.14, C99 6.5.5-6.5.14). If AFTER is not NULL then it is an Objective-C message expression which is the primary-expression starting the expression as an initializer. 
multiplicative-expression: cast-expression multiplicative-expression * cast-expression multiplicative-expression / cast-expression multiplicative-expression % cast-expression additive-expression: multiplicative-expression additive-expression + multiplicative-expression additive-expression - multiplicative-expression shift-expression: additive-expression shift-expression << additive-expression shift-expression >> additive-expression relational-expression: shift-expression relational-expression < shift-expression relational-expression > shift-expression relational-expression <= shift-expression relational-expression >= shift-expression equality-expression: relational-expression equality-expression == relational-expression equality-expression != relational-expression AND-expression: equality-expression AND-expression & equality-expression exclusive-OR-expression: AND-expression exclusive-OR-expression ^ AND-expression inclusive-OR-expression: exclusive-OR-expression inclusive-OR-expression | exclusive-OR-expression logical-AND-expression: inclusive-OR-expression logical-AND-expression && inclusive-OR-expression logical-OR-expression: logical-AND-expression logical-OR-expression || logical-AND-expression */ static struct c_expr c_parser_binary_expression (c_parser *parser, struct c_expr *after) { /* A binary expression is parsed using operator-precedence parsing, with the operands being cast expressions. All the binary operators are left-associative. Thus a binary expression is of form: E0 op1 E1 op2 E2 ... which we represent on a stack. On the stack, the precedence levels are strictly increasing. When a new operator is encountered of higher precedence than that at the top of the stack, it is pushed; its LHS is the top expression, and its RHS is everything parsed until it is popped. When a new operator is encountered with precedence less than or equal to that at the top of the stack, triples E[i-1] op[i] E[i] are popped and replaced by the result of the operation until the operator at the top of the stack has lower precedence than the new operator or there is only one element on the stack; then the top expression is the LHS of the new operator. In the case of logical AND and OR expressions, we also need to adjust c_inhibit_evaluation_warnings as appropriate when the operators are pushed and popped. */ /* The precedence levels, where 0 is a dummy lowest level used for the bottom of the stack. */ enum prec { PREC_NONE, PREC_LOGOR, PREC_LOGAND, PREC_BITOR, PREC_BITXOR, PREC_BITAND, PREC_EQ, PREC_REL, PREC_SHIFT, PREC_ADD, PREC_MULT, NUM_PRECS }; struct { /* The expression at this stack level. */ struct c_expr expr; /* The precedence of the operator on its left, PREC_NONE at the bottom of the stack. */ enum prec prec; /* The operation on its left. */ enum tree_code op; /* The source location of this operation. */ location_t loc; } stack[NUM_PRECS]; int sp; /* Location of the binary operator. */ location_t binary_loc = UNKNOWN_LOCATION; /* Quiet warning. 
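     (binary_loc is assigned before each use once an operator has
     been seen.)

     As a worked illustration of the stack discipline described
     above, "a + b * c - d" is parsed as follows:

       see +    ADD > NONE, push          stack: a + b
       see *    MULT > ADD, push          stack: a + b * c
       see -    ADD <= MULT, pop (b*c)    stack: a + (b*c)
                ADD <= ADD, pop (a+(b*c))
                then push                 stack: (a+(b*c)) - d
       at end   pop                       result: (a+(b*c)) - d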
  */
#define POP \
  do { \
    switch (stack[sp].op) \
      { \
      case TRUTH_ANDIF_EXPR: \
	c_inhibit_evaluation_warnings -= (stack[sp - 1].expr.value \
					  == truthvalue_false_node); \
	break; \
      case TRUTH_ORIF_EXPR: \
	c_inhibit_evaluation_warnings -= (stack[sp - 1].expr.value \
					  == truthvalue_true_node); \
	break; \
      default: \
	break; \
      } \
    stack[sp - 1].expr \
      = default_function_array_conversion (stack[sp - 1].loc, \
					   stack[sp - 1].expr); \
    stack[sp].expr \
      = default_function_array_conversion (stack[sp].loc, stack[sp].expr); \
    stack[sp - 1].expr = parser_build_binary_op (stack[sp].loc, \
						 stack[sp].op, \
						 stack[sp - 1].expr, \
						 stack[sp].expr); \
    sp--; \
  } while (0)
  gcc_assert (!after || c_dialect_objc ());
  stack[0].loc = c_parser_peek_token (parser)->location;
  stack[0].expr = c_parser_cast_expression (parser, after);
  stack[0].prec = PREC_NONE;
  sp = 0;
  while (true)
    {
      enum prec oprec;
      enum tree_code ocode;
      if (parser->error)
	goto out;
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_MULT:
	  oprec = PREC_MULT;
	  ocode = MULT_EXPR;
	  break;
	case CPP_DIV:
	  oprec = PREC_MULT;
	  ocode = TRUNC_DIV_EXPR;
	  break;
	case CPP_MOD:
	  oprec = PREC_MULT;
	  ocode = TRUNC_MOD_EXPR;
	  break;
	case CPP_PLUS:
	  oprec = PREC_ADD;
	  ocode = PLUS_EXPR;
	  break;
	case CPP_MINUS:
	  oprec = PREC_ADD;
	  ocode = MINUS_EXPR;
	  break;
	case CPP_LSHIFT:
	  oprec = PREC_SHIFT;
	  ocode = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT:
	  oprec = PREC_SHIFT;
	  ocode = RSHIFT_EXPR;
	  break;
	case CPP_LESS:
	  oprec = PREC_REL;
	  ocode = LT_EXPR;
	  break;
	case CPP_GREATER:
	  oprec = PREC_REL;
	  ocode = GT_EXPR;
	  break;
	case CPP_LESS_EQ:
	  oprec = PREC_REL;
	  ocode = LE_EXPR;
	  break;
	case CPP_GREATER_EQ:
	  oprec = PREC_REL;
	  ocode = GE_EXPR;
	  break;
	case CPP_EQ_EQ:
	  oprec = PREC_EQ;
	  ocode = EQ_EXPR;
	  break;
	case CPP_NOT_EQ:
	  oprec = PREC_EQ;
	  ocode = NE_EXPR;
	  break;
	case CPP_AND:
	  oprec = PREC_BITAND;
	  ocode = BIT_AND_EXPR;
	  break;
	case CPP_XOR:
	  oprec = PREC_BITXOR;
	  ocode = BIT_XOR_EXPR;
	  break;
	case CPP_OR:
	  oprec = PREC_BITOR;
	  ocode = BIT_IOR_EXPR;
	  break;
	case CPP_AND_AND:
	  oprec = PREC_LOGAND;
	  ocode = TRUTH_ANDIF_EXPR;
	  break;
	case CPP_OR_OR:
	  oprec = PREC_LOGOR;
	  ocode = TRUTH_ORIF_EXPR;
	  break;
	default:
	  /* Not a binary operator, so end of the binary
	     expression.  */
	  goto out;
	}
      binary_loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      while (oprec <= stack[sp].prec)
	POP;
      switch (ocode)
	{
	case TRUTH_ANDIF_EXPR:
	  stack[sp].expr
	    = default_function_array_conversion (stack[sp].loc,
						 stack[sp].expr);
	  stack[sp].expr.value = c_objc_common_truthvalue_conversion
	    (stack[sp].loc, default_conversion (stack[sp].expr.value));
	  c_inhibit_evaluation_warnings += (stack[sp].expr.value
					    == truthvalue_false_node);
	  break;
	case TRUTH_ORIF_EXPR:
	  stack[sp].expr
	    = default_function_array_conversion (stack[sp].loc,
						 stack[sp].expr);
	  stack[sp].expr.value = c_objc_common_truthvalue_conversion
	    (stack[sp].loc, default_conversion (stack[sp].expr.value));
	  c_inhibit_evaluation_warnings += (stack[sp].expr.value
					    == truthvalue_true_node);
	  break;
	default:
	  break;
	}
      sp++;
      stack[sp].loc = binary_loc;
      stack[sp].expr = c_parser_cast_expression (parser, NULL);
      stack[sp].prec = oprec;
      stack[sp].op = ocode;
    }
 out:
  while (sp > 0)
    POP;
  return stack[0].expr;
#undef POP
}

/* Parse a cast expression (C90 6.3.4, C99 6.5.4).  If AFTER is not
   NULL then it is an Objective-C message expression which is the
   primary-expression starting the expression as an initializer.
cast-expression: unary-expression ( type-name ) unary-expression */ static struct c_expr c_parser_cast_expression (c_parser *parser, struct c_expr *after) { location_t cast_loc = c_parser_peek_token (parser)->location; gcc_assert (!after || c_dialect_objc ()); if (after) return c_parser_postfix_expression_after_primary (parser, cast_loc, *after); /* If the expression begins with a parenthesized type name, it may be either a cast or a compound literal; we need to see whether the next character is '{' to tell the difference. If not, it is an unary expression. */ if (c_parser_next_token_is (parser, CPP_OPEN_PAREN) && c_token_starts_typename (c_parser_peek_2nd_token (parser))) { struct c_type_name *type_name; struct c_expr ret; struct c_expr expr; c_parser_consume_token (parser); type_name = c_parser_type_name (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (type_name == NULL) { ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } /* Save casted types in the function's used types hash table. */ used_types_insert (type_name->specs->type); if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) return c_parser_postfix_expression_after_paren_type (parser, type_name, cast_loc); { location_t expr_loc = c_parser_peek_token (parser)->location; expr = c_parser_cast_expression (parser, NULL); expr = default_function_array_conversion (expr_loc, expr); } ret.value = c_cast_expr (cast_loc, type_name, expr.value); ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } else return c_parser_unary_expression (parser); } /* Parse an unary expression (C90 6.3.3, C99 6.5.3). unary-expression: postfix-expression ++ unary-expression -- unary-expression unary-operator cast-expression sizeof unary-expression sizeof ( type-name ) unary-operator: one of & * + - ~ ! GNU extensions: unary-expression: __alignof__ unary-expression __alignof__ ( type-name ) && identifier unary-operator: one of __extension__ __real__ __imag__ In addition, the GNU syntax treats ++ and -- as unary operators, so they may be applied to cast expressions with errors for non-lvalues given later. 
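
   Thus, for example, "++(int) x" is accepted by the grammar here, and
   the non-lvalue operand is only diagnosed later by the semantic
   routines.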
*/ static struct c_expr c_parser_unary_expression (c_parser *parser) { int ext; struct c_expr ret, op; location_t op_loc = c_parser_peek_token (parser)->location; location_t exp_loc; ret.original_code = ERROR_MARK; ret.original_type = NULL; switch (c_parser_peek_token (parser)->type) { case CPP_PLUS_PLUS: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, PREINCREMENT_EXPR, op); case CPP_MINUS_MINUS: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, PREDECREMENT_EXPR, op); case CPP_AND: c_parser_consume_token (parser); return parser_build_unary_op (op_loc, ADDR_EXPR, c_parser_cast_expression (parser, NULL)); case CPP_MULT: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); ret.value = build_indirect_ref (op_loc, op.value, RO_UNARY_STAR); return ret; case CPP_PLUS: if (!c_dialect_objc () && !in_system_header) warning_at (op_loc, OPT_Wtraditional, "traditional C rejects the unary plus operator"); c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, CONVERT_EXPR, op); case CPP_MINUS: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, NEGATE_EXPR, op); case CPP_COMPL: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, BIT_NOT_EXPR, op); case CPP_NOT: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, TRUTH_NOT_EXPR, op); case CPP_AND_AND: /* Refer to the address of a label as a pointer. 
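	 This is the GNU labels-as-values extension, e.g.

	   void *p = &&retry;
	   goto *p;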
*/ c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { ret.value = finish_label_address_expr (c_parser_peek_token (parser)->value, op_loc); c_parser_consume_token (parser); } else { c_parser_error (parser, "expected identifier"); ret.value = error_mark_node; } return ret; case CPP_KEYWORD: switch (c_parser_peek_token (parser)->keyword) { case RID_SIZEOF: return c_parser_sizeof_expression (parser); case RID_ALIGNOF: return c_parser_alignof_expression (parser); case RID_EXTENSION: c_parser_consume_token (parser); ext = disable_extension_diagnostics (); ret = c_parser_cast_expression (parser, NULL); restore_extension_diagnostics (ext); return ret; case RID_REALPART: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, REALPART_EXPR, op); case RID_IMAGPART: c_parser_consume_token (parser); exp_loc = c_parser_peek_token (parser)->location; op = c_parser_cast_expression (parser, NULL); op = default_function_array_conversion (exp_loc, op); return parser_build_unary_op (op_loc, IMAGPART_EXPR, op); default: return c_parser_postfix_expression (parser); } default: return c_parser_postfix_expression (parser); } } /* Parse a sizeof expression. */ static struct c_expr c_parser_sizeof_expression (c_parser *parser) { struct c_expr expr; location_t expr_loc; gcc_assert (c_parser_next_token_is_keyword (parser, RID_SIZEOF)); c_parser_consume_token (parser); c_inhibit_evaluation_warnings++; in_sizeof++; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN) && c_token_starts_typename (c_parser_peek_2nd_token (parser))) { /* Either sizeof ( type-name ) or sizeof unary-expression starting with a compound literal. */ struct c_type_name *type_name; c_parser_consume_token (parser); expr_loc = c_parser_peek_token (parser)->location; type_name = c_parser_type_name (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (type_name == NULL) { struct c_expr ret; c_inhibit_evaluation_warnings--; in_sizeof--; ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { expr = c_parser_postfix_expression_after_paren_type (parser, type_name, expr_loc); goto sizeof_expr; } /* sizeof ( type-name ). */ c_inhibit_evaluation_warnings--; in_sizeof--; return c_expr_sizeof_type (expr_loc, type_name); } else { expr_loc = c_parser_peek_token (parser)->location; expr = c_parser_unary_expression (parser); sizeof_expr: c_inhibit_evaluation_warnings--; in_sizeof--; if (TREE_CODE (expr.value) == COMPONENT_REF && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1))) error_at (expr_loc, "%<sizeof%> applied to a bit-field"); return c_expr_sizeof_expr (expr_loc, expr); } } /* Parse an alignof expression. */ static struct c_expr c_parser_alignof_expression (c_parser *parser) { struct c_expr expr; location_t loc = c_parser_peek_token (parser)->location; gcc_assert (c_parser_next_token_is_keyword (parser, RID_ALIGNOF)); c_parser_consume_token (parser); c_inhibit_evaluation_warnings++; in_alignof++; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN) && c_token_starts_typename (c_parser_peek_2nd_token (parser))) { /* Either __alignof__ ( type-name ) or __alignof__ unary-expression starting with a compound literal. 
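	 E.g. "__alignof__ (double)" uses the type-name form, while
	 "__alignof__ (struct s) { 0 }" is a unary-expression whose
	 operand is a compound literal; the two cannot be told apart
	 until the token after the closing parenthesis is seen.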
*/ location_t loc; struct c_type_name *type_name; struct c_expr ret; c_parser_consume_token (parser); loc = c_parser_peek_token (parser)->location; type_name = c_parser_type_name (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (type_name == NULL) { struct c_expr ret; c_inhibit_evaluation_warnings--; in_alignof--; ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { expr = c_parser_postfix_expression_after_paren_type (parser, type_name, loc); goto alignof_expr; } /* alignof ( type-name ). */ c_inhibit_evaluation_warnings--; in_alignof--; ret.value = c_alignof (loc, groktypename (type_name, NULL, NULL)); ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } else { struct c_expr ret; expr = c_parser_unary_expression (parser); alignof_expr: c_inhibit_evaluation_warnings--; in_alignof--; ret.value = c_alignof_expr (loc, expr.value); ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } } /* Parse a postfix expression (C90 6.3.1-6.3.2, C99 6.5.1-6.5.2). postfix-expression: primary-expression postfix-expression [ expression ] postfix-expression ( argument-expression-list[opt] ) postfix-expression . identifier postfix-expression -> identifier postfix-expression ++ postfix-expression -- ( type-name ) { initializer-list } ( type-name ) { initializer-list , } argument-expression-list: argument-expression argument-expression-list , argument-expression primary-expression: identifier constant string-literal ( expression ) GNU extensions: primary-expression: __func__ (treated as a keyword in GNU C) __FUNCTION__ __PRETTY_FUNCTION__ ( compound-statement ) __builtin_va_arg ( assignment-expression , type-name ) __builtin_offsetof ( type-name , offsetof-member-designator ) __builtin_choose_expr ( assignment-expression , assignment-expression , assignment-expression ) __builtin_types_compatible_p ( type-name , type-name ) offsetof-member-designator: identifier offsetof-member-designator . 
       identifier
     offsetof-member-designator [ expression ]

   Objective-C:

   primary-expression:
     [ objc-receiver objc-message-args ]
     @selector ( objc-selector-arg )
     @protocol ( identifier )
     @encode ( type-name )
     objc-string-literal
*/

static struct c_expr
c_parser_postfix_expression (c_parser *parser)
{
  struct c_expr expr, e1, e2, e3;
  struct c_type_name *t1, *t2;
  location_t loc = c_parser_peek_token (parser)->location;
  expr.original_code = ERROR_MARK;
  expr.original_type = NULL;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_NUMBER:
      expr.value = c_parser_peek_token (parser)->value;
      loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      if (TREE_CODE (expr.value) == FIXED_CST
	  && !targetm.fixed_point_supported_p ())
	{
	  error_at (loc, "fixed-point types not supported for this target");
	  expr.value = error_mark_node;
	}
      break;
    case CPP_CHAR:
    case CPP_CHAR16:
    case CPP_CHAR32:
    case CPP_WCHAR:
      expr.value = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      break;
    case CPP_STRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_WSTRING:
    case CPP_UTF8STRING:
      expr.value = c_parser_peek_token (parser)->value;
      expr.original_code = STRING_CST;
      c_parser_consume_token (parser);
      break;
    case CPP_OBJC_STRING:
      gcc_assert (c_dialect_objc ());
      expr.value
	= objc_build_string_object (c_parser_peek_token (parser)->value);
      c_parser_consume_token (parser);
      break;
    case CPP_NAME:
      if (c_parser_peek_token (parser)->id_kind != C_ID_ID)
	{
	  c_parser_error (parser, "expected expression");
	  expr.value = error_mark_node;
	  break;
	}
      {
	tree id = c_parser_peek_token (parser)->value;
	c_parser_consume_token (parser);
	expr.value = build_external_ref (loc, id,
					 (c_parser_peek_token (parser)->type
					  == CPP_OPEN_PAREN),
					 &expr.original_type);
      }
      break;
    case CPP_OPEN_PAREN:
      /* A parenthesized expression, statement expression or compound
	 literal.  */
      if (c_parser_peek_2nd_token (parser)->type == CPP_OPEN_BRACE)
	{
	  /* A statement expression.  */
	  tree stmt;
	  location_t brace_loc;
	  c_parser_consume_token (parser);
	  brace_loc = c_parser_peek_token (parser)->location;
	  c_parser_consume_token (parser);
	  if (cur_stmt_list == NULL)
	    {
	      error_at (loc, "braced-group within expression allowed "
			"only inside a function");
	      parser->error = true;
	      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      expr.value = error_mark_node;
	      break;
	    }
	  stmt = c_begin_stmt_expr ();
	  c_parser_compound_statement_nostart (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  pedwarn (loc, OPT_pedantic,
		   "ISO C forbids braced-groups within expressions");
	  expr.value = c_finish_stmt_expr (brace_loc, stmt);
	}
      else if (c_token_starts_typename (c_parser_peek_2nd_token (parser)))
	{
	  /* A compound literal.  ??? Can we actually get here rather
	     than going directly to
	     c_parser_postfix_expression_after_paren_type from
	     elsewhere?  */
	  location_t loc;
	  struct c_type_name *type_name;
	  c_parser_consume_token (parser);
	  loc = c_parser_peek_token (parser)->location;
	  type_name = c_parser_type_name (parser);
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  if (type_name == NULL)
	    {
	      expr.value = error_mark_node;
	    }
	  else
	    expr = c_parser_postfix_expression_after_paren_type (parser,
								 type_name,
								 loc);
	}
      else
	{
	  /* A parenthesized expression.
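	     The two special forms have been ruled out above: "(" "{"
	     begins a statement expression such as
	     "({ int i = f (); i * i; })", and "(" type-name ")" "{"
	     begins a compound literal.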
*/ c_parser_consume_token (parser); expr = c_parser_expression (parser); if (TREE_CODE (expr.value) == MODIFY_EXPR) TREE_NO_WARNING (expr.value) = 1; if (expr.original_code != C_MAYBE_CONST_EXPR) expr.original_code = ERROR_MARK; /* Don't change EXPR.ORIGINAL_TYPE. */ c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } break; case CPP_KEYWORD: switch (c_parser_peek_token (parser)->keyword) { case RID_FUNCTION_NAME: case RID_PRETTY_FUNCTION_NAME: case RID_C99_FUNCTION_NAME: expr.value = fname_decl (loc, c_parser_peek_token (parser)->keyword, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); break; case RID_VA_ARG: c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } e1 = c_parser_expr_no_commas (parser, NULL); e1.value = c_fully_fold (e1.value, false, NULL); if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } loc = c_parser_peek_token (parser)->location; t1 = c_parser_type_name (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (t1 == NULL) { expr.value = error_mark_node; } else { tree type_expr = NULL_TREE; expr.value = c_build_va_arg (loc, e1.value, groktypename (t1, &type_expr, NULL)); if (type_expr) { expr.value = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (expr.value), type_expr, expr.value); C_MAYBE_CONST_EXPR_NON_CONST (expr.value) = true; } } break; case RID_OFFSETOF: c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } t1 = c_parser_type_name (parser); if (t1 == NULL) { expr.value = error_mark_node; break; } if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } { tree type = groktypename (t1, NULL, NULL); tree offsetof_ref; if (type == error_mark_node) offsetof_ref = error_mark_node; else { offsetof_ref = build1 (INDIRECT_REF, type, null_pointer_node); SET_EXPR_LOCATION (offsetof_ref, loc); } /* Parse the second argument to __builtin_offsetof. We must have one identifier, and beyond that we want to accept sub structure and sub array references. 
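	       So, given suitable declarations, each of these is
	       accepted:

		 __builtin_offsetof (struct s, a)
		 __builtin_offsetof (struct s, a.b[2].c)
		 __builtin_offsetof (struct s, a[3][4])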
*/ if (c_parser_next_token_is (parser, CPP_NAME)) { offsetof_ref = build_component_ref (loc, offsetof_ref, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); while (c_parser_next_token_is (parser, CPP_DOT) || c_parser_next_token_is (parser, CPP_OPEN_SQUARE) || c_parser_next_token_is (parser, CPP_DEREF)) { if (c_parser_next_token_is (parser, CPP_DEREF)) { loc = c_parser_peek_token (parser)->location; offsetof_ref = build_array_ref (loc, offsetof_ref, integer_zero_node); goto do_dot; } else if (c_parser_next_token_is (parser, CPP_DOT)) { do_dot: c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); break; } offsetof_ref = build_component_ref (loc, offsetof_ref, c_parser_peek_token (parser)->value); c_parser_consume_token (parser); } else { tree idx; loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); idx = c_parser_expression (parser).value; idx = c_fully_fold (idx, false, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); offsetof_ref = build_array_ref (loc, offsetof_ref, idx); } } } else c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); expr.value = fold_offsetof (offsetof_ref, NULL_TREE); } break; case RID_CHOOSE_EXPR: c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } loc = c_parser_peek_token (parser)->location; e1 = c_parser_expr_no_commas (parser, NULL); if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } e2 = c_parser_expr_no_commas (parser, NULL); if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } e3 = c_parser_expr_no_commas (parser, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); { tree c; c = e1.value; if (TREE_CODE (c) != INTEGER_CST || !INTEGRAL_TYPE_P (TREE_TYPE (c))) error_at (loc, "first argument to %<__builtin_choose_expr%> not" " a constant"); constant_expression_warning (c); expr = integer_zerop (c) ? e3 : e2; } break; case RID_TYPES_COMPATIBLE_P: c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } t1 = c_parser_type_name (parser); if (t1 == NULL) { expr.value = error_mark_node; break; } if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } t2 = c_parser_type_name (parser); if (t2 == NULL) { expr.value = error_mark_node; break; } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); { tree e1, e2; e1 = TYPE_MAIN_VARIANT (groktypename (t1, NULL, NULL)); e2 = TYPE_MAIN_VARIANT (groktypename (t2, NULL, NULL)); expr.value = comptypes (e1, e2) ? 
build_int_cst (NULL_TREE, 1) : build_int_cst (NULL_TREE, 0); } break; case RID_AT_SELECTOR: gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } { tree sel = c_parser_objc_selector_arg (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); expr.value = objc_build_selector_expr (loc, sel); } break; case RID_AT_PROTOCOL: gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } { tree id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); expr.value = objc_build_protocol_expr (id); } break; case RID_AT_ENCODE: /* Extension to support C-structures in the archiver. */ gcc_assert (c_dialect_objc ()); c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } t1 = c_parser_type_name (parser); if (t1 == NULL) { expr.value = error_mark_node; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); break; } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); { tree type = groktypename (t1, NULL, NULL); expr.value = objc_build_encode_expr (type); } break; default: c_parser_error (parser, "expected expression"); expr.value = error_mark_node; break; } break; case CPP_OPEN_SQUARE: if (c_dialect_objc ()) { tree receiver, args; c_parser_consume_token (parser); receiver = c_parser_objc_receiver (parser); args = c_parser_objc_message_args (parser); c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); expr.value = objc_build_message_expr (build_tree_list (receiver, args)); break; } /* Else fall through to report error. */ default: c_parser_error (parser, "expected expression"); expr.value = error_mark_node; break; } return c_parser_postfix_expression_after_primary (parser, loc, expr); } /* Parse a postfix expression after a parenthesized type name: the brace-enclosed initializer of a compound literal, possibly followed by some postfix operators. This is separate because it is not possible to tell until after the type name whether a cast expression has a cast or a compound literal, or whether the operand of sizeof is a parenthesized type name or starts with a compound literal. TYPE_LOC is the location where TYPE_NAME starts--the location of the first token after the parentheses around the type name. 
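
   For instance, in "sizeof (int [2]) { 1, 2 }" nothing before the "{"
   distinguishes a parenthesized type name operand of sizeof from the
   start of this compound literal; only the brace decides, at which
   point parsing continues here.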
*/ static struct c_expr c_parser_postfix_expression_after_paren_type (c_parser *parser, struct c_type_name *type_name, location_t type_loc) { tree type; struct c_expr init; bool non_const; struct c_expr expr; location_t start_loc; tree type_expr = NULL_TREE; bool type_expr_const = true; check_compound_literal_type (type_loc, type_name); start_init (NULL_TREE, NULL, 0); type = groktypename (type_name, &type_expr, &type_expr_const); start_loc = c_parser_peek_token (parser)->location; if (type != error_mark_node && C_TYPE_VARIABLE_SIZE (type)) { error_at (type_loc, "compound literal has variable size"); type = error_mark_node; } init = c_parser_braced_init (parser, type, false); finish_init (); maybe_warn_string_init (type, init); if (type != error_mark_node && !ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (type)) && current_function_decl) { error ("compound literal qualified by address-space qualifier"); type = error_mark_node; } if (!flag_isoc99) pedwarn (start_loc, OPT_pedantic, "ISO C90 forbids compound literals"); non_const = ((init.value && TREE_CODE (init.value) == CONSTRUCTOR) ? CONSTRUCTOR_NON_CONST (init.value) : init.original_code == C_MAYBE_CONST_EXPR); non_const |= !type_expr_const; expr.value = build_compound_literal (start_loc, type, init.value, non_const); expr.original_code = ERROR_MARK; expr.original_type = NULL; if (type_expr) { if (TREE_CODE (expr.value) == C_MAYBE_CONST_EXPR) { gcc_assert (C_MAYBE_CONST_EXPR_PRE (expr.value) == NULL_TREE); C_MAYBE_CONST_EXPR_PRE (expr.value) = type_expr; } else { gcc_assert (!non_const); expr.value = build2 (C_MAYBE_CONST_EXPR, type, type_expr, expr.value); } } return c_parser_postfix_expression_after_primary (parser, start_loc, expr); } /* Parse a postfix expression after the initial primary or compound literal; that is, parse a series of postfix operators. EXPR_LOC is the location of the primary expression. */ static struct c_expr c_parser_postfix_expression_after_primary (c_parser *parser, location_t expr_loc, struct c_expr expr) { struct c_expr orig_expr; tree ident, idx; VEC(tree,gc) *exprlist; VEC(tree,gc) *origtypes; while (true) { location_t op_loc = c_parser_peek_token (parser)->location; switch (c_parser_peek_token (parser)->type) { case CPP_OPEN_SQUARE: /* Array reference. */ c_parser_consume_token (parser); idx = c_parser_expression (parser).value; c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>"); expr.value = build_array_ref (op_loc, expr.value, idx); expr.original_code = ERROR_MARK; expr.original_type = NULL; break; case CPP_OPEN_PAREN: /* Function call. */ c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN)) exprlist = NULL; else exprlist = c_parser_expr_list (parser, true, false, &origtypes); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); orig_expr = expr; /* FIXME diagnostics: Ideally we want the FUNCNAME, not the "(" after the FUNCNAME, which is what we have now. */ expr.value = build_function_call_vec (op_loc, expr.value, exprlist, origtypes); expr.original_code = ERROR_MARK; if (TREE_CODE (expr.value) == INTEGER_CST && TREE_CODE (orig_expr.value) == FUNCTION_DECL && DECL_BUILT_IN_CLASS (orig_expr.value) == BUILT_IN_NORMAL && DECL_FUNCTION_CODE (orig_expr.value) == BUILT_IN_CONSTANT_P) expr.original_code = C_MAYBE_CONST_EXPR; expr.original_type = NULL; if (exprlist != NULL) { release_tree_vector (exprlist); release_tree_vector (origtypes); } break; case CPP_DOT: /* Structure element reference. 
*/ c_parser_consume_token (parser); expr = default_function_array_conversion (expr_loc, expr); if (c_parser_next_token_is (parser, CPP_NAME)) ident = c_parser_peek_token (parser)->value; else { c_parser_error (parser, "expected identifier"); expr.value = error_mark_node; expr.original_code = ERROR_MARK; expr.original_type = NULL; return expr; } c_parser_consume_token (parser); expr.value = build_component_ref (op_loc, expr.value, ident); expr.original_code = ERROR_MARK; if (TREE_CODE (expr.value) != COMPONENT_REF) expr.original_type = NULL; else { /* Remember the original type of a bitfield. */ tree field = TREE_OPERAND (expr.value, 1); if (TREE_CODE (field) != FIELD_DECL) expr.original_type = NULL; else expr.original_type = DECL_BIT_FIELD_TYPE (field); } break; case CPP_DEREF: /* Structure element reference. */ c_parser_consume_token (parser); expr = default_function_array_conversion (expr_loc, expr); if (c_parser_next_token_is (parser, CPP_NAME)) ident = c_parser_peek_token (parser)->value; else { c_parser_error (parser, "expected identifier"); expr.value = error_mark_node; expr.original_code = ERROR_MARK; expr.original_type = NULL; return expr; } c_parser_consume_token (parser); expr.value = build_component_ref (op_loc, build_indirect_ref (op_loc, expr.value, RO_ARROW), ident); expr.original_code = ERROR_MARK; if (TREE_CODE (expr.value) != COMPONENT_REF) expr.original_type = NULL; else { /* Remember the original type of a bitfield. */ tree field = TREE_OPERAND (expr.value, 1); if (TREE_CODE (field) != FIELD_DECL) expr.original_type = NULL; else expr.original_type = DECL_BIT_FIELD_TYPE (field); } break; case CPP_PLUS_PLUS: /* Postincrement. */ c_parser_consume_token (parser); expr = default_function_array_conversion (expr_loc, expr); expr.value = build_unary_op (op_loc, POSTINCREMENT_EXPR, expr.value, 0); expr.original_code = ERROR_MARK; expr.original_type = NULL; break; case CPP_MINUS_MINUS: /* Postdecrement. */ c_parser_consume_token (parser); expr = default_function_array_conversion (expr_loc, expr); expr.value = build_unary_op (op_loc, POSTDECREMENT_EXPR, expr.value, 0); expr.original_code = ERROR_MARK; expr.original_type = NULL; break; default: return expr; } } } /* Parse an expression (C90 6.3.17, C99 6.5.17). expression: assignment-expression expression , assignment-expression */ static struct c_expr c_parser_expression (c_parser *parser) { struct c_expr expr; expr = c_parser_expr_no_commas (parser, NULL); while (c_parser_next_token_is (parser, CPP_COMMA)) { struct c_expr next; location_t loc = c_parser_peek_token (parser)->location; location_t expr_loc; c_parser_consume_token (parser); expr_loc = c_parser_peek_token (parser)->location; next = c_parser_expr_no_commas (parser, NULL); next = default_function_array_conversion (expr_loc, next); expr.value = build_compound_expr (loc, expr.value, next.value); expr.original_code = COMPOUND_EXPR; expr.original_type = next.original_type; } return expr; } /* Parse an expression and convert functions or arrays to pointers. */ static struct c_expr c_parser_expression_conv (c_parser *parser) { struct c_expr expr; location_t loc = c_parser_peek_token (parser)->location; expr = c_parser_expression (parser); expr = default_function_array_conversion (loc, expr); return expr; } /* Parse a non-empty list of expressions. If CONVERT_P, convert functions and arrays to pointers. If FOLD_P, fold the expressions. 
nonempty-expr-list: assignment-expression nonempty-expr-list , assignment-expression */ static VEC(tree,gc) * c_parser_expr_list (c_parser *parser, bool convert_p, bool fold_p, VEC(tree,gc) **p_orig_types) { VEC(tree,gc) *ret; VEC(tree,gc) *orig_types; struct c_expr expr; location_t loc = c_parser_peek_token (parser)->location; ret = make_tree_vector (); if (p_orig_types == NULL) orig_types = NULL; else orig_types = make_tree_vector (); expr = c_parser_expr_no_commas (parser, NULL); if (convert_p) expr = default_function_array_conversion (loc, expr); if (fold_p) expr.value = c_fully_fold (expr.value, false, NULL); VEC_quick_push (tree, ret, expr.value); if (orig_types != NULL) VEC_quick_push (tree, orig_types, expr.original_type); while (c_parser_next_token_is (parser, CPP_COMMA)) { c_parser_consume_token (parser); loc = c_parser_peek_token (parser)->location; expr = c_parser_expr_no_commas (parser, NULL); if (convert_p) expr = default_function_array_conversion (loc, expr); if (fold_p) expr.value = c_fully_fold (expr.value, false, NULL); VEC_safe_push (tree, gc, ret, expr.value); if (orig_types != NULL) VEC_safe_push (tree, gc, orig_types, expr.original_type); } if (orig_types != NULL) *p_orig_types = orig_types; return ret; } /* Parse Objective-C-specific constructs. */ /* Parse an objc-class-definition. objc-class-definition: @interface identifier objc-superclass[opt] objc-protocol-refs[opt] objc-class-instance-variables[opt] objc-methodprotolist @end @implementation identifier objc-superclass[opt] objc-class-instance-variables[opt] @interface identifier ( identifier ) objc-protocol-refs[opt] objc-methodprotolist @end @implementation identifier ( identifier ) objc-superclass: : identifier "@interface identifier (" must start "@interface identifier ( identifier ) ...": objc-methodprotolist in the first production may not start with a parenthesized identifier as a declarator of a data definition with no declaration specifiers if the objc-superclass, objc-protocol-refs and objc-class-instance-variables are omitted. 
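
   That is, in "@interface Foo ( Bar ) ... @end" the "( Bar )" must
   begin a category interface; it may not be parsed as a declarator
   "( Bar )" of a data definition inside the objc-methodprotolist of
   a plain "@interface Foo".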
*/ static void c_parser_objc_class_definition (c_parser *parser) { bool iface_p; tree id1; tree superclass; if (c_parser_next_token_is_keyword (parser, RID_AT_INTERFACE)) iface_p = true; else if (c_parser_next_token_is_keyword (parser, RID_AT_IMPLEMENTATION)) iface_p = false; else gcc_unreachable (); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); return; } id1 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { tree id2; tree proto = NULL_TREE; c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); return; } id2 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (!iface_p) { objc_start_category_implementation (id1, id2); return; } if (c_parser_next_token_is (parser, CPP_LESS)) proto = c_parser_objc_protocol_refs (parser); objc_start_category_interface (id1, id2, proto); c_parser_objc_methodprotolist (parser); c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>"); objc_finish_interface (); return; } if (c_parser_next_token_is (parser, CPP_COLON)) { c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); return; } superclass = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else superclass = NULL_TREE; if (iface_p) { tree proto = NULL_TREE; if (c_parser_next_token_is (parser, CPP_LESS)) proto = c_parser_objc_protocol_refs (parser); objc_start_class_interface (id1, superclass, proto); } else objc_start_class_implementation (id1, superclass); if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) c_parser_objc_class_instance_variables (parser); if (iface_p) { objc_continue_interface (); c_parser_objc_methodprotolist (parser); c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>"); objc_finish_interface (); } else { objc_continue_implementation (); return; } } /* Parse objc-class-instance-variables. objc-class-instance-variables: { objc-instance-variable-decl-list[opt] } objc-instance-variable-decl-list: objc-visibility-spec objc-instance-variable-decl ; ; objc-instance-variable-decl-list objc-visibility-spec objc-instance-variable-decl-list objc-instance-variable-decl ; objc-instance-variable-decl-list ; objc-visibility-spec: @private @protected @public objc-instance-variable-decl: struct-declaration */ static void c_parser_objc_class_instance_variables (c_parser *parser) { gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE)); c_parser_consume_token (parser); while (c_parser_next_token_is_not (parser, CPP_EOF)) { tree decls; /* Parse any stray semicolon. */ if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic, "extra semicolon in struct or union specified"); c_parser_consume_token (parser); continue; } /* Stop if at the end of the instance variables. */ if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { c_parser_consume_token (parser); break; } /* Parse any objc-visibility-spec. 
*/ if (c_parser_next_token_is_keyword (parser, RID_PRIVATE)) { c_parser_consume_token (parser); objc_set_visibility (2); continue; } else if (c_parser_next_token_is_keyword (parser, RID_PROTECTED)) { c_parser_consume_token (parser); objc_set_visibility (0); continue; } else if (c_parser_next_token_is_keyword (parser, RID_PUBLIC)) { c_parser_consume_token (parser); objc_set_visibility (1); continue; } else if (c_parser_next_token_is (parser, CPP_PRAGMA)) { c_parser_pragma (parser, pragma_external); continue; } /* Parse some comma-separated declarations. */ decls = c_parser_struct_declaration (parser); { /* Comma-separated instance variables are chained together in reverse order; add them one by one. */ tree ivar = nreverse (decls); for (; ivar; ivar = TREE_CHAIN (ivar)) objc_add_instance_variable (copy_node (ivar)); } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } } /* Parse an objc-class-declaration. objc-class-declaration: @class identifier-list ; */ static void c_parser_objc_class_declaration (c_parser *parser) { tree list = NULL_TREE; gcc_assert (c_parser_next_token_is_keyword (parser, RID_CLASS)); c_parser_consume_token (parser); /* Any identifiers, including those declared as type names, are OK here. */ while (true) { tree id; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); break; } id = c_parser_peek_token (parser)->value; list = chainon (list, build_tree_list (NULL_TREE, id)); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); objc_declare_class (list); } /* Parse an objc-alias-declaration. objc-alias-declaration: @compatibility_alias identifier identifier ; */ static void c_parser_objc_alias_declaration (c_parser *parser) { tree id1, id2; gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_ALIAS)); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL); return; } id1 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL); return; } id2 = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); objc_declare_alias (id1, id2); } /* Parse an objc-protocol-definition. objc-protocol-definition: @protocol identifier objc-protocol-refs[opt] objc-methodprotolist @end @protocol identifier-list ; "@protocol identifier ;" should be resolved as "@protocol identifier-list ;": objc-methodprotolist may not start with a semicolon in the first alternative if objc-protocol-refs are omitted. */ static void c_parser_objc_protocol_definition (c_parser *parser) { gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_PROTOCOL)); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); return; } if (c_parser_peek_2nd_token (parser)->type == CPP_COMMA || c_parser_peek_2nd_token (parser)->type == CPP_SEMICOLON) { tree list = NULL_TREE; /* Any identifiers, including those declared as type names, are OK here. 
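	 E.g. both "@protocol P;" and "@protocol P1, P2;" are forward
	 declarations handled by this branch.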
*/ while (true) { tree id; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); break; } id = c_parser_peek_token (parser)->value; list = chainon (list, build_tree_list (NULL_TREE, id)); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); objc_declare_protocols (list); } else { tree id = c_parser_peek_token (parser)->value; tree proto = NULL_TREE; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_LESS)) proto = c_parser_objc_protocol_refs (parser); parser->objc_pq_context = true; objc_start_protocol (id, proto); c_parser_objc_methodprotolist (parser); c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>"); parser->objc_pq_context = false; objc_finish_interface (); } } /* Parse an objc-method-type. objc-method-type: + - */ static enum tree_code c_parser_objc_method_type (c_parser *parser) { switch (c_parser_peek_token (parser)->type) { case CPP_PLUS: c_parser_consume_token (parser); return PLUS_EXPR; case CPP_MINUS: c_parser_consume_token (parser); return MINUS_EXPR; default: gcc_unreachable (); } } /* Parse an objc-method-definition. objc-method-definition: objc-method-type objc-method-decl ;[opt] compound-statement */ static void c_parser_objc_method_definition (c_parser *parser) { enum tree_code type = c_parser_objc_method_type (parser); tree decl; objc_set_method_type (type); parser->objc_pq_context = true; decl = c_parser_objc_method_decl (parser); if (c_parser_next_token_is (parser, CPP_SEMICOLON)) { c_parser_consume_token (parser); pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic, "extra semicolon in method definition specified"); } if (!c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { c_parser_error (parser, "expected %<{%>"); return; } parser->objc_pq_context = false; objc_start_method_definition (decl); add_stmt (c_parser_compound_statement (parser)); objc_finish_method_definition (current_function_decl); } /* Parse an objc-methodprotolist. objc-methodprotolist: empty objc-methodprotolist objc-methodproto objc-methodprotolist declaration objc-methodprotolist ; The declaration is a data definition, which may be missing declaration specifiers under the same rules and diagnostics as other data definitions outside functions, and the stray semicolon is diagnosed the same way as a stray semicolon outside a function. */ static void c_parser_objc_methodprotolist (c_parser *parser) { while (true) { /* The list is terminated by @end. */ switch (c_parser_peek_token (parser)->type) { case CPP_SEMICOLON: pedwarn (c_parser_peek_token (parser)->location, OPT_pedantic, "ISO C does not allow extra %<;%> outside of a function"); c_parser_consume_token (parser); break; case CPP_PLUS: case CPP_MINUS: c_parser_objc_methodproto (parser); break; case CPP_PRAGMA: c_parser_pragma (parser, pragma_external); break; case CPP_EOF: return; default: if (c_parser_next_token_is_keyword (parser, RID_AT_END)) return; c_parser_declaration_or_fndef (parser, false, true, false, true); break; } } } /* Parse an objc-methodproto. objc-methodproto: objc-method-type objc-method-decl ; */ static void c_parser_objc_methodproto (c_parser *parser) { enum tree_code type = c_parser_objc_method_type (parser); tree decl; objc_set_method_type (type); /* Remember protocol qualifiers in prototypes. 
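     This makes, e.g., the "in" of "- (void) send: (in int) x;" a
     protocol qualifier while the declaration is parsed, rather than
     an ordinary identifier.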
*/ parser->objc_pq_context = true; decl = c_parser_objc_method_decl (parser); /* Forget protocol qualifiers here. */ parser->objc_pq_context = false; objc_add_method_declaration (decl); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } /* Parse an objc-method-decl. objc-method-decl: ( objc-type-name ) objc-selector objc-selector ( objc-type-name ) objc-keyword-selector objc-optparmlist objc-keyword-selector objc-optparmlist objc-keyword-selector: objc-keyword-decl objc-keyword-selector objc-keyword-decl objc-keyword-decl: objc-selector : ( objc-type-name ) identifier objc-selector : identifier : ( objc-type-name ) identifier : identifier objc-optparmlist: objc-optparms objc-optellipsis objc-optparms: empty objc-opt-parms , parameter-declaration objc-optellipsis: empty , ... */ static tree c_parser_objc_method_decl (c_parser *parser) { tree type = NULL_TREE; tree sel; tree parms = NULL_TREE; bool ellipsis = false; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { c_parser_consume_token (parser); type = c_parser_objc_type_name (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } sel = c_parser_objc_selector (parser); /* If there is no selector, or a colon follows, we have an objc-keyword-selector. If there is a selector, and a colon does not follow, that selector ends the objc-method-decl. */ if (!sel || c_parser_next_token_is (parser, CPP_COLON)) { tree tsel = sel; tree list = NULL_TREE; while (true) { tree atype = NULL_TREE, id, keyworddecl; if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) break; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { c_parser_consume_token (parser); atype = c_parser_objc_type_name (parser); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); return error_mark_node; } id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); keyworddecl = objc_build_keyword_decl (tsel, atype, id); list = chainon (list, keyworddecl); tsel = c_parser_objc_selector (parser); if (!tsel && c_parser_next_token_is_not (parser, CPP_COLON)) break; } /* Parse the optional parameter list. Optional Objective-C method parameters follow the C syntax, and may include '...' to denote a variable number of arguments. */ parms = make_node (TREE_LIST); while (c_parser_next_token_is (parser, CPP_COMMA)) { struct c_parm *parm; c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_ELLIPSIS)) { ellipsis = true; c_parser_consume_token (parser); break; } parm = c_parser_parameter_declaration (parser, NULL_TREE); if (parm == NULL) break; parms = chainon (parms, build_tree_list (NULL_TREE, grokparm (parm))); } sel = list; } return objc_build_method_signature (type, sel, parms, ellipsis); } /* Parse an objc-type-name. 
objc-type-name: objc-type-qualifiers[opt] type-name objc-type-qualifiers[opt] objc-type-qualifiers: objc-type-qualifier objc-type-qualifiers objc-type-qualifier objc-type-qualifier: one of in out inout bycopy byref oneway */ static tree c_parser_objc_type_name (c_parser *parser) { tree quals = NULL_TREE; struct c_type_name *type_name = NULL; tree type = NULL_TREE; while (true) { c_token *token = c_parser_peek_token (parser); if (token->type == CPP_KEYWORD && (token->keyword == RID_IN || token->keyword == RID_OUT || token->keyword == RID_INOUT || token->keyword == RID_BYCOPY || token->keyword == RID_BYREF || token->keyword == RID_ONEWAY)) { quals = chainon (quals, build_tree_list (NULL_TREE, token->value)); c_parser_consume_token (parser); } else break; } if (c_parser_next_token_starts_typename (parser)) type_name = c_parser_type_name (parser); if (type_name) type = groktypename (type_name, NULL, NULL); return build_tree_list (quals, type); } /* Parse objc-protocol-refs. objc-protocol-refs: < identifier-list > */ static tree c_parser_objc_protocol_refs (c_parser *parser) { tree list = NULL_TREE; gcc_assert (c_parser_next_token_is (parser, CPP_LESS)); c_parser_consume_token (parser); /* Any identifiers, including those declared as type names, are OK here. */ while (true) { tree id; if (c_parser_next_token_is_not (parser, CPP_NAME)) { c_parser_error (parser, "expected identifier"); break; } id = c_parser_peek_token (parser)->value; list = chainon (list, build_tree_list (NULL_TREE, id)); c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); else break; } c_parser_require (parser, CPP_GREATER, "expected %<>%>"); return list; } /* Parse an objc-try-catch-statement. objc-try-catch-statement: @try compound-statement objc-catch-list[opt] @try compound-statement objc-catch-list[opt] @finally compound-statement objc-catch-list: @catch ( parameter-declaration ) compound-statement objc-catch-list @catch ( parameter-declaration ) compound-statement */ static void c_parser_objc_try_catch_statement (c_parser *parser) { location_t loc; tree stmt; gcc_assert (c_parser_next_token_is_keyword (parser, RID_TRY)); c_parser_consume_token (parser); loc = c_parser_peek_token (parser)->location; stmt = c_parser_compound_statement (parser); objc_begin_try_stmt (loc, stmt); while (c_parser_next_token_is_keyword (parser, RID_CATCH)) { struct c_parm *parm; c_parser_consume_token (parser); if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) break; parm = c_parser_parameter_declaration (parser, NULL_TREE); if (parm == NULL) { c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL); break; } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); objc_begin_catch_clause (grokparm (parm)); if (c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>")) c_parser_compound_statement_nostart (parser); objc_finish_catch_clause (); } if (c_parser_next_token_is_keyword (parser, RID_AT_FINALLY)) { location_t finloc; tree finstmt; c_parser_consume_token (parser); finloc = c_parser_peek_token (parser)->location; finstmt = c_parser_compound_statement (parser); objc_build_finally_clause (finloc, finstmt); } objc_finish_try_stmt (); } /* Parse an objc-synchronized-statement. 
objc-synchronized-statement: @synchronized ( expression ) compound-statement */ static void c_parser_objc_synchronized_statement (c_parser *parser) { location_t loc; tree expr, stmt; gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_SYNCHRONIZED)); c_parser_consume_token (parser); loc = c_parser_peek_token (parser)->location; if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr = c_parser_expression (parser).value; expr = c_fully_fold (expr, false, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else expr = error_mark_node; stmt = c_parser_compound_statement (parser); objc_build_synchronized (loc, expr, stmt); } /* Parse an objc-selector; return NULL_TREE without an error if the next token is not an objc-selector. objc-selector: identifier one of enum struct union if else while do for switch case default break continue return goto asm sizeof typeof __alignof unsigned long const short volatile signed restrict _Complex in out inout bycopy byref oneway int char float double void _Bool ??? Why this selection of keywords but not, for example, storage class specifiers? */ static tree c_parser_objc_selector (c_parser *parser) { c_token *token = c_parser_peek_token (parser); tree value = token->value; if (token->type == CPP_NAME) { c_parser_consume_token (parser); return value; } if (token->type != CPP_KEYWORD) return NULL_TREE; switch (token->keyword) { case RID_ENUM: case RID_STRUCT: case RID_UNION: case RID_IF: case RID_ELSE: case RID_WHILE: case RID_DO: case RID_FOR: case RID_SWITCH: case RID_CASE: case RID_DEFAULT: case RID_BREAK: case RID_CONTINUE: case RID_RETURN: case RID_GOTO: case RID_ASM: case RID_SIZEOF: case RID_TYPEOF: case RID_ALIGNOF: case RID_UNSIGNED: case RID_LONG: case RID_CONST: case RID_SHORT: case RID_VOLATILE: case RID_SIGNED: case RID_RESTRICT: case RID_COMPLEX: case RID_IN: case RID_OUT: case RID_INOUT: case RID_BYCOPY: case RID_BYREF: case RID_ONEWAY: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_BOOL: c_parser_consume_token (parser); return value; default: return NULL_TREE; } } /* Parse an objc-selector-arg. objc-selector-arg: objc-selector objc-keywordname-list objc-keywordname-list: objc-keywordname objc-keywordname-list objc-keywordname objc-keywordname: objc-selector : : */ static tree c_parser_objc_selector_arg (c_parser *parser) { tree sel = c_parser_objc_selector (parser); tree list = NULL_TREE; if (sel && c_parser_next_token_is_not (parser, CPP_COLON)) return sel; while (true) { if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) return list; list = chainon (list, build_tree_list (sel, NULL_TREE)); sel = c_parser_objc_selector (parser); if (!sel && c_parser_next_token_is_not (parser, CPP_COLON)) break; } return list; } /* Parse an objc-receiver. objc-receiver: expression class-name type-name */ static tree c_parser_objc_receiver (c_parser *parser) { if (c_parser_peek_token (parser)->type == CPP_NAME && (c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME || c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME)) { tree id = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); return objc_get_class_reference (id); } return c_fully_fold (c_parser_expression (parser).value, false, NULL); } /* Parse objc-message-args. 
objc-message-args: objc-selector objc-keywordarg-list objc-keywordarg-list: objc-keywordarg objc-keywordarg-list objc-keywordarg objc-keywordarg: objc-selector : objc-keywordexpr : objc-keywordexpr */ static tree c_parser_objc_message_args (c_parser *parser) { tree sel = c_parser_objc_selector (parser); tree list = NULL_TREE; if (sel && c_parser_next_token_is_not (parser, CPP_COLON)) return sel; while (true) { tree keywordexpr; if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")) return error_mark_node; keywordexpr = c_parser_objc_keywordexpr (parser); list = chainon (list, build_tree_list (sel, keywordexpr)); sel = c_parser_objc_selector (parser); if (!sel && c_parser_next_token_is_not (parser, CPP_COLON)) break; } return list; } /* Parse an objc-keywordexpr. objc-keywordexpr: nonempty-expr-list */ static tree c_parser_objc_keywordexpr (c_parser *parser) { tree ret; VEC(tree,gc) *expr_list = c_parser_expr_list (parser, true, true, NULL); if (VEC_length (tree, expr_list) == 1) { /* Just return the expression, remove a level of indirection. */ ret = VEC_index (tree, expr_list, 0); } else { /* We have a comma expression, we will collapse later. */ ret = build_tree_list_vec (expr_list); } release_tree_vector (expr_list); return ret; } /* Handle pragmas. Some OpenMP pragmas are associated with, and therefore should be considered, statements. ALLOW_STMT is true if we're within the context of a function and such pragmas are to be allowed. Returns true if we actually parsed such a pragma. */ static bool c_parser_pragma (c_parser *parser, enum pragma_context context) { unsigned int id; id = c_parser_peek_token (parser)->pragma_kind; gcc_assert (id != PRAGMA_NONE); switch (id) { case PRAGMA_OMP_BARRIER: if (context != pragma_compound) { if (context == pragma_stmt) c_parser_error (parser, "%<#pragma omp barrier%> may only be " "used in compound statements"); goto bad_stmt; } c_parser_omp_barrier (parser); return false; case PRAGMA_OMP_FLUSH: if (context != pragma_compound) { if (context == pragma_stmt) c_parser_error (parser, "%<#pragma omp flush%> may only be " "used in compound statements"); goto bad_stmt; } c_parser_omp_flush (parser); return false; case PRAGMA_OMP_TASKWAIT: if (context != pragma_compound) { if (context == pragma_stmt) c_parser_error (parser, "%<#pragma omp taskwait%> may only be " "used in compound statements"); goto bad_stmt; } c_parser_omp_taskwait (parser); return false; case PRAGMA_OMP_THREADPRIVATE: c_parser_omp_threadprivate (parser); return false; case PRAGMA_OMP_SECTION: error_at (c_parser_peek_token (parser)->location, "%<#pragma omp section%> may only be used in " "%<#pragma omp sections%> construct"); c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL); return false; case PRAGMA_GCC_PCH_PREPROCESS: c_parser_error (parser, "%<#pragma GCC pch_preprocess%> must be first"); c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL); return false; default: if (id < PRAGMA_FIRST_EXTERNAL) { if (context == pragma_external) { bad_stmt: c_parser_error (parser, "expected declaration specifiers"); c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL); return false; } c_parser_omp_construct (parser); return true; } break; } c_parser_consume_pragma (parser); c_invoke_pragma_handler (id); /* Skip to EOL, but suppress any error message. Those will have been generated by the handler routine through calling error, as opposed to calling c_parser_error. */ parser->error = true; c_parser_skip_to_pragma_eol (parser); return false; } /* The interface the pragma parsers have to the lexer.
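As the code below shows, keywords come back to the handler as plain CPP_NAME tokens, and both CPP_PRAGMA_EOL and CPP_EOF are reported as CPP_EOF, so a handler may simply call pragma_lex until it sees CPP_EOF; for a hypothetical #pragma GCC foo bar it would observe the names "foo" and "bar" followed by CPP_EOF.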
*/ enum cpp_ttype pragma_lex (tree *value) { c_token *tok = c_parser_peek_token (the_parser); enum cpp_ttype ret = tok->type; *value = tok->value; if (ret == CPP_PRAGMA_EOL || ret == CPP_EOF) ret = CPP_EOF; else { if (ret == CPP_KEYWORD) ret = CPP_NAME; c_parser_consume_token (the_parser); } return ret; } static void c_parser_pragma_pch_preprocess (c_parser *parser) { tree name = NULL; c_parser_consume_pragma (parser); if (c_parser_next_token_is (parser, CPP_STRING)) { name = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); } else c_parser_error (parser, "expected string literal"); c_parser_skip_to_pragma_eol (parser); if (name) c_common_pch_pragma (parse_in, TREE_STRING_POINTER (name)); } /* OpenMP 2.5 parsing routines. */ /* Returns name of the next clause. If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and the token is not consumed. Otherwise appropriate pragma_omp_clause is returned and the token is consumed. */ static pragma_omp_clause c_parser_omp_clause_name (c_parser *parser) { pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE; if (c_parser_next_token_is_keyword (parser, RID_IF)) result = PRAGMA_OMP_CLAUSE_IF; else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT)) result = PRAGMA_OMP_CLAUSE_DEFAULT; else if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); switch (p[0]) { case 'c': if (!strcmp ("collapse", p)) result = PRAGMA_OMP_CLAUSE_COLLAPSE; else if (!strcmp ("copyin", p)) result = PRAGMA_OMP_CLAUSE_COPYIN; else if (!strcmp ("copyprivate", p)) result = PRAGMA_OMP_CLAUSE_COPYPRIVATE; break; case 'f': if (!strcmp ("firstprivate", p)) result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE; break; case 'l': if (!strcmp ("lastprivate", p)) result = PRAGMA_OMP_CLAUSE_LASTPRIVATE; break; case 'n': if (!strcmp ("nowait", p)) result = PRAGMA_OMP_CLAUSE_NOWAIT; else if (!strcmp ("num_threads", p)) result = PRAGMA_OMP_CLAUSE_NUM_THREADS; break; case 'o': if (!strcmp ("ordered", p)) result = PRAGMA_OMP_CLAUSE_ORDERED; break; case 'p': if (!strcmp ("private", p)) result = PRAGMA_OMP_CLAUSE_PRIVATE; break; case 'r': if (!strcmp ("reduction", p)) result = PRAGMA_OMP_CLAUSE_REDUCTION; break; case 's': if (!strcmp ("schedule", p)) result = PRAGMA_OMP_CLAUSE_SCHEDULE; else if (!strcmp ("shared", p)) result = PRAGMA_OMP_CLAUSE_SHARED; break; case 'u': if (!strcmp ("untied", p)) result = PRAGMA_OMP_CLAUSE_UNTIED; break; } } if (result != PRAGMA_OMP_CLAUSE_NONE) c_parser_consume_token (parser); return result; } /* Validate that a clause of the given type does not already exist. */ static void check_no_duplicate_clause (tree clauses, enum omp_clause_code code, const char *name) { tree c; for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == code) { location_t loc = OMP_CLAUSE_LOCATION (c); error_at (loc, "too many %qs clauses", name); break; } } /* OpenMP 2.5: variable-list: identifier variable-list , identifier If KIND is nonzero, create the appropriate node and install the decl in OMP_CLAUSE_DECL and add the node to the head of the list. If KIND is nonzero, CLAUSE_LOC is the location of the clause. If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE; return the list created. 
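For example, for the clause text private (x, y, z) a caller passing KIND == OMP_CLAUSE_PRIVATE receives three OMP_CLAUSE_PRIVATE nodes chained onto LIST, one per decl, while KIND == 0 (OMP_CLAUSE_ERROR, as used for threadprivate and flush lists) yields a plain TREE_LIST of the decls.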
*/ static tree c_parser_omp_variable_list (c_parser *parser, location_t clause_loc, enum omp_clause_code kind, tree list) { if (c_parser_next_token_is_not (parser, CPP_NAME) || c_parser_peek_token (parser)->id_kind != C_ID_ID) c_parser_error (parser, "expected identifier"); while (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_token (parser)->id_kind == C_ID_ID) { tree t = lookup_name (c_parser_peek_token (parser)->value); if (t == NULL_TREE) undeclared_variable (c_parser_peek_token (parser)->location, c_parser_peek_token (parser)->value); else if (t == error_mark_node) ; else if (kind != 0) { tree u = build_omp_clause (clause_loc, kind); OMP_CLAUSE_DECL (u) = t; OMP_CLAUSE_CHAIN (u) = list; list = u; } else list = tree_cons (t, NULL_TREE, list); c_parser_consume_token (parser); if (c_parser_next_token_is_not (parser, CPP_COMMA)) break; c_parser_consume_token (parser); } return list; } /* Similarly, but expect leading and trailing parenthesis. This is a very common case for omp clauses. */ static tree c_parser_omp_var_list_parens (c_parser *parser, enum omp_clause_code kind, tree list) { /* The clauses location. */ location_t loc = c_parser_peek_token (parser)->location; if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { list = c_parser_omp_variable_list (parser, loc, kind, list); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } return list; } /* OpenMP 3.0: collapse ( constant-expression ) */ static tree c_parser_omp_clause_collapse (c_parser *parser, tree list) { tree c, num = error_mark_node; HOST_WIDE_INT n; location_t loc; check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse"); loc = c_parser_peek_token (parser)->location; if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { num = c_parser_expr_no_commas (parser, NULL).value; c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } if (num == error_mark_node) return list; if (!INTEGRAL_TYPE_P (TREE_TYPE (num)) || !host_integerp (num, 0) || (n = tree_low_cst (num, 0)) <= 0 || (int) n != n) { error_at (loc, "collapse argument needs positive constant integer expression"); return list; } c = build_omp_clause (loc, OMP_CLAUSE_COLLAPSE); OMP_CLAUSE_COLLAPSE_EXPR (c) = num; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: copyin ( variable-list ) */ static tree c_parser_omp_clause_copyin (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYIN, list); } /* OpenMP 2.5: copyprivate ( variable-list ) */ static tree c_parser_omp_clause_copyprivate (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYPRIVATE, list); } /* OpenMP 2.5: default ( shared | none ) */ static tree c_parser_omp_clause_default (c_parser *parser, tree list) { enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED; location_t loc = c_parser_peek_token (parser)->location; tree c; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) return list; if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); switch (p[0]) { case 'n': if (strcmp ("none", p) != 0) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_NONE; break; case 's': if (strcmp ("shared", p) != 0) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_SHARED; break; default: goto invalid_kind; } c_parser_consume_token (parser); } else { invalid_kind: c_parser_error (parser, "expected %<none%> or %<shared%>"); } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 
"expected %<)%>"); if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED) return list; check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default"); c = build_omp_clause (loc, OMP_CLAUSE_DEFAULT); OMP_CLAUSE_CHAIN (c) = list; OMP_CLAUSE_DEFAULT_KIND (c) = kind; return c; } /* OpenMP 2.5: firstprivate ( variable-list ) */ static tree c_parser_omp_clause_firstprivate (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_FIRSTPRIVATE, list); } /* OpenMP 2.5: if ( expression ) */ static tree c_parser_omp_clause_if (c_parser *parser, tree list) { location_t loc = c_parser_peek_token (parser)->location; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { tree t = c_parser_paren_condition (parser); tree c; check_no_duplicate_clause (list, OMP_CLAUSE_IF, "if"); c = build_omp_clause (loc, OMP_CLAUSE_IF); OMP_CLAUSE_IF_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; list = c; } else c_parser_error (parser, "expected %<(%>"); return list; } /* OpenMP 2.5: lastprivate ( variable-list ) */ static tree c_parser_omp_clause_lastprivate (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_LASTPRIVATE, list); } /* OpenMP 2.5: nowait */ static tree c_parser_omp_clause_nowait (c_parser *parser ATTRIBUTE_UNUSED, tree list) { tree c; location_t loc = c_parser_peek_token (parser)->location; check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait"); c = build_omp_clause (loc, OMP_CLAUSE_NOWAIT); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: num_threads ( expression ) */ static tree c_parser_omp_clause_num_threads (c_parser *parser, tree list) { location_t num_threads_loc = c_parser_peek_token (parser)->location; if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { location_t expr_loc = c_parser_peek_token (parser)->location; tree c, t = c_parser_expression (parser).value; t = c_fully_fold (t, false, NULL); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (!INTEGRAL_TYPE_P (TREE_TYPE (t))) { c_parser_error (parser, "expected integer expression"); return list; } /* Attempt to statically determine when the number isn't positive. 
*/ c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); if (CAN_HAVE_LOCATION_P (c)) SET_EXPR_LOCATION (c, expr_loc); if (c == boolean_true_node) { warning_at (expr_loc, 0, "%<num_threads%> value must be positive"); t = integer_one_node; } check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS, "num_threads"); c = build_omp_clause (num_threads_loc, OMP_CLAUSE_NUM_THREADS); OMP_CLAUSE_NUM_THREADS_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; list = c; } return list; } /* OpenMP 2.5: ordered */ static tree c_parser_omp_clause_ordered (c_parser *parser, tree list) { tree c; check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED, "ordered"); c = build_omp_clause (c_parser_peek_token (parser)->location, OMP_CLAUSE_ORDERED); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: private ( variable-list ) */ static tree c_parser_omp_clause_private (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_PRIVATE, list); } /* OpenMP 2.5: reduction ( reduction-operator : variable-list ) reduction-operator: One of: + * - & ^ | && || */ static tree c_parser_omp_clause_reduction (c_parser *parser, tree list) { location_t clause_loc = c_parser_peek_token (parser)->location; if (c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) { enum tree_code code; switch (c_parser_peek_token (parser)->type) { case CPP_PLUS: code = PLUS_EXPR; break; case CPP_MULT: code = MULT_EXPR; break; case CPP_MINUS: code = MINUS_EXPR; break; case CPP_AND: code = BIT_AND_EXPR; break; case CPP_XOR: code = BIT_XOR_EXPR; break; case CPP_OR: code = BIT_IOR_EXPR; break; case CPP_AND_AND: code = TRUTH_ANDIF_EXPR; break; case CPP_OR_OR: code = TRUTH_ORIF_EXPR; break; default: c_parser_error (parser, "expected %<+%>, %<*%>, %<-%>, %<&%>, " "%<^%>, %<|%>, %<&&%>, or %<||%>"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0); return list; } c_parser_consume_token (parser); if (c_parser_require (parser, CPP_COLON, "expected %<:%>")) { tree nl, c; nl = c_parser_omp_variable_list (parser, clause_loc, OMP_CLAUSE_REDUCTION, list); for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c)) OMP_CLAUSE_REDUCTION_CODE (c) = code; list = nl; } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } return list; } /* OpenMP 2.5: schedule ( schedule-kind ) schedule ( schedule-kind , expression ) schedule-kind: static | dynamic | guided | runtime | auto */ static tree c_parser_omp_clause_schedule (c_parser *parser, tree list) { tree c, t; location_t loc = c_parser_peek_token (parser)->location; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) return list; c = build_omp_clause (loc, OMP_CLAUSE_SCHEDULE); if (c_parser_next_token_is (parser, CPP_NAME)) { tree kind = c_parser_peek_token (parser)->value; const char *p = IDENTIFIER_POINTER (kind); switch (p[0]) { case 'd': if (strcmp ("dynamic", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC; break; case 'g': if (strcmp ("guided", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED; break; case 'r': if (strcmp ("runtime", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME; break; default: goto invalid_kind; } } else if (c_parser_next_token_is_keyword (parser, RID_STATIC)) OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC; else if (c_parser_next_token_is_keyword (parser, RID_AUTO)) OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO; else goto invalid_kind; c_parser_consume_token 
(parser); if (c_parser_next_token_is (parser, CPP_COMMA)) { location_t here; c_parser_consume_token (parser); here = c_parser_peek_token (parser)->location; t = c_parser_expr_no_commas (parser, NULL).value; t = c_fully_fold (t, false, NULL); if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME) error_at (here, "schedule %<runtime%> does not take " "a %<chunk_size%> parameter"); else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_AUTO) error_at (here, "schedule %<auto%> does not take " "a %<chunk_size%> parameter"); else if (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE) OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t; else c_parser_error (parser, "expected integer expression"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<,%> or %<)%>"); check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule"); OMP_CLAUSE_CHAIN (c) = list; return c; invalid_kind: c_parser_error (parser, "invalid schedule kind"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0); return list; } /* OpenMP 2.5: shared ( variable-list ) */ static tree c_parser_omp_clause_shared (c_parser *parser, tree list) { return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_SHARED, list); } /* OpenMP 3.0: untied */ static tree c_parser_omp_clause_untied (c_parser *parser ATTRIBUTE_UNUSED, tree list) { tree c; /* FIXME: Should we allow duplicates? */ check_no_duplicate_clause (list, OMP_CLAUSE_UNTIED, "untied"); c = build_omp_clause (c_parser_peek_token (parser)->location, OMP_CLAUSE_UNTIED); OMP_CLAUSE_CHAIN (c) = list; return c; } /* Parse all OpenMP clauses. The set clauses allowed by the directive is a bitmask in MASK. Return the list of clauses found; the result of clause default goes in *pdefault. 
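For example, the clause text private (a) if (b) on a parallel directive yields a two-clause chain; a clause whose bit is not set in MASK, such as nowait on that same directive, is still parsed but then dropped from the list with a %qs is not valid for %qs diagnostic.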
*/ static tree c_parser_omp_all_clauses (c_parser *parser, unsigned int mask, const char *where) { tree clauses = NULL; bool first = true; while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL)) { location_t here; pragma_omp_clause c_kind; const char *c_name; tree prev = clauses; if (!first && c_parser_next_token_is (parser, CPP_COMMA)) c_parser_consume_token (parser); first = false; here = c_parser_peek_token (parser)->location; c_kind = c_parser_omp_clause_name (parser); switch (c_kind) { case PRAGMA_OMP_CLAUSE_COLLAPSE: clauses = c_parser_omp_clause_collapse (parser, clauses); c_name = "collapse"; break; case PRAGMA_OMP_CLAUSE_COPYIN: clauses = c_parser_omp_clause_copyin (parser, clauses); c_name = "copyin"; break; case PRAGMA_OMP_CLAUSE_COPYPRIVATE: clauses = c_parser_omp_clause_copyprivate (parser, clauses); c_name = "copyprivate"; break; case PRAGMA_OMP_CLAUSE_DEFAULT: clauses = c_parser_omp_clause_default (parser, clauses); c_name = "default"; break; case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE: clauses = c_parser_omp_clause_firstprivate (parser, clauses); c_name = "firstprivate"; break; case PRAGMA_OMP_CLAUSE_IF: clauses = c_parser_omp_clause_if (parser, clauses); c_name = "if"; break; case PRAGMA_OMP_CLAUSE_LASTPRIVATE: clauses = c_parser_omp_clause_lastprivate (parser, clauses); c_name = "lastprivate"; break; case PRAGMA_OMP_CLAUSE_NOWAIT: clauses = c_parser_omp_clause_nowait (parser, clauses); c_name = "nowait"; break; case PRAGMA_OMP_CLAUSE_NUM_THREADS: clauses = c_parser_omp_clause_num_threads (parser, clauses); c_name = "num_threads"; break; case PRAGMA_OMP_CLAUSE_ORDERED: clauses = c_parser_omp_clause_ordered (parser, clauses); c_name = "ordered"; break; case PRAGMA_OMP_CLAUSE_PRIVATE: clauses = c_parser_omp_clause_private (parser, clauses); c_name = "private"; break; case PRAGMA_OMP_CLAUSE_REDUCTION: clauses = c_parser_omp_clause_reduction (parser, clauses); c_name = "reduction"; break; case PRAGMA_OMP_CLAUSE_SCHEDULE: clauses = c_parser_omp_clause_schedule (parser, clauses); c_name = "schedule"; break; case PRAGMA_OMP_CLAUSE_SHARED: clauses = c_parser_omp_clause_shared (parser, clauses); c_name = "shared"; break; case PRAGMA_OMP_CLAUSE_UNTIED: clauses = c_parser_omp_clause_untied (parser, clauses); c_name = "untied"; break; default: c_parser_error (parser, "expected %<#pragma omp%> clause"); goto saw_error; } if (((mask >> c_kind) & 1) == 0 && !parser->error) { /* Remove the invalid clause(s) from the list to avoid confusing the rest of the compiler. */ clauses = prev; error_at (here, "%qs is not valid for %qs", c_name, where); } } saw_error: c_parser_skip_to_pragma_eol (parser); return c_finish_omp_clauses (clauses); } /* OpenMP 2.5: structured-block: statement In practice, we're also interested in adding the statement to an outer node. So it is convenient if we work around the fact that c_parser_statement calls add_stmt. */ static tree c_parser_omp_structured_block (c_parser *parser) { tree stmt = push_stmt_list (); c_parser_statement (parser); return pop_stmt_list (stmt); } /* OpenMP 2.5: # pragma omp atomic new-line expression-stmt expression-stmt: x binop= expr | x++ | ++x | x-- | --x binop: +, *, -, /, &, ^, |, <<, >> where x is an lvalue expression with scalar type. LOC is the location of the #pragma token.
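For example, x += y is accepted as code PLUS_EXPR with right-hand side y, and ++x, x++, --x and x-- are canonicalized to the equivalent x = x + 1 or x = x - 1 form; the COMPOUND_EXPR and MODIFY_EXPR cases below exist only to undo the trees boolean_increment builds for _Bool operands.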
*/ static void c_parser_omp_atomic (location_t loc, c_parser *parser) { tree lhs, rhs; tree stmt; enum tree_code code; struct c_expr rhs_expr; c_parser_skip_to_pragma_eol (parser); lhs = c_parser_unary_expression (parser).value; lhs = c_fully_fold (lhs, false, NULL); switch (TREE_CODE (lhs)) { case ERROR_MARK: saw_error: c_parser_skip_to_end_of_block_or_statement (parser); return; case PREINCREMENT_EXPR: case POSTINCREMENT_EXPR: lhs = TREE_OPERAND (lhs, 0); code = PLUS_EXPR; rhs = integer_one_node; break; case PREDECREMENT_EXPR: case POSTDECREMENT_EXPR: lhs = TREE_OPERAND (lhs, 0); code = MINUS_EXPR; rhs = integer_one_node; break; case COMPOUND_EXPR: if (TREE_CODE (TREE_OPERAND (lhs, 0)) == SAVE_EXPR && TREE_CODE (TREE_OPERAND (lhs, 1)) == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0)) == MODIFY_EXPR && TREE_OPERAND (TREE_OPERAND (lhs, 1), 1) == TREE_OPERAND (lhs, 0) && TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0), 0))) == BOOLEAN_TYPE) /* Undo effects of boolean_increment for post {in,de}crement. */ lhs = TREE_OPERAND (TREE_OPERAND (lhs, 1), 0); /* FALLTHRU */ case MODIFY_EXPR: if (TREE_CODE (lhs) == MODIFY_EXPR && TREE_CODE (TREE_TYPE (TREE_OPERAND (lhs, 0))) == BOOLEAN_TYPE) { /* Undo effects of boolean_increment. */ if (integer_onep (TREE_OPERAND (lhs, 1))) { /* This is pre or post increment. */ rhs = TREE_OPERAND (lhs, 1); lhs = TREE_OPERAND (lhs, 0); code = NOP_EXPR; break; } if (TREE_CODE (TREE_OPERAND (lhs, 1)) == TRUTH_NOT_EXPR && TREE_OPERAND (lhs, 0) == TREE_OPERAND (TREE_OPERAND (lhs, 1), 0)) { /* This is pre or post decrement. */ rhs = TREE_OPERAND (lhs, 1); lhs = TREE_OPERAND (lhs, 0); code = NOP_EXPR; break; } } /* FALLTHRU */ default: switch (c_parser_peek_token (parser)->type) { case CPP_MULT_EQ: code = MULT_EXPR; break; case CPP_DIV_EQ: code = TRUNC_DIV_EXPR; break; case CPP_PLUS_EQ: code = PLUS_EXPR; break; case CPP_MINUS_EQ: code = MINUS_EXPR; break; case CPP_LSHIFT_EQ: code = LSHIFT_EXPR; break; case CPP_RSHIFT_EQ: code = RSHIFT_EXPR; break; case CPP_AND_EQ: code = BIT_AND_EXPR; break; case CPP_OR_EQ: code = BIT_IOR_EXPR; break; case CPP_XOR_EQ: code = BIT_XOR_EXPR; break; default: c_parser_error (parser, "invalid operator for %<#pragma omp atomic%>"); goto saw_error; } c_parser_consume_token (parser); { location_t rhs_loc = c_parser_peek_token (parser)->location; rhs_expr = c_parser_expression (parser); rhs_expr = default_function_array_conversion (rhs_loc, rhs_expr); } rhs = rhs_expr.value; rhs = c_fully_fold (rhs, false, NULL); break; } stmt = c_finish_omp_atomic (loc, code, lhs, rhs); if (stmt != error_mark_node) add_stmt (stmt); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } /* OpenMP 2.5: # pragma omp barrier new-line */ static void c_parser_omp_barrier (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_pragma (parser); c_parser_skip_to_pragma_eol (parser); c_finish_omp_barrier (loc); } /* OpenMP 2.5: # pragma omp critical [(name)] new-line structured-block LOC is the location of the #pragma itself.
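For example, #pragma omp critical (update) names the region "update" (a hypothetical name), while a bare #pragma omp critical leaves NAME as NULL and denotes the single unnamed region.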
*/ static tree c_parser_omp_critical (location_t loc, c_parser *parser) { tree stmt, name = NULL; if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) { c_parser_consume_token (parser); if (c_parser_next_token_is (parser, CPP_NAME)) { name = c_parser_peek_token (parser)->value; c_parser_consume_token (parser); c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else c_parser_error (parser, "expected identifier"); } else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL)) c_parser_error (parser, "expected %<(%> or end of line"); c_parser_skip_to_pragma_eol (parser); stmt = c_parser_omp_structured_block (parser); return c_finish_omp_critical (loc, stmt, name); } /* OpenMP 2.5: # pragma omp flush flush-vars[opt] new-line flush-vars: ( variable-list ) */ static void c_parser_omp_flush (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_pragma (parser); if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)) c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL); else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL)) c_parser_error (parser, "expected %<(%> or end of line"); c_parser_skip_to_pragma_eol (parser); c_finish_omp_flush (loc); } /* Parse the restricted form of the for statement allowed by OpenMP. The real trick here is to determine the loop control variable early so that we can push a new decl if necessary to make it private. LOC is the location of the OMP in "#pragma omp". */ static tree c_parser_omp_for_loop (location_t loc, c_parser *parser, tree clauses, tree *par_clauses) { tree decl, cond, incr, save_break, save_cont, body, init, stmt, cl; tree declv, condv, incrv, initv, for_block = NULL, ret = NULL; bool fail = false, open_brace_parsed = false; int i, collapse = 1, nbraces = 0; location_t for_loc; for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl)) if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE) collapse = tree_low_cst (OMP_CLAUSE_COLLAPSE_EXPR (cl), 0); gcc_assert (collapse >= 1); declv = make_tree_vec (collapse); initv = make_tree_vec (collapse); condv = make_tree_vec (collapse); incrv = make_tree_vec (collapse); if (!c_parser_next_token_is_keyword (parser, RID_FOR)) { c_parser_error (parser, "for statement expected"); return NULL; } for_loc = c_parser_peek_token (parser)->location; c_parser_consume_token (parser); for (i = 0; i < collapse; i++) { int bracecount = 0; if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) goto pop_scopes; /* Parse the initialization declaration or expression.
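Two forms are accepted here: a declaration such as int i = 0; handled via c_parser_declaration_or_fndef and check_for_loop_decls, or an assignment such as i = 0 to an already-declared variable, which is built into a modify-expression by hand below.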
*/ if (c_parser_next_token_starts_declspecs (parser)) { if (i > 0) for_block = tree_cons (NULL, c_begin_compound_stmt (true), for_block); c_parser_declaration_or_fndef (parser, true, true, true, true); decl = check_for_loop_decls (for_loc); if (decl == NULL) goto error_init; if (DECL_INITIAL (decl) == error_mark_node) decl = error_mark_node; init = decl; } else if (c_parser_next_token_is (parser, CPP_NAME) && c_parser_peek_2nd_token (parser)->type == CPP_EQ) { struct c_expr decl_exp; struct c_expr init_exp; location_t init_loc; decl_exp = c_parser_postfix_expression (parser); decl = decl_exp.value; c_parser_require (parser, CPP_EQ, "expected %<=%>"); init_loc = c_parser_peek_token (parser)->location; init_exp = c_parser_expr_no_commas (parser, NULL); init_exp = default_function_array_conversion (init_loc, init_exp); init = build_modify_expr (init_loc, decl, decl_exp.original_type, NOP_EXPR, init_loc, init_exp.value, init_exp.original_type); init = c_process_expr_stmt (init_loc, init); c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); } else { error_init: c_parser_error (parser, "expected iteration declaration or initialization"); c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); fail = true; goto parse_next; } /* Parse the loop condition. */ cond = NULL_TREE; if (c_parser_next_token_is_not (parser, CPP_SEMICOLON)) { location_t cond_loc = c_parser_peek_token (parser)->location; struct c_expr cond_expr = c_parser_binary_expression (parser, NULL); cond = cond_expr.value; cond = c_objc_common_truthvalue_conversion (cond_loc, cond); cond = c_fully_fold (cond, false, NULL); switch (cond_expr.original_code) { case GT_EXPR: case GE_EXPR: case LT_EXPR: case LE_EXPR: break; default: /* Can't be cond = error_mark_node, because we want to preserve the location until c_finish_omp_for. */ cond = build1 (NOP_EXPR, boolean_type_node, error_mark_node); break; } protected_set_expr_location (cond, cond_loc); } c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>"); /* Parse the increment expression. */ incr = NULL_TREE; if (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN)) { location_t incr_loc = c_parser_peek_token (parser)->location; incr = c_process_expr_stmt (incr_loc, c_parser_expression (parser).value); } c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (decl == NULL || decl == error_mark_node || init == error_mark_node) fail = true; else { TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (condv, i) = cond; TREE_VEC_ELT (incrv, i) = incr; } parse_next: if (i == collapse - 1) break; /* FIXME: OpenMP 3.0 draft isn't very clear on what exactly is allowed in between the collapsed for loops to be still considered perfectly nested. Hopefully the final version clarifies this. For now handle (multiple) {'s and empty statements. 
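For example, with collapse (2) the code below accepts for (i = 0; i < n; i++) { for (j = 0; j < m; j++) ... } consuming the optional braces and any empty statements inside them between the two for keywords; anything else there is diagnosed as not perfectly nested.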
*/ do { if (c_parser_next_token_is_keyword (parser, RID_FOR)) { c_parser_consume_token (parser); break; } else if (c_parser_next_token_is (parser, CPP_OPEN_BRACE)) { c_parser_consume_token (parser); bracecount++; } else if (bracecount && c_parser_next_token_is (parser, CPP_SEMICOLON)) c_parser_consume_token (parser); else { c_parser_error (parser, "not enough perfectly nested loops"); if (bracecount) { open_brace_parsed = true; bracecount--; } fail = true; collapse = 0; break; } } while (1); nbraces += bracecount; } save_break = c_break_label; c_break_label = size_one_node; save_cont = c_cont_label; c_cont_label = NULL_TREE; body = push_stmt_list (); if (open_brace_parsed) { location_t here = c_parser_peek_token (parser)->location; stmt = c_begin_compound_stmt (true); c_parser_compound_statement_nostart (parser); add_stmt (c_end_compound_stmt (here, stmt, true)); } else add_stmt (c_parser_c99_block_statement (parser)); if (c_cont_label) { tree t = build1 (LABEL_EXPR, void_type_node, c_cont_label); SET_EXPR_LOCATION (t, loc); add_stmt (t); } body = pop_stmt_list (body); c_break_label = save_break; c_cont_label = save_cont; while (nbraces) { if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) { c_parser_consume_token (parser); nbraces--; } else if (c_parser_next_token_is (parser, CPP_SEMICOLON)) c_parser_consume_token (parser); else { c_parser_error (parser, "collapsed loops not perfectly nested"); while (nbraces) { location_t here = c_parser_peek_token (parser)->location; stmt = c_begin_compound_stmt (true); add_stmt (body); c_parser_compound_statement_nostart (parser); body = c_end_compound_stmt (here, stmt, true); nbraces--; } goto pop_scopes; } } /* Only bother calling c_finish_omp_for if we haven't already generated an error from the initialization parsing. */ if (!fail) { stmt = c_finish_omp_for (loc, declv, initv, condv, incrv, body, NULL); if (stmt) { if (par_clauses != NULL) { tree *c; for (c = par_clauses; *c ; ) if (OMP_CLAUSE_CODE (*c) != OMP_CLAUSE_FIRSTPRIVATE && OMP_CLAUSE_CODE (*c) != OMP_CLAUSE_LASTPRIVATE) c = &OMP_CLAUSE_CHAIN (*c); else { for (i = 0; i < collapse; i++) if (TREE_VEC_ELT (declv, i) == OMP_CLAUSE_DECL (*c)) break; if (i == collapse) c = &OMP_CLAUSE_CHAIN (*c); else if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_FIRSTPRIVATE) { error_at (loc, "iteration variable %qD should not be firstprivate", OMP_CLAUSE_DECL (*c)); *c = OMP_CLAUSE_CHAIN (*c); } else { /* Copy lastprivate (decl) clause to OMP_FOR_CLAUSES, change it to shared (decl) in OMP_PARALLEL_CLAUSES. */ tree l = build_omp_clause (OMP_CLAUSE_LOCATION (*c), OMP_CLAUSE_LASTPRIVATE); OMP_CLAUSE_DECL (l) = OMP_CLAUSE_DECL (*c); OMP_CLAUSE_CHAIN (l) = clauses; clauses = l; OMP_CLAUSE_SET_CODE (*c, OMP_CLAUSE_SHARED); } } } OMP_FOR_CLAUSES (stmt) = clauses; } ret = stmt; } pop_scopes: while (for_block) { /* FIXME diagnostics: LOC below should be the actual location of this particular for block. We need to build a list of locations to go along with FOR_BLOCK. */ stmt = c_end_compound_stmt (loc, TREE_VALUE (for_block), true); add_stmt (stmt); for_block = TREE_CHAIN (for_block); } return ret; } /* OpenMP 2.5: #pragma omp for for-clause[optseq] new-line for-loop LOC is the location of the #pragma token.
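A typical directive accepted by the mask below is #pragma omp for private (i) schedule (static, 4) nowait, while a clause outside OMP_FOR_CLAUSE_MASK, such as copyin, is rejected by c_parser_omp_all_clauses.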
*/ #define OMP_FOR_CLAUSE_MASK \ ( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (1u << PRAGMA_OMP_CLAUSE_ORDERED) \ | (1u << PRAGMA_OMP_CLAUSE_SCHEDULE) \ | (1u << PRAGMA_OMP_CLAUSE_COLLAPSE) \ | (1u << PRAGMA_OMP_CLAUSE_NOWAIT)) static tree c_parser_omp_for (location_t loc, c_parser *parser) { tree block, clauses, ret; clauses = c_parser_omp_all_clauses (parser, OMP_FOR_CLAUSE_MASK, "#pragma omp for"); block = c_begin_compound_stmt (true); ret = c_parser_omp_for_loop (loc, parser, clauses, NULL); block = c_end_compound_stmt (loc, block, true); add_stmt (block); return ret; } /* OpenMP 2.5: # pragma omp master new-line structured-block LOC is the location of the #pragma token. */ static tree c_parser_omp_master (location_t loc, c_parser *parser) { c_parser_skip_to_pragma_eol (parser); return c_finish_omp_master (loc, c_parser_omp_structured_block (parser)); } /* OpenMP 2.5: # pragma omp ordered new-line structured-block LOC is the location of the #pragma itself. */ static tree c_parser_omp_ordered (location_t loc, c_parser *parser) { c_parser_skip_to_pragma_eol (parser); return c_finish_omp_ordered (loc, c_parser_omp_structured_block (parser)); } /* OpenMP 2.5: section-scope: { section-sequence } section-sequence: section-directive[opt] structured-block section-sequence section-directive structured-block SECTIONS_LOC is the location of the #pragma omp sections. */ static tree c_parser_omp_sections_scope (location_t sections_loc, c_parser *parser) { tree stmt, substmt; bool error_suppress = false; location_t loc; loc = c_parser_peek_token (parser)->location; if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>")) { /* Avoid skipping until the end of the block. */ parser->error = false; return NULL_TREE; } stmt = push_stmt_list (); if (c_parser_peek_token (parser)->pragma_kind != PRAGMA_OMP_SECTION) { substmt = push_stmt_list (); while (1) { c_parser_statement (parser); if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SECTION) break; if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) break; if (c_parser_next_token_is (parser, CPP_EOF)) break; } substmt = pop_stmt_list (substmt); substmt = build1 (OMP_SECTION, void_type_node, substmt); SET_EXPR_LOCATION (substmt, loc); add_stmt (substmt); } while (1) { if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)) break; if (c_parser_next_token_is (parser, CPP_EOF)) break; loc = c_parser_peek_token (parser)->location; if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SECTION) { c_parser_consume_pragma (parser); c_parser_skip_to_pragma_eol (parser); error_suppress = false; } else if (!error_suppress) { error_at (loc, "expected %<#pragma omp section%> or %<}%>"); error_suppress = true; } substmt = c_parser_omp_structured_block (parser); substmt = build1 (OMP_SECTION, void_type_node, substmt); SET_EXPR_LOCATION (substmt, loc); add_stmt (substmt); } c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, "expected %<#pragma omp section%> or %<}%>"); substmt = pop_stmt_list (stmt); stmt = make_node (OMP_SECTIONS); SET_EXPR_LOCATION (stmt, sections_loc); TREE_TYPE (stmt) = void_type_node; OMP_SECTIONS_BODY (stmt) = substmt; return add_stmt (stmt); } /* OpenMP 2.5: # pragma omp sections sections-clause[optseq] new-line sections-scope LOC is the location of the #pragma token.
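For example, #pragma omp sections followed by { stmt1; #pragma omp section stmt2; } yields two sections, since c_parser_omp_sections_scope above allows the first section-directive to be omitted.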
*/ #define OMP_SECTIONS_CLAUSE_MASK \ ( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (1u << PRAGMA_OMP_CLAUSE_NOWAIT)) static tree c_parser_omp_sections (location_t loc, c_parser *parser) { tree block, clauses, ret; clauses = c_parser_omp_all_clauses (parser, OMP_SECTIONS_CLAUSE_MASK, "#pragma omp sections"); block = c_begin_compound_stmt (true); ret = c_parser_omp_sections_scope (loc, parser); if (ret) OMP_SECTIONS_CLAUSES (ret) = clauses; block = c_end_compound_stmt (loc, block, true); add_stmt (block); return ret; } /* OpenMP 2.5: # pragma omp parallel parallel-clause new-line # pragma omp parallel for parallel-for-clause new-line # pragma omp parallel sections parallel-sections-clause new-line LOC is the location of the #pragma token. */ #define OMP_PARALLEL_CLAUSE_MASK \ ( (1u << PRAGMA_OMP_CLAUSE_IF) \ | (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_DEFAULT) \ | (1u << PRAGMA_OMP_CLAUSE_SHARED) \ | (1u << PRAGMA_OMP_CLAUSE_COPYIN) \ | (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (1u << PRAGMA_OMP_CLAUSE_NUM_THREADS)) static tree c_parser_omp_parallel (location_t loc, c_parser *parser) { enum pragma_kind p_kind = PRAGMA_OMP_PARALLEL; const char *p_name = "#pragma omp parallel"; tree stmt, clauses, par_clause, ws_clause, block; unsigned int mask = OMP_PARALLEL_CLAUSE_MASK; if (c_parser_next_token_is_keyword (parser, RID_FOR)) { c_parser_consume_token (parser); p_kind = PRAGMA_OMP_PARALLEL_FOR; p_name = "#pragma omp parallel for"; mask |= OMP_FOR_CLAUSE_MASK; mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT); } else if (c_parser_next_token_is (parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value); if (strcmp (p, "sections") == 0) { c_parser_consume_token (parser); p_kind = PRAGMA_OMP_PARALLEL_SECTIONS; p_name = "#pragma omp parallel sections"; mask |= OMP_SECTIONS_CLAUSE_MASK; mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT); } } clauses = c_parser_omp_all_clauses (parser, mask, p_name); switch (p_kind) { case PRAGMA_OMP_PARALLEL: block = c_begin_omp_parallel (); c_parser_statement (parser); stmt = c_finish_omp_parallel (loc, clauses, block); break; case PRAGMA_OMP_PARALLEL_FOR: block = c_begin_omp_parallel (); c_split_parallel_clauses (loc, clauses, &par_clause, &ws_clause); c_parser_omp_for_loop (loc, parser, ws_clause, &par_clause); stmt = c_finish_omp_parallel (loc, par_clause, block); OMP_PARALLEL_COMBINED (stmt) = 1; break; case PRAGMA_OMP_PARALLEL_SECTIONS: block = c_begin_omp_parallel (); c_split_parallel_clauses (loc, clauses, &par_clause, &ws_clause); stmt = c_parser_omp_sections_scope (loc, parser); if (stmt) OMP_SECTIONS_CLAUSES (stmt) = ws_clause; stmt = c_finish_omp_parallel (loc, par_clause, block); OMP_PARALLEL_COMBINED (stmt) = 1; break; default: gcc_unreachable (); } return stmt; } /* OpenMP 2.5: # pragma omp single single-clause[optseq] new-line structured-block LOC is the location of the #pragma. */ #define OMP_SINGLE_CLAUSE_MASK \ ( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_COPYPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_NOWAIT)) static tree c_parser_omp_single (location_t loc, c_parser *parser) { tree stmt = make_node (OMP_SINGLE); SET_EXPR_LOCATION (stmt, loc); TREE_TYPE (stmt) = void_type_node; OMP_SINGLE_CLAUSES (stmt) = c_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK, "#pragma omp single"); OMP_SINGLE_BODY (stmt) = c_parser_omp_structured_block (parser); return add_stmt (stmt); } /* OpenMP 3.0: # pragma omp task task-clause[optseq] new-line LOC is the location of the #pragma.
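For example, #pragma omp task if (depth < 8) untied firstprivate (n) uses only bits present in OMP_TASK_CLAUSE_MASK below (depth and n being hypothetical variables); the task body is the single statement parsed between c_begin_omp_task and c_finish_omp_task.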
*/ #define OMP_TASK_CLAUSE_MASK \ ( (1u << PRAGMA_OMP_CLAUSE_IF) \ | (1u << PRAGMA_OMP_CLAUSE_UNTIED) \ | (1u << PRAGMA_OMP_CLAUSE_DEFAULT) \ | (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_SHARED)) static tree c_parser_omp_task (location_t loc, c_parser *parser) { tree clauses, block; clauses = c_parser_omp_all_clauses (parser, OMP_TASK_CLAUSE_MASK, "#pragma omp task"); block = c_begin_omp_task (); c_parser_statement (parser); return c_finish_omp_task (loc, clauses, block); } /* OpenMP 3.0: # pragma omp taskwait new-line */ static void c_parser_omp_taskwait (c_parser *parser) { location_t loc = c_parser_peek_token (parser)->location; c_parser_consume_pragma (parser); c_parser_skip_to_pragma_eol (parser); c_finish_omp_taskwait (loc); } /* Main entry point to parsing most OpenMP pragmas. */ static void c_parser_omp_construct (c_parser *parser) { enum pragma_kind p_kind; location_t loc; tree stmt; loc = c_parser_peek_token (parser)->location; p_kind = c_parser_peek_token (parser)->pragma_kind; c_parser_consume_pragma (parser); switch (p_kind) { case PRAGMA_OMP_ATOMIC: c_parser_omp_atomic (loc, parser); return; case PRAGMA_OMP_CRITICAL: stmt = c_parser_omp_critical (loc, parser); break; case PRAGMA_OMP_FOR: stmt = c_parser_omp_for (loc, parser); break; case PRAGMA_OMP_MASTER: stmt = c_parser_omp_master (loc, parser); break; case PRAGMA_OMP_ORDERED: stmt = c_parser_omp_ordered (loc, parser); break; case PRAGMA_OMP_PARALLEL: stmt = c_parser_omp_parallel (loc, parser); break; case PRAGMA_OMP_SECTIONS: stmt = c_parser_omp_sections (loc, parser); break; case PRAGMA_OMP_SINGLE: stmt = c_parser_omp_single (loc, parser); break; case PRAGMA_OMP_TASK: stmt = c_parser_omp_task (loc, parser); break; default: gcc_unreachable (); } if (stmt) gcc_assert (EXPR_LOCATION (stmt) != UNKNOWN_LOCATION); } /* OpenMP 2.5: # pragma omp threadprivate (variable-list) */ static void c_parser_omp_threadprivate (c_parser *parser) { tree vars, t; location_t loc; c_parser_consume_pragma (parser); loc = c_parser_peek_token (parser)->location; vars = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL); /* Mark every variable in VARS to be assigned thread local storage. */ for (t = vars; t; t = TREE_CHAIN (t)) { tree v = TREE_PURPOSE (t); /* FIXME diagnostics: Ideally we should keep individual locations for all the variables in the var list to make the following errors more precise. Perhaps c_parser_omp_var_list_parens() should construct a list of locations to go along with the var list. */ /* If V had already been marked threadprivate, it doesn't matter whether it had been used prior to this point. */ if (TREE_CODE (v) != VAR_DECL) error_at (loc, "%qD is not a variable", v); else if (TREE_USED (v) && !C_DECL_THREADPRIVATE_P (v)) error_at (loc, "%qE declared %<threadprivate%> after first use", v); else if (! TREE_STATIC (v) && ! DECL_EXTERNAL (v)) error_at (loc, "automatic variable %qE cannot be %<threadprivate%>", v); else if (TREE_TYPE (v) == error_mark_node) ; else if (! COMPLETE_TYPE_P (TREE_TYPE (v))) error_at (loc, "%<threadprivate%> %qE has incomplete type", v); else { if (! DECL_THREAD_LOCAL_P (v)) { DECL_TLS_MODEL (v) = decl_default_tls_model (v); /* If rtl has been already set for this var, call make_decl_rtl once again, so that encode_section_info has a chance to look at the new decl flags. */ if (DECL_RTL_SET_P (v)) make_decl_rtl (v); } C_DECL_THREADPRIVATE_P (v) = 1; } } c_parser_skip_to_pragma_eol (parser); } /* Parse a single source file. */ void c_parse_file (void) { /* Use local storage to begin.
If the first token is a pragma, parse it. If it is #pragma GCC pch_preprocess, then this will load a PCH file which will cause garbage collection. */ c_parser tparser; memset (&tparser, 0, sizeof tparser); the_parser = &tparser; if (c_parser_peek_token (&tparser)->pragma_kind == PRAGMA_GCC_PCH_PREPROCESS) c_parser_pragma_pch_preprocess (&tparser); the_parser = GGC_NEW (c_parser); *the_parser = tparser; /* Initialize EH, if we've been told to do so. */ if (flag_exceptions) using_eh_for_cleanups (); c_parser_translation_unit (the_parser); the_parser = NULL; } #include "gt-c-parser.h" #ifdef __cplusplus } /* extern "C" */ #endif
/* * TODO: * * Make sure all relevant comments, and all relevant code from all actions, * brought over from old parser. Verify exact correspondence of syntax * accepted. * * Add testcases covering every input symbol in every state in old and new * parsers. * * Include full syntax for GNU C, including erroneous cases accepted with error * messages, in syntax productions in comments. * * Make more diagnostics in the front end generally take an explicit location * rather than implicitly using input_location. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "rtl.h" #include "langhooks.h" #include "input.h" #include "cpplib.h" #include "timevar.h" #include "c-pragma.h" #include "c-tree.h" #include "flags.h" #include "output.h" #include "toplev.h" #include "ggc.h" #include "c-common.h" #include "vec.h" #include "target.h" #include "cgraph.h" #include "plugin.h" #include "except.h" #ifdef __cplusplus extern "C" { #endif /* Initialization routine for this file. */ void c_parse_init(void) { /* * The only initialization required is of the reserved word * identifiers. */ unsigned int i; tree id; int mask = 0; /* * Make sure RID_MAX hasn't grown past the 8 bits used to hold the * keyword in the c_token structure. */ gcc_assert(RID_MAX <= 255); mask |= D_CXXONLY; if (!flag_isoc99) mask |= D_C99; if (flag_no_asm) { mask |= D_ASM | D_EXT; if (!flag_isoc99) mask |= D_EXT89; } if (!c_dialect_objc()) mask |= D_OBJC | D_CXX_OBJC; ridpointers = GGC_CNEWVEC(tree, (int)RID_MAX); for (i = 0; i < num_c_common_reswords; i++) { /* * If a keyword is disabled, do not enter it into the table and * so create a canonical spelling that isn't a keyword. */ if (c_common_reswords[i].disable & mask) { if (warn_cxx_compat && (c_common_reswords[i].disable & D_CXXWARN)) { id = get_identifier(c_common_reswords[i].word); C_SET_RID_CODE(id, RID_CXX_COMPAT_WARN); C_IS_RESERVED_WORD(id) = 1; } continue; } id = get_identifier(c_common_reswords[i].word); C_SET_RID_CODE(id, c_common_reswords[i].rid); C_IS_RESERVED_WORD(id) = 1; ridpointers[(int)c_common_reswords[i].rid] = id; } } /* * The C lexer intermediates between the lexer in cpplib and c-lex.c and * the C parser. Unlike the C++ lexer, the parser structure stores the * lexer information instead of using a separate structure. Identifiers * are separated into ordinary identifiers, type names, keywords and some * other Objective-C types of identifiers, and some look-ahead is * maintained. * * ??? It might be a good idea to lex the whole file up front (as for C++). * It would then be possible to share more of the C and C++ lexer code, * if desired. */ /* The following local token type is used. */ /* A keyword. */ #define CPP_KEYWORD ((enum cpp_ttype) (N_TTYPES + 1)) /* More information about the type of a CPP_NAME token. */ typedef enum c_id_kind { /* An ordinary identifier. */ C_ID_ID, /* An identifier declared as a typedef name. */ C_ID_TYPENAME, /* An identifier declared as an Objective-C class name. */ C_ID_CLASSNAME, /* An address space identifier. */ C_ID_ADDRSPACE, /* Not an identifier. */ C_ID_NONE } c_id_kind; /* * A single C token after string literal concatenation and conversion of * preprocessing tokens to tokens. */ typedef struct GTY (()) c_token { /* The kind of token. */ ENUM_BITFIELD(cpp_ttype, type, 8); /* * If this token is a CPP_NAME, this value indicates whether also * declared as some kind of type. Otherwise, it is C_ID_NONE. 
*/ ENUM_BITFIELD(c_id_kind, id_kind, 8); /* * If this token is a keyword, this value indicates which keyword. * Otherwise, this value is RID_MAX. */ ENUM_BITFIELD(rid, keyword, 8); /* * If this token is a CPP_PRAGMA, this indicates the pragma that was * seen. Otherwise it is PRAGMA_NONE. */ ENUM_BITFIELD(pragma_kind, pragma_kind, 8); /* The value associated with this token, if any. */ tree value; /* The location at which this token was found. */ location_t location; } c_token; /* * A parser structure recording information about the state and context * of parsing. Includes lexer information with up to two tokens of * look-ahead; more are not needed for C. */ typedef struct GTY (()) c_parser { /* The look-ahead tokens. */ c_token tokens[2]; /* How many look-ahead tokens are available (0, 1 or 2). */ short tokens_avail; /* * True if a syntax error is being recovered from; false otherwise. * c_parser_error sets this flag. It should clear this flag when * enough tokens have been consumed to recover from the error. */ BOOL_BITFIELD error:1; /* * True if we're processing a pragma, and shouldn't automatically * consume CPP_PRAGMA_EOL. */ BOOL_BITFIELD in_pragma:1; /* True if we're parsing the outermost block of an if statement. */ BOOL_BITFIELD in_if_block:1; /* True if we want to lex an untranslated string. */ BOOL_BITFIELD lex_untranslated_string:1; /* Objective-C specific parser/lexer information. */ BOOL_BITFIELD objc_pq_context:1; /* * The following flag is needed to contextualize Objective-C lexical * analysis. In some cases (e.g., 'int NSObject;'), it is * undesirable to bind an identifier to an Objective-C class, even if * a class with that name exists. */ BOOL_BITFIELD objc_need_raw_identifier:1; } c_parser; /* * The actual parser and external interface. ??? Does this need to be * garbage-collected? */ static GTY(()) c_parser *the_parser; /* Read in and lex a single token, storing it in *TOKEN. */ static void c_lex_one_token(c_parser * parser, c_token * token) { timevar_push(TV_LEX); token->type = c_lex_with_flags(&token->value, &token->location, NULL, (parser->lex_untranslated_string ? C_LEX_STRING_NO_TRANSLATE : 0)); token->id_kind = C_ID_NONE; token->keyword = RID_MAX; token->pragma_kind = PRAGMA_NONE; switch (token->type) { case CPP_NAME: { tree decl; bool objc_force_identifier = parser->objc_need_raw_identifier; if (c_dialect_objc()) parser->objc_need_raw_identifier = false; if (C_IS_RESERVED_WORD(token->value)) { enum rid rid_code = C_RID_CODE(token->value); if (rid_code == RID_CXX_COMPAT_WARN) { warning_at(token->location, OPT_Wc___compat, "identifier %qE conflicts with C++ keyword", token->value); } else if (rid_code >= RID_FIRST_ADDR_SPACE && rid_code <= RID_LAST_ADDR_SPACE) { token->id_kind = C_ID_ADDRSPACE; token->keyword = rid_code; break; } else if (c_dialect_objc()) { if (!objc_is_reserved_word(token->value) && (!OBJC_IS_PQ_KEYWORD(rid_code) || parser->objc_pq_context)) { /* * Return the canonical spelling for this * keyword. */ token->value = ridpointers[(int)rid_code]; token->type = CPP_KEYWORD; token->keyword = rid_code; break; } } else { token->type = CPP_KEYWORD; token->keyword = rid_code; break; } } decl = lookup_name(token->value); if (decl) { if (TREE_CODE(decl) == TYPE_DECL) { token->id_kind = C_ID_TYPENAME; break; } } else if (c_dialect_objc()) { tree objc_interface_decl = objc_is_class_name(token->value); /* * Objective-C class names are in the same namespace as * variables and typedefs, and hence are shadowed by * local declarations. 
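For example, after a local declaration int NSObject; the lookup_name result above is the shadowing VAR_DECL, so the condition below fails and the token is classified as C_ID_ID rather than C_ID_CLASSNAME even though a class of that name exists.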
*/ if (objc_interface_decl && (global_bindings_p() || (!objc_force_identifier && !decl))) { token->value = objc_interface_decl; token->id_kind = C_ID_CLASSNAME; break; } } token->id_kind = C_ID_ID; } break; case CPP_AT_NAME: /* This only happens in Objective-C; it must be a keyword. */ token->type = CPP_KEYWORD; token->keyword = C_RID_CODE(token->value); break; case CPP_COLON: case CPP_COMMA: case CPP_CLOSE_PAREN: case CPP_SEMICOLON: /* * These tokens may affect the interpretation of any identifiers * following, if doing Objective-C. */ if (c_dialect_objc()) parser->objc_need_raw_identifier = false; break; case CPP_PRAGMA: /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST. */ token->pragma_kind = (enum pragma_kind)TREE_INT_CST_LOW(token->value); token->value = NULL; break; default: break; } timevar_pop(TV_LEX); } /* * Return a pointer to the next token from PARSER, reading it in if * necessary. */ static inline c_token * c_parser_peek_token(c_parser * parser) { if (parser->tokens_avail == 0) { c_lex_one_token(parser, &parser->tokens[0]); parser->tokens_avail = 1; } return &parser->tokens[0]; } /* * Return true if the next token from PARSER has the indicated TYPE. */ static inline bool c_parser_next_token_is(c_parser * parser, enum cpp_ttype type) { return c_parser_peek_token(parser)->type == type; } /* * Return true if the next token from PARSER does not have the indicated * TYPE. */ static inline bool c_parser_next_token_is_not(c_parser * parser, enum cpp_ttype type) { return !c_parser_next_token_is(parser, type); } /* * Return true if the next token from PARSER is the indicated KEYWORD. */ static inline bool c_parser_next_token_is_keyword(c_parser * parser, enum rid keyword) { return c_parser_peek_token(parser)->keyword == keyword; } /* * Return true if TOKEN can start a type name, false otherwise. */ static bool c_token_starts_typename(c_token * token) { switch (token->type) { case CPP_NAME: switch (token->id_kind) { case C_ID_ID: return false; case C_ID_ADDRSPACE: return true; case C_ID_TYPENAME: return true; case C_ID_CLASSNAME: gcc_assert(c_dialect_objc()); return true; default: gcc_unreachable(); } case CPP_KEYWORD: switch (token->keyword) { case RID_UNSIGNED: case RID_LONG: case RID_SHORT: case RID_SIGNED: case RID_COMPLEX: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: case RID_BOOL: case RID_ENUM: case RID_STRUCT: case RID_UNION: case RID_TYPEOF: case RID_CONST: case RID_VOLATILE: case RID_RESTRICT: case RID_ATTRIBUTE: case RID_FRACT: case RID_ACCUM: case RID_SAT: return true; default: return false; } case CPP_LESS: if (c_dialect_objc()) return true; return false; default: return false; } } /* * Return true if the next token from PARSER can start a type name, false * otherwise. */ static inline bool c_parser_next_token_starts_typename(c_parser * parser) { c_token *token = c_parser_peek_token(parser); return c_token_starts_typename(token); } /* * Return true if TOKEN can start declaration specifiers, false * otherwise. 
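* For example, 'static', 'const', 'unsigned' and any identifier known
* as a typedef name can start declaration specifiers; an ordinary
* identifier or a literal cannot.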
*/ static bool c_token_starts_declspecs(c_token * token) { switch (token->type) { case CPP_NAME: switch (token->id_kind) { case C_ID_ID: return false; case C_ID_ADDRSPACE: return true; case C_ID_TYPENAME: return true; case C_ID_CLASSNAME: gcc_assert(c_dialect_objc()); return true; default: gcc_unreachable(); } case CPP_KEYWORD: switch (token->keyword) { case RID_STATIC: case RID_EXTERN: case RID_REGISTER: case RID_TYPEDEF: case RID_INLINE: case RID_AUTO: case RID_THREAD: case RID_UNSIGNED: case RID_LONG: case RID_SHORT: case RID_SIGNED: case RID_COMPLEX: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: case RID_BOOL: case RID_ENUM: case RID_STRUCT: case RID_UNION: case RID_TYPEOF: case RID_CONST: case RID_VOLATILE: case RID_RESTRICT: case RID_ATTRIBUTE: case RID_FRACT: case RID_ACCUM: case RID_SAT: return true; default: return false; } case CPP_LESS: if (c_dialect_objc()) return true; return false; default: return false; } } /* * Return true if the next token from PARSER can start declaration * specifiers, false otherwise. */ static inline bool c_parser_next_token_starts_declspecs(c_parser * parser) { c_token *token = c_parser_peek_token(parser); return c_token_starts_declspecs(token); } /* * Return a pointer to the next-but-one token from PARSER, reading it in * if necessary. The next token is already read in. */ static c_token * c_parser_peek_2nd_token(c_parser * parser) { if (parser->tokens_avail >= 2) return &parser->tokens[1]; gcc_assert(parser->tokens_avail == 1); gcc_assert(parser->tokens[0].type != CPP_EOF); gcc_assert(parser->tokens[0].type != CPP_PRAGMA_EOL); c_lex_one_token(parser, &parser->tokens[1]); parser->tokens_avail = 2; return &parser->tokens[1]; } /* Consume the next token from PARSER. */ static void c_parser_consume_token(c_parser * parser) { gcc_assert(parser->tokens_avail >= 1); gcc_assert(parser->tokens[0].type != CPP_EOF); gcc_assert(!parser->in_pragma || parser->tokens[0].type != CPP_PRAGMA_EOL); gcc_assert(parser->error || parser->tokens[0].type != CPP_PRAGMA); if (parser->tokens_avail == 2) parser->tokens[0] = parser->tokens[1]; parser->tokens_avail--; } /* * Expect the current token to be a #pragma. Consume it and remember * that we've begun parsing a pragma. */ static void c_parser_consume_pragma(c_parser * parser) { gcc_assert(!parser->in_pragma); gcc_assert(parser->tokens_avail >= 1); gcc_assert(parser->tokens[0].type == CPP_PRAGMA); if (parser->tokens_avail == 2) parser->tokens[0] = parser->tokens[1]; parser->tokens_avail--; parser->in_pragma = true; } /* * Update the globals input_location and in_system_header from TOKEN. */ static inline void c_parser_set_source_position_from_token(c_token * token) { if (token->type != CPP_EOF) { input_location = token->location; } } /* * Issue a diagnostic of the form FILE:LINE: MESSAGE before TOKEN where * TOKEN is the next token in the input stream of PARSER. MESSAGE * (specified by the caller) is usually of the form "expected * OTHER-TOKEN". * * Do not issue a diagnostic if still recovering from an error. * * ??? This is taken from the C++ parser, but building up messages in this * way is not i18n-friendly and some other approach should be used. */ static void c_parser_error(c_parser * parser, const char *gmsgid) { c_token *token = c_parser_peek_token(parser); if (parser->error) return; parser->error = true; if (!gmsgid) return; /* * This diagnostic makes more sense if it is tagged to the line of * the token we just peeked at. 
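* (GMSGID is typically a string such as "expected %<;%>", as passed by
* the c_parser_require calls elsewhere in this file.)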
*/ c_parser_set_source_position_from_token(token); c_parse_error(gmsgid, /* * Because c_parse_error does not understand CPP_KEYWORD, keywords * are treated like identifiers. */ (token->type == CPP_KEYWORD ? CPP_NAME : token->type), /* * ??? The C parser does not save the cpp flags of a token, we need * to pass 0 here and we will not get the source spelling of some * tokens but rather the canonical spelling. */ token->value, /* flags= */ 0); } /* * If the next token is of the indicated TYPE, consume it. Otherwise, * issue the error MSGID. If MSGID is NULL then a message has already * been produced and no message will be produced this time. Returns true * if found, false otherwise. */ static bool c_parser_require(c_parser * parser, enum cpp_ttype type, const char *msgid) { if (c_parser_next_token_is(parser, type)) { c_parser_consume_token(parser); return true; } else { c_parser_error(parser, msgid); return false; } } /* * If the next token is the indicated keyword, consume it. Otherwise, * issue the error MSGID. Returns true if found, false otherwise. */ static bool c_parser_require_keyword(c_parser * parser, enum rid keyword, const char *msgid) { if (c_parser_next_token_is_keyword(parser, keyword)) { c_parser_consume_token(parser); return true; } else { c_parser_error(parser, msgid); return false; } } /* * Like c_parser_require, except that tokens will be skipped until the * desired token is found. An error message is still produced if the * next token is not as expected. If MSGID is NULL then a message has * already been produced and no message will be produced this time. */ static void c_parser_skip_until_found(c_parser * parser, enum cpp_ttype type, const char *msgid) { unsigned nesting_depth = 0; if (c_parser_require(parser, type, msgid)) return; /* Skip tokens until the desired token is found. */ while (true) { /* Peek at the next token. */ c_token *token = c_parser_peek_token(parser); /* If we've reached the token we want, consume it and stop. */ if (token->type == type && !nesting_depth) { c_parser_consume_token(parser); break; } /* If we've run out of tokens, stop. */ if (token->type == CPP_EOF) return; if (token->type == CPP_PRAGMA_EOL && parser->in_pragma) return; if (token->type == CPP_OPEN_BRACE || token->type == CPP_OPEN_PAREN || token->type == CPP_OPEN_SQUARE) ++nesting_depth; else if (token->type == CPP_CLOSE_BRACE || token->type == CPP_CLOSE_PAREN || token->type == CPP_CLOSE_SQUARE) { if (nesting_depth-- == 0) break; } /* Consume this token. */ c_parser_consume_token(parser); } parser->error = false; } /* * Skip tokens until the end of a parameter is found, but do not consume * the comma, semicolon or closing delimiter. */ static void c_parser_skip_to_end_of_parameter(c_parser * parser) { unsigned nesting_depth = 0; while (true) { c_token *token = c_parser_peek_token(parser); if ((token->type == CPP_COMMA || token->type == CPP_SEMICOLON) && !nesting_depth) break; /* If we've run out of tokens, stop. */ if (token->type == CPP_EOF) return; if (token->type == CPP_PRAGMA_EOL && parser->in_pragma) return; if (token->type == CPP_OPEN_BRACE || token->type == CPP_OPEN_PAREN || token->type == CPP_OPEN_SQUARE) ++nesting_depth; else if (token->type == CPP_CLOSE_BRACE || token->type == CPP_CLOSE_PAREN || token->type == CPP_CLOSE_SQUARE) { if (nesting_depth-- == 0) break; } /* Consume this token. */ c_parser_consume_token(parser); } parser->error = false; } /* * Expect to be at the end of the pragma directive and consume an end of * line marker. 
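* (A pragma reaches the parser as a CPP_PRAGMA token, followed by the
* pragma's operand tokens and a terminating CPP_PRAGMA_EOL; see
* c_parser_consume_pragma above.)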
*/ static void c_parser_skip_to_pragma_eol(c_parser * parser) { gcc_assert(parser->in_pragma); parser->in_pragma = false; if (!c_parser_require(parser, CPP_PRAGMA_EOL, "expected end of line")) while (true) { c_token *token = c_parser_peek_token(parser); if (token->type == CPP_EOF) break; if (token->type == CPP_PRAGMA_EOL) { c_parser_consume_token(parser); break; } c_parser_consume_token(parser); } parser->error = false; } /* * Skip tokens until we have consumed an entire block, or until we have * consumed a non-nested ';'. */ static void c_parser_skip_to_end_of_block_or_statement(c_parser * parser) { unsigned nesting_depth = 0; bool save_error = parser->error; while (true) { c_token *token; /* Peek at the next token. */ token = c_parser_peek_token(parser); switch (token->type) { case CPP_EOF: return; case CPP_PRAGMA_EOL: if (parser->in_pragma) return; break; case CPP_SEMICOLON: /* * If the next token is a ';', we have reached the end of the * statement. */ if (!nesting_depth) { /* Consume the ';'. */ c_parser_consume_token(parser); goto finished; } break; case CPP_CLOSE_BRACE: /* * If the next token is a non-nested '}', then we have * reached the end of the current block. */ if (nesting_depth == 0 || --nesting_depth == 0) { c_parser_consume_token(parser); goto finished; } break; case CPP_OPEN_BRACE: /* * If the next token is a '{', then we are entering a new * block. Consume the entire block. */ ++nesting_depth; break; case CPP_PRAGMA: /* * If we see a pragma, consume the whole thing at once. We * have some safeguards against consuming pragmas * willy-nilly. Normally, we'd expect to be here with * parser->error set, which disables these safeguards. But * it's possible to get here for secondary error recovery, * after parser->error has been cleared. */ c_parser_consume_pragma(parser); c_parser_skip_to_pragma_eol(parser); parser->error = save_error; continue; default: break; } c_parser_consume_token(parser); } finished: parser->error = false; } /* CPP's options (initialized by c-opts.c). */ extern cpp_options *cpp_opts; /* Save the warning flags which are controlled by __extension__. */ static inline int disable_extension_diagnostics(void) { int ret = (pedantic | (warn_pointer_arith << 1) | (warn_traditional << 2) | (flag_iso << 3) | (warn_long_long << 4) | (warn_cxx_compat << 5)); cpp_opts->pedantic = pedantic = 0; warn_pointer_arith = 0; cpp_opts->warn_traditional = warn_traditional = 0; flag_iso = 0; cpp_opts->warn_long_long = warn_long_long = 0; warn_cxx_compat = 0; return ret; } /* * Restore the warning flags which are controlled by __extension__. FLAGS * is the return value from disable_extension_diagnostics. */ static inline void restore_extension_diagnostics(int flags) { cpp_opts->pedantic = pedantic = flags & 1; warn_pointer_arith = (flags >> 1) & 1; cpp_opts->warn_traditional = warn_traditional = (flags >> 2) & 1; flag_iso = (flags >> 3) & 1; cpp_opts->warn_long_long = warn_long_long = (flags >> 4) & 1; warn_cxx_compat = (flags >> 5) & 1; } /* Possible kinds of declarator to parse. */ typedef enum c_dtr_syn { /* A normal declarator with an identifier. */ C_DTR_NORMAL, /* An abstract declarator (maybe empty). */ C_DTR_ABSTRACT, /* * A parameter declarator: may be either, but after a type name does * not redeclare a typedef name as an identifier if it can * alternatively be interpreted as a typedef name; see DR#009, * applied in C90 TC1, omitted from C99 and reapplied in C99 TC2 * following DR#249. 
For example, given a typedef T, "int T" and * "int *T" are valid parameter declarations redeclaring T, while * "int (T)" and "int * (T)" and "int (T[])" and "int (T (int))" are * abstract declarators rather than involving redundant parentheses; * the same applies with attributes inside the parentheses before * "T". */ C_DTR_PARM } c_dtr_syn; static void c_parser_external_declaration(c_parser *); static void c_parser_asm_definition(c_parser *); static void c_parser_declaration_or_fndef(c_parser *, bool, bool, bool, bool); static void c_parser_declspecs(c_parser *, struct c_declspecs *, bool, bool, bool); static struct c_typespec c_parser_enum_specifier(c_parser *); static struct c_typespec c_parser_struct_or_union_specifier(c_parser *); static tree c_parser_struct_declaration(c_parser *); static struct c_typespec c_parser_typeof_specifier(c_parser *); static struct c_declarator *c_parser_declarator(c_parser *, bool, c_dtr_syn, bool *); static struct c_declarator *c_parser_direct_declarator(c_parser *, bool, c_dtr_syn, bool *); static struct c_declarator *c_parser_direct_declarator_inner(c_parser *, bool, struct c_declarator *); static struct c_arg_info *c_parser_parms_declarator(c_parser *, bool, tree); static struct c_arg_info *c_parser_parms_list_declarator(c_parser *, tree); static struct c_parm *c_parser_parameter_declaration(c_parser *, tree); static tree c_parser_simple_asm_expr(c_parser *); static tree c_parser_attributes(c_parser *); static struct c_type_name *c_parser_type_name(c_parser *); static struct c_expr c_parser_initializer(c_parser *); static struct c_expr c_parser_braced_init(c_parser *, tree, bool); static void c_parser_initelt(c_parser *); static void c_parser_initval(c_parser *, struct c_expr *); static tree c_parser_compound_statement(c_parser *); static void c_parser_compound_statement_nostart(c_parser *); static void c_parser_label(c_parser *); static void c_parser_statement(c_parser *); static void c_parser_statement_after_labels(c_parser *); static void c_parser_if_statement(c_parser *); static void c_parser_switch_statement(c_parser *); static void c_parser_while_statement(c_parser *); static void c_parser_do_statement(c_parser *); static void c_parser_for_statement(c_parser *); static tree c_parser_asm_statement(c_parser *); static tree c_parser_asm_operands(c_parser *, bool); static tree c_parser_asm_goto_operands(c_parser *); static tree c_parser_asm_clobbers(c_parser *); static struct c_expr c_parser_expr_no_commas(c_parser *, struct c_expr *); static struct c_expr c_parser_conditional_expression(c_parser *, struct c_expr *); static struct c_expr c_parser_binary_expression(c_parser *, struct c_expr *); static struct c_expr c_parser_cast_expression(c_parser *, struct c_expr *); static struct c_expr c_parser_unary_expression(c_parser *); static struct c_expr c_parser_sizeof_expression(c_parser *); static struct c_expr c_parser_alignof_expression(c_parser *); static struct c_expr c_parser_postfix_expression(c_parser *); static struct c_expr c_parser_postfix_expression_after_paren_type(c_parser *, struct c_type_name *, location_t); static struct c_expr c_parser_postfix_expression_after_primary(c_parser *, location_t loc, struct c_expr); static struct c_expr c_parser_expression(c_parser *); static struct c_expr c_parser_expression_conv(c_parser *); static VEC(tree, gc) * c_parser_expr_list(c_parser *, bool, bool, VEC(tree, gc) **); static void c_parser_omp_construct(c_parser *); static void c_parser_omp_threadprivate(c_parser *); static void 
c_parser_omp_barrier(c_parser *); static void c_parser_omp_flush(c_parser *); static void c_parser_omp_taskwait(c_parser *); enum pragma_context { pragma_external, pragma_stmt, pragma_compound }; static bool c_parser_pragma(c_parser *, enum pragma_context); /* * These Objective-C parser functions are only ever called when compiling * Objective-C. */ static void c_parser_objc_class_definition(c_parser *); static void c_parser_objc_class_instance_variables(c_parser *); static void c_parser_objc_class_declaration(c_parser *); static void c_parser_objc_alias_declaration(c_parser *); static void c_parser_objc_protocol_definition(c_parser *); static enum tree_code c_parser_objc_method_type(c_parser *); static void c_parser_objc_method_definition(c_parser *); static void c_parser_objc_methodprotolist(c_parser *); static void c_parser_objc_methodproto(c_parser *); static tree c_parser_objc_method_decl(c_parser *); static tree c_parser_objc_type_name(c_parser *); static tree c_parser_objc_protocol_refs(c_parser *); static void c_parser_objc_try_catch_statement(c_parser *); static void c_parser_objc_synchronized_statement(c_parser *); static tree c_parser_objc_selector(c_parser *); static tree c_parser_objc_selector_arg(c_parser *); static tree c_parser_objc_receiver(c_parser *); static tree c_parser_objc_message_args(c_parser *); static tree c_parser_objc_keywordexpr(c_parser *); /* * Parse a translation unit (C90 6.7, C99 6.9). * * translation-unit: external-declarations * * external-declarations: external-declaration external-declarations * external-declaration * * GNU extensions: * * translation-unit: empty */ static void c_parser_translation_unit(c_parser * parser) { if (c_parser_next_token_is(parser, CPP_EOF)) { pedwarn(c_parser_peek_token(parser)->location, OPT_pedantic, "ISO C forbids an empty translation unit"); } else { void *obstack_position = obstack_alloc(&parser_obstack, 0); mark_valid_location_for_stdc_pragma(false); do { ggc_collect(); c_parser_external_declaration(parser); obstack_free(&parser_obstack, obstack_position); } while (c_parser_next_token_is_not(parser, CPP_EOF)); } } /* * Parse an external declaration (C90 6.7, C99 6.9). 
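* For example, at file scope 'int i;' is parsed here as a declaration
* and 'int main (void) { return 0; }' as a function-definition.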
* * external-declaration: function-definition declaration * * GNU extensions: * * external-declaration: asm-definition ; __extension__ external-declaration * * Objective-C: * * external-declaration: objc-class-definition objc-class-declaration * objc-alias-declaration objc-protocol-definition objc-method-definition * @end */ static void c_parser_external_declaration(c_parser * parser) { int ext; switch (c_parser_peek_token(parser)->type) { case CPP_KEYWORD: switch (c_parser_peek_token(parser)->keyword) { case RID_EXTENSION: ext = disable_extension_diagnostics(); c_parser_consume_token(parser); c_parser_external_declaration(parser); restore_extension_diagnostics(ext); break; case RID_ASM: c_parser_asm_definition(parser); break; case RID_AT_INTERFACE: case RID_AT_IMPLEMENTATION: gcc_assert(c_dialect_objc()); c_parser_objc_class_definition(parser); break; case RID_CLASS: gcc_assert(c_dialect_objc()); c_parser_objc_class_declaration(parser); break; case RID_AT_ALIAS: gcc_assert(c_dialect_objc()); c_parser_objc_alias_declaration(parser); break; case RID_AT_PROTOCOL: gcc_assert(c_dialect_objc()); c_parser_objc_protocol_definition(parser); break; case RID_AT_END: gcc_assert(c_dialect_objc()); c_parser_consume_token(parser); objc_finish_implementation(); break; default: goto decl_or_fndef; } break; case CPP_SEMICOLON: pedwarn(c_parser_peek_token(parser)->location, OPT_pedantic, "ISO C does not allow extra %<;%> outside of a function"); c_parser_consume_token(parser); break; case CPP_PRAGMA: mark_valid_location_for_stdc_pragma(true); c_parser_pragma(parser, pragma_external); mark_valid_location_for_stdc_pragma(false); break; case CPP_PLUS: case CPP_MINUS: if (c_dialect_objc()) { c_parser_objc_method_definition(parser); break; } /* * Else fall through, and yield a syntax error trying to parse as * a declaration or function definition. */ default: decl_or_fndef: /* * A declaration or a function definition. We can only tell * which after parsing the declaration specifiers, if any, and * the first declarator. */ c_parser_declaration_or_fndef(parser, true, true, false, true); break; } } /* * Parse a declaration or function definition (C90 6.5, 6.7.1, C99 6.7, * 6.9.1). If FNDEF_OK is true, a function definition is accepted; * otherwise (old-style parameter declarations) only other declarations * are accepted. If NESTED is true, we are inside a function or parsing * old-style parameter declarations; any functions encountered are nested * functions and declaration specifiers are required; otherwise we are at * top level and functions are normal functions and declaration * specifiers may be optional. If EMPTY_OK is true, empty declarations * are OK (subject to all other constraints); otherwise (old-style * parameter declarations) they are diagnosed. If START_ATTR_OK is true, * the declaration specifiers may start with attributes; otherwise they * may not. * * declaration: declaration-specifiers init-declarator-list[opt] ; * * function-definition: declaration-specifiers[opt] declarator * declaration-list[opt] compound-statement * * declaration-list: declaration declaration-list declaration * * init-declarator-list: init-declarator init-declarator-list , * init-declarator * * init-declarator: declarator simple-asm-expr[opt] attributes[opt] * declarator simple-asm-expr[opt] attributes[opt] = initializer * * GNU extensions: * * nested-function-definition: declaration-specifiers declarator * declaration-list[opt] compound-statement * * The simple-asm-expr and attributes are GNU extensions. 
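* An init-declarator using both extensions might look like (GNU C,
* names for illustration only):
* 'int x asm ("x_sym") __attribute__ ((unused)) = 0;'.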
* * This function does not handle __extension__; that is handled in its * callers. ??? Following the old parser, __extension__ may start * external declarations, declarations in functions and declarations at * the start of "for" loops, but not old-style parameter declarations. * * C99 requires declaration specifiers in a function definition; the absence * is diagnosed through the diagnosis of implicit int. In GNU C we also * allow but diagnose declarations without declaration specifiers, but * only at top level (elsewhere they conflict with other syntax). * * OpenMP: * * declaration: threadprivate-directive */ static void c_parser_declaration_or_fndef(c_parser * parser, bool fndef_ok, bool empty_ok, bool nested, bool start_attr_ok) { struct c_declspecs *specs; tree prefix_attrs; tree all_prefix_attrs; bool diagnosed_no_specs = false; location_t here = c_parser_peek_token(parser)->location; specs = build_null_declspecs(); c_parser_declspecs(parser, specs, true, true, start_attr_ok); if (parser->error) { c_parser_skip_to_end_of_block_or_statement(parser); return; } if (nested && !specs->declspecs_seen_p) { c_parser_error(parser, "expected declaration specifiers"); c_parser_skip_to_end_of_block_or_statement(parser); return; } finish_declspecs(specs); if (c_parser_next_token_is(parser, CPP_SEMICOLON)) { if (empty_ok) shadow_tag(specs); else { shadow_tag_warned(specs, 1); pedwarn(here, 0, "empty declaration"); } c_parser_consume_token(parser); return; } pending_xref_error(); prefix_attrs = specs->attrs; all_prefix_attrs = prefix_attrs; specs->attrs = NULL_TREE; while (true) { struct c_declarator *declarator; bool dummy = false; tree fnbody; /* * Declaring either one or more declarators (in which case we * should diagnose if there were no declaration specifiers) or a * function definition (in which case the diagnostic for implicit * int suffices). */ declarator = c_parser_declarator(parser, specs->type_seen_p, C_DTR_NORMAL, &dummy); if (declarator == NULL) { c_parser_skip_to_end_of_block_or_statement(parser); return; } if (c_parser_next_token_is(parser, CPP_EQ) || c_parser_next_token_is(parser, CPP_COMMA) || c_parser_next_token_is(parser, CPP_SEMICOLON) || c_parser_next_token_is_keyword(parser, RID_ASM) || c_parser_next_token_is_keyword(parser, RID_ATTRIBUTE)) { tree asm_name = NULL_TREE; tree postfix_attrs = NULL_TREE; if (!diagnosed_no_specs && !specs->declspecs_seen_p) { diagnosed_no_specs = true; pedwarn(here, 0, "data definition has no type or storage class"); } /* * Having seen a data definition, there cannot now be a * function definition. */ fndef_ok = false; if (c_parser_next_token_is_keyword(parser, RID_ASM)) asm_name = c_parser_simple_asm_expr(parser); if (c_parser_next_token_is_keyword(parser, RID_ATTRIBUTE)) postfix_attrs = c_parser_attributes(parser); if (c_parser_next_token_is(parser, CPP_EQ)) { tree d; struct c_expr init; location_t init_loc; c_parser_consume_token(parser); /* * The declaration of the variable is in effect while its * initializer is parsed. 
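* For example, 'int i = sizeof (i);' is valid because 'i' is already
* declared by the time its initializer is parsed.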
*/ d = start_decl(declarator, specs, true, chainon(postfix_attrs, all_prefix_attrs)); if (!d) d = error_mark_node; start_init(d, asm_name, global_bindings_p()); init_loc = c_parser_peek_token(parser)->location; init = c_parser_initializer(parser); finish_init(); if (d != error_mark_node) { maybe_warn_string_init(TREE_TYPE(d), init); finish_decl(d, init_loc, init.value, init.original_type, asm_name); } } else { tree d = start_decl(declarator, specs, false, chainon(postfix_attrs, all_prefix_attrs)); if (d) finish_decl(d, UNKNOWN_LOCATION, NULL_TREE, NULL_TREE, asm_name); } if (c_parser_next_token_is(parser, CPP_COMMA)) { c_parser_consume_token(parser); if (c_parser_next_token_is_keyword(parser, RID_ATTRIBUTE)) all_prefix_attrs = chainon(c_parser_attributes(parser), prefix_attrs); else all_prefix_attrs = prefix_attrs; continue; } else if (c_parser_next_token_is(parser, CPP_SEMICOLON)) { c_parser_consume_token(parser); return; } else { c_parser_error(parser, "expected %<,%> or %<;%>"); c_parser_skip_to_end_of_block_or_statement(parser); return; } } else if (!fndef_ok) { c_parser_error(parser, "expected %<=%>, %<,%>, %<;%>, " "%<asm%> or %<__attribute__%>"); c_parser_skip_to_end_of_block_or_statement(parser); return; } /* Function definition (nested or otherwise). */ if (nested) { pedwarn(here, OPT_pedantic, "ISO C forbids nested functions"); c_push_function_context(); } if (!start_function(specs, declarator, all_prefix_attrs)) { /* * This can appear in many cases looking nothing like a * function definition, so we don't give a more specific * error suggesting there was one. */ c_parser_error(parser, "expected %<=%>, %<,%>, %<;%>, %<asm%> " "or %<__attribute__%>"); if (nested) c_pop_function_context(); break; } /* * Parse old-style parameter declarations. ??? Attributes are * not allowed to start declaration specifiers here because of a * syntax conflict between a function declaration with attribute * suffix and a function definition with an attribute prefix on * first old-style parameter declaration. Following the old * parser, they are not accepted on subsequent old-style * parameter declarations either. However, there is no ambiguity * after the first declaration, nor indeed on the first as long * as we don't allow postfix attributes after a declarator with a * nonempty identifier list in a definition; and postfix * attributes have never been accepted here in function * definitions either. */ while (c_parser_next_token_is_not(parser, CPP_EOF) && c_parser_next_token_is_not(parser, CPP_OPEN_BRACE)) c_parser_declaration_or_fndef(parser, false, false, true, false); store_parm_decls(); DECL_STRUCT_FUNCTION(current_function_decl)->function_start_locus = c_parser_peek_token(parser)->location; fnbody = c_parser_compound_statement(parser); if (nested) { tree decl = current_function_decl; /* * Mark nested functions as needing static-chain initially. * lower_nested_functions will recompute it but the * DECL_STATIC_CHAIN flag is also used before that happens, * by initializer_constant_valid_p. See * gcc.dg/nested-fn-2.c. */ DECL_STATIC_CHAIN(decl) = 1; add_stmt(fnbody); finish_function(); c_pop_function_context(); add_stmt(build_stmt(DECL_SOURCE_LOCATION(decl), DECL_EXPR, decl)); } else { add_stmt(fnbody); finish_function(); } break; } } /* * Parse an asm-definition (asm() outside a function body). This is a * GNU extension. 
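* For example, a file-scope 'asm (".globl my_marker");' is an
* asm-definition (the symbol name is for illustration only).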
* * asm-definition: simple-asm-expr ; */ static void c_parser_asm_definition(c_parser * parser) { tree asm_str = c_parser_simple_asm_expr(parser); if (asm_str) cgraph_add_asm_node(asm_str); c_parser_skip_until_found(parser, CPP_SEMICOLON, "expected %<;%>"); } /* * Parse some declaration specifiers (possibly none) (C90 6.5, C99 6.7), * adding them to SPECS (which may already include some). Storage class * specifiers are accepted iff SCSPEC_OK; type specifiers are accepted * iff TYPESPEC_OK; attributes are accepted at the start iff * START_ATTR_OK. * * declaration-specifiers: storage-class-specifier * declaration-specifiers[opt] type-specifier declaration-specifiers[opt] * type-qualifier declaration-specifiers[opt] function-specifier * declaration-specifiers[opt] * * Function specifiers (inline) are from C99, and are currently handled as * storage class specifiers, as is __thread. * * C90 6.5.1, C99 6.7.1: storage-class-specifier: typedef extern static auto * register * * C99 6.7.4: function-specifier: inline * * C90 6.5.2, C99 6.7.2: type-specifier: void char short int long float * double signed unsigned _Bool _Complex [_Imaginary removed in C99 TC2] * struct-or-union-specifier enum-specifier typedef-name * * (_Bool and _Complex are new in C99.) * * C90 6.5.3, C99 6.7.3: * * type-qualifier: const restrict volatile address-space-qualifier * * (restrict is new in C99.) * * GNU extensions: * * declaration-specifiers: attributes declaration-specifiers[opt] * * type-qualifier: address-space * * address-space: identifier recognized by the target * * storage-class-specifier: __thread * * type-specifier: typeof-specifier _Decimal32 _Decimal64 _Decimal128 _Fract * _Accum _Sat * * (_Fract, _Accum, and _Sat are new from ISO/IEC DTR 18037: * http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1169.pdf) * * Objective-C: * * type-specifier: class-name objc-protocol-refs[opt] typedef-name * objc-protocol-refs objc-protocol-refs */ static void c_parser_declspecs(c_parser * parser, struct c_declspecs *specs, bool scspec_ok, bool typespec_ok, bool start_attr_ok) { bool attrs_ok = start_attr_ok; bool seen_type = specs->type_seen_p; while (c_parser_next_token_is(parser, CPP_NAME) || c_parser_next_token_is(parser, CPP_KEYWORD) || (c_dialect_objc() && c_parser_next_token_is(parser, CPP_LESS))) { struct c_typespec t; tree attrs; location_t loc = c_parser_peek_token(parser)->location; if (c_parser_next_token_is(parser, CPP_NAME)) { tree value = c_parser_peek_token(parser)->value; c_id_kind kind = c_parser_peek_token(parser)->id_kind; if (kind == C_ID_ADDRSPACE) { addr_space_t as = c_parser_peek_token(parser)->keyword - RID_FIRST_ADDR_SPACE; declspecs_add_addrspace(specs, as); c_parser_consume_token(parser); attrs_ok = true; continue; } /* * This finishes the specifiers unless a type name is OK, it * is declared as a type name and a type name hasn't yet been * seen. */ if (!typespec_ok || seen_type || (kind != C_ID_TYPENAME && kind != C_ID_CLASSNAME)) break; c_parser_consume_token(parser); seen_type = true; attrs_ok = true; if (kind == C_ID_TYPENAME && (!c_dialect_objc() || c_parser_next_token_is_not(parser, CPP_LESS))) { t.kind = ctsk_typedef; /* * For a typedef name, record the meaning, not the name. * In case of 'foo foo, bar;'. 
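* (There 'foo' first names the typedef'ed type and is then redeclared
* as a variable of that type; recording the meaning rather than the
* name keeps 'bar' parsing with the intended type.)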
*/ t.spec = lookup_name(value); t.expr = NULL_TREE; t.expr_const_operands = true; } else { tree proto = NULL_TREE; gcc_assert(c_dialect_objc()); t.kind = ctsk_objc; if (c_parser_next_token_is(parser, CPP_LESS)) proto = c_parser_objc_protocol_refs(parser); t.spec = objc_get_protocol_qualified_type(value, proto); t.expr = NULL_TREE; t.expr_const_operands = true; } declspecs_add_type(loc, specs, t); continue; } if (c_parser_next_token_is(parser, CPP_LESS)) { /* * Make "<SomeProtocol>" equivalent to "id <SomeProtocol>" - * nisse@lysator.liu.se. */ tree proto; gcc_assert(c_dialect_objc()); if (!typespec_ok || seen_type) break; proto = c_parser_objc_protocol_refs(parser); t.kind = ctsk_objc; t.spec = objc_get_protocol_qualified_type(NULL_TREE, proto); t.expr = NULL_TREE; t.expr_const_operands = true; declspecs_add_type(loc, specs, t); continue; } gcc_assert(c_parser_next_token_is(parser, CPP_KEYWORD)); switch (c_parser_peek_token(parser)->keyword) { case RID_STATIC: case RID_EXTERN: case RID_REGISTER: case RID_TYPEDEF: case RID_INLINE: case RID_AUTO: case RID_THREAD: if (!scspec_ok) goto out; attrs_ok = true; /* * TODO: Distinguish between function specifiers (inline) and * storage class specifiers, either here or in * declspecs_add_scspec. */ declspecs_add_scspec(specs, c_parser_peek_token(parser)->value); c_parser_consume_token(parser); break; case RID_UNSIGNED: case RID_LONG: case RID_SHORT: case RID_SIGNED: case RID_COMPLEX: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: case RID_BOOL: case RID_FRACT: case RID_ACCUM: case RID_SAT: if (!typespec_ok) goto out; attrs_ok = true; seen_type = true; if (c_dialect_objc()) parser->objc_need_raw_identifier = true; t.kind = ctsk_resword; t.spec = c_parser_peek_token(parser)->value; t.expr = NULL_TREE; t.expr_const_operands = true; declspecs_add_type(loc, specs, t); c_parser_consume_token(parser); break; case RID_ENUM: if (!typespec_ok) goto out; attrs_ok = true; seen_type = true; t = c_parser_enum_specifier(parser); declspecs_add_type(loc, specs, t); break; case RID_STRUCT: case RID_UNION: if (!typespec_ok) goto out; attrs_ok = true; seen_type = true; t = c_parser_struct_or_union_specifier(parser); invoke_plugin_callbacks(PLUGIN_FINISH_TYPE, t.spec); declspecs_add_type(loc, specs, t); break; case RID_TYPEOF: /* * ??? The old parser rejected typeof after other type * specifiers, but is a syntax error the best way of handling * this? */ if (!typespec_ok || seen_type) goto out; attrs_ok = true; seen_type = true; t = c_parser_typeof_specifier(parser); declspecs_add_type(loc, specs, t); break; case RID_CONST: case RID_VOLATILE: case RID_RESTRICT: attrs_ok = true; declspecs_add_qual(specs, c_parser_peek_token(parser)->value); c_parser_consume_token(parser); break; case RID_ATTRIBUTE: if (!attrs_ok) goto out; attrs = c_parser_attributes(parser); declspecs_add_attrs(specs, attrs); break; default: goto out; } } out: ; } /* * Parse an enum specifier (C90 6.5.2.2, C99 6.7.2.2). * * enum-specifier: enum attributes[opt] identifier[opt] { enumerator-list } * attributes[opt] enum attributes[opt] identifier[opt] { enumerator-list * , } attributes[opt] enum attributes[opt] identifier * * The form with trailing comma is new in C99. The forms with attributes * are GNU extensions. In GNU C, we accept any expression without commas * in the syntax (assignment expressions, not just conditional * expressions); assignment expressions will be diagnosed as * non-constant. 
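* For example, 'enum e { A = 1, B = A + 1, };' uses a constant
* expression for B and the C99 trailing comma.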
* * enumerator-list: enumerator enumerator-list , enumerator * * enumerator: enumeration-constant enumeration-constant = * constant-expression */ static struct c_typespec c_parser_enum_specifier(c_parser * parser) { struct c_typespec ret; tree attrs; tree ident = NULL_TREE; location_t enum_loc; location_t ident_loc = UNKNOWN_LOCATION; /* Quiet warning. */ gcc_assert(c_parser_next_token_is_keyword(parser, RID_ENUM)); enum_loc = c_parser_peek_token(parser)->location; c_parser_consume_token(parser); attrs = c_parser_attributes(parser); enum_loc = c_parser_peek_token(parser)->location; /* Set the location in case we create a decl now. */ c_parser_set_source_position_from_token(c_parser_peek_token(parser)); if (c_parser_next_token_is(parser, CPP_NAME)) { ident = c_parser_peek_token(parser)->value; ident_loc = c_parser_peek_token(parser)->location; enum_loc = ident_loc; c_parser_consume_token(parser); } if (c_parser_next_token_is(parser, CPP_OPEN_BRACE)) { /* Parse an enum definition. */ struct c_enum_contents the_enum; tree type = start_enum(enum_loc, &the_enum, ident); tree postfix_attrs; /* * We chain the enumerators in reverse order, then put them in * forward order at the end. */ tree values = NULL_TREE; c_parser_consume_token(parser); while (true) { tree enum_id; tree enum_value; tree enum_decl; bool seen_comma; c_token *token; location_t comma_loc = UNKNOWN_LOCATION; /* Quiet warning. */ location_t value_loc; if (c_parser_next_token_is_not(parser, CPP_NAME)) { c_parser_error(parser, "expected identifier"); c_parser_skip_until_found(parser, CPP_CLOSE_BRACE, NULL); values = error_mark_node; break; } token = c_parser_peek_token(parser); enum_id = token->value; /* Set the location in case we create a decl now. */ c_parser_set_source_position_from_token(token); value_loc = token->location; c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_EQ)) { c_parser_consume_token(parser); value_loc = c_parser_peek_token(parser)->location; enum_value = c_parser_expr_no_commas(parser, NULL).value; } else enum_value = NULL_TREE; enum_decl = build_enumerator(value_loc, &the_enum, enum_id, enum_value); TREE_CHAIN(enum_decl) = values; values = enum_decl; seen_comma = false; if (c_parser_next_token_is(parser, CPP_COMMA)) { comma_loc = c_parser_peek_token(parser)->location; seen_comma = true; c_parser_consume_token(parser); } if (c_parser_next_token_is(parser, CPP_CLOSE_BRACE)) { if (seen_comma && !flag_isoc99) pedwarn(comma_loc, OPT_pedantic, "comma at end of enumerator list"); c_parser_consume_token(parser); break; } if (!seen_comma) { c_parser_error(parser, "expected %<,%> or %<}%>"); c_parser_skip_until_found(parser, CPP_CLOSE_BRACE, NULL); values = error_mark_node; break; } } postfix_attrs = c_parser_attributes(parser); ret.spec = finish_enum(type, nreverse(values), chainon(attrs, postfix_attrs)); ret.kind = ctsk_tagdef; ret.expr = NULL_TREE; ret.expr_const_operands = true; return ret; } else if (!ident) { c_parser_error(parser, "expected %<{%>"); ret.spec = error_mark_node; ret.kind = ctsk_tagref; ret.expr = NULL_TREE; ret.expr_const_operands = true; return ret; } ret = parser_xref_tag(ident_loc, ENUMERAL_TYPE, ident); /* * In ISO C, enumerated types can be referred to only if already * defined. */ if (pedantic && !COMPLETE_TYPE_P(ret.spec)) { gcc_assert(ident); pedwarn(enum_loc, OPT_pedantic, "ISO C forbids forward references to %<enum%> types"); } return ret; } /* * Parse a struct or union specifier (C90 6.5.2.1, C99 6.7.2.1). 
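* For example, 'struct s { int a; } x;' uses the defining form below,
* while a lone 'union u;' uses the bare identifier form.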
* * struct-or-union-specifier: struct-or-union attributes[opt] * identifier[opt] { struct-contents } attributes[opt] struct-or-union * attributes[opt] identifier * * struct-contents: struct-declaration-list * * struct-declaration-list: struct-declaration ; struct-declaration-list * struct-declaration ; * * GNU extensions: * * struct-contents: empty struct-declaration struct-declaration-list * struct-declaration * * struct-declaration-list: struct-declaration-list ; ; * * (Note that in the syntax here, unlike that in ISO C, the semicolons are * included here rather than in struct-declaration, in order to describe * the syntax with extra semicolons and missing semicolon at end.) * * Objective-C: * * struct-declaration-list: @defs ( class-name ) * * (Note this does not include a trailing semicolon, but can be followed by * further declarations, and gets a pedwarn-if-pedantic when followed by * a semicolon.) */ static struct c_typespec c_parser_struct_or_union_specifier(c_parser * parser) { struct c_typespec ret; tree attrs; tree ident = NULL_TREE; location_t struct_loc; location_t ident_loc = UNKNOWN_LOCATION; enum tree_code code; switch (c_parser_peek_token(parser)->keyword) { case RID_STRUCT: code = RECORD_TYPE; break; case RID_UNION: code = UNION_TYPE; break; default: gcc_unreachable(); } struct_loc = c_parser_peek_token(parser)->location; c_parser_consume_token(parser); attrs = c_parser_attributes(parser); /* Set the location in case we create a decl now. */ c_parser_set_source_position_from_token(c_parser_peek_token(parser)); if (c_parser_next_token_is(parser, CPP_NAME)) { ident = c_parser_peek_token(parser)->value; ident_loc = c_parser_peek_token(parser)->location; struct_loc = ident_loc; c_parser_consume_token(parser); } if (c_parser_next_token_is(parser, CPP_OPEN_BRACE)) { /* * Parse a struct or union definition. Start the scope of the * tag before parsing components. */ struct c_struct_parse_info *struct_info; tree type = start_struct(struct_loc, code, ident, &struct_info); tree postfix_attrs; /* * We chain the components in reverse order, then put them in * forward order at the end. Each struct-declaration may declare * multiple components (comma-separated), so we must use chainon * to join them, although when parsing each struct-declaration we * can use TREE_CHAIN directly. * * The theory behind all this is that there will be more semicolon * separated fields than comma separated fields, and so we'll be * minimizing the number of node traversals required by chainon. */ tree contents = NULL_TREE; c_parser_consume_token(parser); /* * Handle the Objective-C @defs construct, e.g. * foo(sizeof(struct{ @defs(ClassName) }));. */ if (c_parser_next_token_is_keyword(parser, RID_AT_DEFS)) { tree name; gcc_assert(c_dialect_objc()); c_parser_consume_token(parser); if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) goto end_at_defs; if (c_parser_next_token_is(parser, CPP_NAME) && c_parser_peek_token(parser)->id_kind == C_ID_CLASSNAME) { name = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); } else { c_parser_error(parser, "expected class name"); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); goto end_at_defs; } c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); contents = nreverse(objc_get_class_ivars(name)); } end_at_defs: /* * Parse the struct-declarations and semicolons. Problems with * semicolons are diagnosed here; empty structures are diagnosed * elsewhere. */ while (true) { tree decls; /* Parse any stray semicolon. 
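* (e.g. the second ';' in 'struct s { int a;; };', which draws the
* pedwarn below.)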
*/ if (c_parser_next_token_is(parser, CPP_SEMICOLON)) { pedwarn(c_parser_peek_token(parser)->location, OPT_pedantic, "extra semicolon in struct or union specified"); c_parser_consume_token(parser); continue; } /* Stop if at the end of the struct or union contents. */ if (c_parser_next_token_is(parser, CPP_CLOSE_BRACE)) { c_parser_consume_token(parser); break; } /* Accept #pragmas at struct scope. */ if (c_parser_next_token_is(parser, CPP_PRAGMA)) { c_parser_pragma(parser, pragma_external); continue; } /* * Parse some comma-separated declarations, but not the * trailing semicolon if any. */ decls = c_parser_struct_declaration(parser); contents = chainon(decls, contents); /* * If no semicolon follows, either we have a parse error or * are at the end of the struct or union and should pedwarn. */ if (c_parser_next_token_is(parser, CPP_SEMICOLON)) c_parser_consume_token(parser); else { if (c_parser_next_token_is(parser, CPP_CLOSE_BRACE)) pedwarn(c_parser_peek_token(parser)->location, 0, "no semicolon at end of struct or union"); else { c_parser_error(parser, "expected %<;%>"); c_parser_skip_until_found(parser, CPP_CLOSE_BRACE, NULL); break; } } } postfix_attrs = c_parser_attributes(parser); ret.spec = finish_struct(struct_loc, type, nreverse(contents), chainon(attrs, postfix_attrs), struct_info); ret.kind = ctsk_tagdef; ret.expr = NULL_TREE; ret.expr_const_operands = true; return ret; } else if (!ident) { c_parser_error(parser, "expected %<{%>"); ret.spec = error_mark_node; ret.kind = ctsk_tagref; ret.expr = NULL_TREE; ret.expr_const_operands = true; return ret; } ret = parser_xref_tag(ident_loc, code, ident); return ret; } /* * Parse a struct-declaration (C90 6.5.2.1, C99 6.7.2.1), *without* the * trailing semicolon. * * struct-declaration: specifier-qualifier-list struct-declarator-list * * specifier-qualifier-list: type-specifier specifier-qualifier-list[opt] * type-qualifier specifier-qualifier-list[opt] attributes * specifier-qualifier-list[opt] * * struct-declarator-list: struct-declarator struct-declarator-list , * attributes[opt] struct-declarator * * struct-declarator: declarator attributes[opt] declarator[opt] : * constant-expression attributes[opt] * * GNU extensions: * * struct-declaration: __extension__ struct-declaration * specifier-qualifier-list * * Unlike the ISO C syntax, semicolons are handled elsewhere. The use of * attributes where shown is a GNU extension. In GNU C, we accept any * expression without commas in the syntax (assignment expressions, not * just conditional expressions); assignment expressions will be * diagnosed as non-constant. 
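* For example, 'unsigned a : 4, : 2, b : 4;' is one struct-declaration
* declaring a named bit-field, unnamed padding and a second named
* bit-field.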
*/ static tree c_parser_struct_declaration(c_parser * parser) { struct c_declspecs *specs; tree prefix_attrs; tree all_prefix_attrs; tree decls; location_t decl_loc; if (c_parser_next_token_is_keyword(parser, RID_EXTENSION)) { int ext; tree decl; ext = disable_extension_diagnostics(); c_parser_consume_token(parser); decl = c_parser_struct_declaration(parser); restore_extension_diagnostics(ext); return decl; } specs = build_null_declspecs(); decl_loc = c_parser_peek_token(parser)->location; c_parser_declspecs(parser, specs, false, true, true); if (parser->error) return NULL_TREE; if (!specs->declspecs_seen_p) { c_parser_error(parser, "expected specifier-qualifier-list"); return NULL_TREE; } finish_declspecs(specs); if (c_parser_next_token_is(parser, CPP_SEMICOLON)) { tree ret; if (!specs->type_seen_p) { pedwarn(decl_loc, OPT_pedantic, "ISO C forbids member declarations with no members"); shadow_tag_warned(specs, pedantic); ret = NULL_TREE; } else { /* * Support for unnamed structs or unions as members of * structs or unions (which is [a] useful and [b] supports MS * P-SDK). */ tree attrs = NULL; ret = grokfield(c_parser_peek_token(parser)->location, build_id_declarator(NULL_TREE), specs, NULL_TREE, &attrs); if (ret) decl_attributes(&ret, attrs, 0); } return ret; } pending_xref_error(); prefix_attrs = specs->attrs; all_prefix_attrs = prefix_attrs; specs->attrs = NULL_TREE; decls = NULL_TREE; while (true) { /* Declaring one or more declarators or un-named bit-fields. */ struct c_declarator *declarator; bool dummy = false; if (c_parser_next_token_is(parser, CPP_COLON)) declarator = build_id_declarator(NULL_TREE); else declarator = c_parser_declarator(parser, specs->type_seen_p, C_DTR_NORMAL, &dummy); if (declarator == NULL) { c_parser_skip_to_end_of_block_or_statement(parser); break; } if (c_parser_next_token_is(parser, CPP_COLON) || c_parser_next_token_is(parser, CPP_COMMA) || c_parser_next_token_is(parser, CPP_SEMICOLON) || c_parser_next_token_is(parser, CPP_CLOSE_BRACE) || c_parser_next_token_is_keyword(parser, RID_ATTRIBUTE)) { tree postfix_attrs = NULL_TREE; tree width = NULL_TREE; tree d; if (c_parser_next_token_is(parser, CPP_COLON)) { c_parser_consume_token(parser); width = c_parser_expr_no_commas(parser, NULL).value; } if (c_parser_next_token_is_keyword(parser, RID_ATTRIBUTE)) postfix_attrs = c_parser_attributes(parser); d = grokfield(c_parser_peek_token(parser)->location, declarator, specs, width, &all_prefix_attrs); decl_attributes(&d, chainon(postfix_attrs, all_prefix_attrs), 0); TREE_CHAIN(d) = decls; decls = d; if (c_parser_next_token_is_keyword(parser, RID_ATTRIBUTE)) all_prefix_attrs = chainon(c_parser_attributes(parser), prefix_attrs); else all_prefix_attrs = prefix_attrs; if (c_parser_next_token_is(parser, CPP_COMMA)) c_parser_consume_token(parser); else if (c_parser_next_token_is(parser, CPP_SEMICOLON) || c_parser_next_token_is(parser, CPP_CLOSE_BRACE)) { /* Semicolon consumed in caller. */ break; } else { c_parser_error(parser, "expected %<,%>, %<;%> or %<}%>"); break; } } else { c_parser_error(parser, "expected %<:%>, %<,%>, %<;%>, %<}%> or " "%<__attribute__%>"); break; } } return decls; } /* * Parse a typeof specifier (a GNU extension). 
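* For example, 'typeof (x) y;' takes the type from an expression, while
* 'typeof (int *) p;' takes a type-name.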
* * typeof-specifier: typeof ( expression ) typeof ( type-name ) */ static struct c_typespec c_parser_typeof_specifier(c_parser * parser) { struct c_typespec ret; ret.kind = ctsk_typeof; ret.spec = error_mark_node; ret.expr = NULL_TREE; ret.expr_const_operands = true; gcc_assert(c_parser_next_token_is_keyword(parser, RID_TYPEOF)); c_parser_consume_token(parser); c_inhibit_evaluation_warnings++; in_typeof++; if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { c_inhibit_evaluation_warnings--; in_typeof--; return ret; } if (c_parser_next_token_starts_typename(parser)) { struct c_type_name *type = c_parser_type_name(parser); c_inhibit_evaluation_warnings--; in_typeof--; if (type != NULL) { ret.spec = groktypename(type, &ret.expr, &ret.expr_const_operands); pop_maybe_used(variably_modified_type_p(ret.spec, NULL_TREE)); } } else { bool was_vm; location_t here = c_parser_peek_token(parser)->location; struct c_expr expr = c_parser_expression(parser); c_inhibit_evaluation_warnings--; in_typeof--; if (TREE_CODE(expr.value) == COMPONENT_REF && DECL_C_BIT_FIELD(TREE_OPERAND(expr.value, 1))) error_at(here, "%<typeof%> applied to a bit-field"); ret.spec = TREE_TYPE(expr.value); was_vm = variably_modified_type_p(ret.spec, NULL_TREE); /* * This is returned with the type so that when the type is * evaluated, this can be evaluated. */ if (was_vm) ret.expr = c_fully_fold(expr.value, false, &ret.expr_const_operands); pop_maybe_used(was_vm); } c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); return ret; } /* * Parse a declarator, possibly an abstract declarator (C90 6.5.4, 6.5.5, * C99 6.7.5, 6.7.6). If TYPE_SEEN_P then a typedef name may be * redeclared; otherwise it may not. KIND indicates which kind of * declarator is wanted. Returns a valid declarator except in the case * of a syntax error in which case NULL is returned. *SEEN_ID is set to * true if an identifier being declared is seen; this is used to diagnose * bad forms of abstract array declarators and to determine whether an * identifier list is syntactically permitted. * * declarator: pointer[opt] direct-declarator * * direct-declarator: identifier ( attributes[opt] declarator ) * direct-declarator array-declarator direct-declarator ( * parameter-type-list ) direct-declarator ( identifier-list[opt] ) * * pointer: type-qualifier-list[opt] type-qualifier-list[opt] pointer * * type-qualifier-list: type-qualifier attributes type-qualifier-list * type-qualifier type-qualifier-list attributes * * parameter-type-list: parameter-list parameter-list , ... * * parameter-list: parameter-declaration parameter-list , * parameter-declaration * * parameter-declaration: declaration-specifiers declarator attributes[opt] * declaration-specifiers abstract-declarator[opt] attributes[opt] * * identifier-list: identifier identifier-list , identifier * * abstract-declarator: pointer pointer[opt] direct-abstract-declarator * * direct-abstract-declarator: ( attributes[opt] abstract-declarator ) * direct-abstract-declarator[opt] array-declarator * direct-abstract-declarator[opt] ( parameter-type-list[opt] ) * * GNU extensions: * * direct-declarator: direct-declarator ( parameter-forward-declarations * parameter-type-list[opt] ) * * direct-abstract-declarator: direct-abstract-declarator[opt] ( * parameter-forward-declarations parameter-type-list[opt] ) * * parameter-forward-declarations: parameter-list ; * parameter-forward-declarations parameter-list ; * * The uses of attributes shown above are GNU extensions. 
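* For example, 'int (*fp) (int, char *);' uses a parenthesized
* declarator ('*fp') followed by a parameter-type-list.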
* * Some forms of array declarator are not included in C99 in the syntax for * abstract declarators; these are disallowed elsewhere. This may be a * defect (DR#289). * * This function also accepts an omitted abstract declarator as being an * abstract declarator, although not part of the formal syntax. */ static struct c_declarator * c_parser_declarator(c_parser * parser, bool type_seen_p, c_dtr_syn kind, bool * seen_id) { /* Parse any initial pointer part. */ if (c_parser_next_token_is(parser, CPP_MULT)) { struct c_declspecs *quals_attrs = build_null_declspecs(); struct c_declarator *inner; c_parser_consume_token(parser); c_parser_declspecs(parser, quals_attrs, false, false, true); inner = c_parser_declarator(parser, type_seen_p, kind, seen_id); if (inner == NULL) return NULL; else return make_pointer_declarator(quals_attrs, inner); } /* * Now we have a direct declarator, direct abstract declarator or * nothing (which counts as a direct abstract declarator here). */ return c_parser_direct_declarator(parser, type_seen_p, kind, seen_id); } /* * Parse a direct declarator or direct abstract declarator; arguments as * c_parser_declarator. */ static struct c_declarator * c_parser_direct_declarator(c_parser * parser, bool type_seen_p, c_dtr_syn kind, bool * seen_id) { /* * The direct declarator must start with an identifier (possibly * omitted) or a parenthesized declarator (possibly abstract). In an * ordinary declarator, initial parentheses must start a * parenthesized declarator. In an abstract declarator or parameter * declarator, they could start a parenthesized declarator or a * parameter list. To tell which, the open parenthesis and any * following attributes must be read. If a declaration specifier * follows, then it is a parameter list; if the specifier is a * typedef name, there might be an ambiguity about redeclaring it, * which is resolved in the direction of treating it as a typedef * name. If a close parenthesis follows, it is also an empty * parameter list, as the syntax does not permit empty abstract * declarators. Otherwise, it is a parenthesized declarator (in * which case the analysis may be repeated inside it, recursively). * * ??? There is an ambiguity in a parameter declaration "int * (__attribute__((foo)) x)", where x is not a typedef name: it could * be an abstract declarator for a function, or declare x with * parentheses. The proper resolution of this ambiguity needs * documenting. At present we follow an accident of the old parser's * implementation, whereby the first parameter must have some * declaration specifiers other than just attributes. Thus as a * parameter declaration it is treated as a parenthesized parameter * named x, and as an abstract declarator it is rejected. * * ??? Also following the old parser, attributes inside an empty * parameter list are ignored, making it a list not yielding a * prototype, rather than giving an error or making it have one * parameter with implicit type int. * * ??? Also following the old parser, typedef names may be redeclared in * declarators, but not Objective-C class names. 
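* (So after 'typedef int T;', a block-scope declaration 'float T;' is
* accepted here and redeclares T as a variable.)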
*/ if (kind != C_DTR_ABSTRACT && c_parser_next_token_is(parser, CPP_NAME) && ((type_seen_p && c_parser_peek_token(parser)->id_kind == C_ID_TYPENAME) || c_parser_peek_token(parser)->id_kind == C_ID_ID)) { struct c_declarator *inner = build_id_declarator(c_parser_peek_token(parser)->value); *seen_id = true; inner->id_loc = c_parser_peek_token(parser)->location; c_parser_consume_token(parser); return c_parser_direct_declarator_inner(parser, *seen_id, inner); } if (kind != C_DTR_NORMAL && c_parser_next_token_is(parser, CPP_OPEN_SQUARE)) { struct c_declarator *inner = build_id_declarator(NULL_TREE); return c_parser_direct_declarator_inner(parser, *seen_id, inner); } /* * Either we are at the end of an abstract declarator, or we have * parentheses. */ if (c_parser_next_token_is(parser, CPP_OPEN_PAREN)) { tree attrs; struct c_declarator *inner; c_parser_consume_token(parser); attrs = c_parser_attributes(parser); if (kind != C_DTR_NORMAL && (c_parser_next_token_starts_declspecs(parser) || c_parser_next_token_is(parser, CPP_CLOSE_PAREN))) { struct c_arg_info *args = c_parser_parms_declarator(parser, kind == C_DTR_NORMAL, attrs); if (args == NULL) return NULL; else { inner = build_function_declarator(args, build_id_declarator(NULL_TREE)); return c_parser_direct_declarator_inner(parser, *seen_id, inner); } } /* A parenthesized declarator. */ inner = c_parser_declarator(parser, type_seen_p, kind, seen_id); if (inner != NULL && attrs != NULL) inner = build_attrs_declarator(attrs, inner); if (c_parser_next_token_is(parser, CPP_CLOSE_PAREN)) { c_parser_consume_token(parser); if (inner == NULL) return NULL; else return c_parser_direct_declarator_inner(parser, *seen_id, inner); } else { c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); return NULL; } } else { if (kind == C_DTR_NORMAL) { c_parser_error(parser, "expected identifier or %<(%>"); return NULL; } else return build_id_declarator(NULL_TREE); } } /* * Parse part of a direct declarator or direct abstract declarator, given * that some (in INNER) has already been parsed; ID_PRESENT is true if an * identifier is present, false for an abstract declarator. */ static struct c_declarator * c_parser_direct_declarator_inner(c_parser * parser, bool id_present, struct c_declarator *inner) { /* Parse a sequence of array declarators and parameter lists. */ if (c_parser_next_token_is(parser, CPP_OPEN_SQUARE)) { location_t brace_loc = c_parser_peek_token(parser)->location; struct c_declarator *declarator; struct c_declspecs *quals_attrs = build_null_declspecs(); bool static_seen; bool star_seen; tree dimen; c_parser_consume_token(parser); c_parser_declspecs(parser, quals_attrs, false, false, true); static_seen = c_parser_next_token_is_keyword(parser, RID_STATIC); if (static_seen) c_parser_consume_token(parser); if (static_seen && !quals_attrs->declspecs_seen_p) c_parser_declspecs(parser, quals_attrs, false, false, true); if (!quals_attrs->declspecs_seen_p) quals_attrs = NULL; /* * If "static" is present, there must be an array dimension. * Otherwise, there may be a dimension, "*", or no dimension. 
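* For example: 'void f (int a[static 4]);' requires a dimension,
* 'void g (int a[*]);' uses the C99 unspecified-size form (valid only
* in declarations that are not definitions), and 'void h (int a[]);'
* has none.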
*/ if (static_seen) { star_seen = false; dimen = c_parser_expr_no_commas(parser, NULL).value; } else { if (c_parser_next_token_is(parser, CPP_CLOSE_SQUARE)) { dimen = NULL_TREE; star_seen = false; } else if (c_parser_next_token_is(parser, CPP_MULT)) { if (c_parser_peek_2nd_token(parser)->type == CPP_CLOSE_SQUARE) { dimen = NULL_TREE; star_seen = true; c_parser_consume_token(parser); } else { star_seen = false; dimen = c_parser_expr_no_commas(parser, NULL).value; } } else { star_seen = false; dimen = c_parser_expr_no_commas(parser, NULL).value; } } if (c_parser_next_token_is(parser, CPP_CLOSE_SQUARE)) c_parser_consume_token(parser); else { c_parser_skip_until_found(parser, CPP_CLOSE_SQUARE, "expected %<]%>"); return NULL; } declarator = build_array_declarator(brace_loc, dimen, quals_attrs, static_seen, star_seen); if (declarator == NULL) return NULL; inner = set_array_declarator_inner(declarator, inner); return c_parser_direct_declarator_inner(parser, id_present, inner); } else if (c_parser_next_token_is(parser, CPP_OPEN_PAREN)) { tree attrs; struct c_arg_info *args; c_parser_consume_token(parser); attrs = c_parser_attributes(parser); args = c_parser_parms_declarator(parser, id_present, attrs); if (args == NULL) return NULL; else { inner = build_function_declarator(args, inner); return c_parser_direct_declarator_inner(parser, id_present, inner); } } return inner; } /* * Parse a parameter list or identifier list, including the closing * parenthesis but not the opening one. ATTRS are the attributes at the * start of the list. ID_LIST_OK is true if an identifier list is * acceptable; such a list must not have attributes at the start. */ static struct c_arg_info * c_parser_parms_declarator(c_parser * parser, bool id_list_ok, tree attrs) { push_scope(); declare_parm_level(); /* * If the list starts with an identifier, it is an identifier list. * Otherwise, it is either a prototype list or an empty list. */ if (id_list_ok && !attrs && c_parser_next_token_is(parser, CPP_NAME) && c_parser_peek_token(parser)->id_kind == C_ID_ID) { tree list = NULL_TREE, *nextp = &list; while (c_parser_next_token_is(parser, CPP_NAME) && c_parser_peek_token(parser)->id_kind == C_ID_ID) { *nextp = build_tree_list(NULL_TREE, c_parser_peek_token(parser)->value); nextp = &TREE_CHAIN(*nextp); c_parser_consume_token(parser); if (c_parser_next_token_is_not(parser, CPP_COMMA)) break; c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_CLOSE_PAREN)) { c_parser_error(parser, "expected identifier"); break; } } if (c_parser_next_token_is(parser, CPP_CLOSE_PAREN)) { struct c_arg_info *ret = XOBNEW(&parser_obstack, struct c_arg_info); ret->parms = 0; ret->tags = 0; ret->types = list; ret->others = 0; ret->pending_sizes = 0; ret->had_vla_unspec = 0; c_parser_consume_token(parser); pop_scope(); return ret; } else { c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); pop_scope(); return NULL; } } else { struct c_arg_info *ret = c_parser_parms_list_declarator(parser, attrs); pop_scope(); return ret; } } /* * Parse a parameter list (possibly empty), including the closing * parenthesis but not the opening one. ATTRS are the attributes at the * start of the list. */ static struct c_arg_info * c_parser_parms_list_declarator(c_parser * parser, tree attrs) { bool good_parm = false; /* * ??? Following the old parser, forward parameter declarations may * use abstract declarators, and if no real parameter declarations * follow the forward declarations then this is not diagnosed. 
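 */
/*
 * Illustration (editor's sketch, not part of the GCC sources; #if 0
 * keeps it out of any build): an old-style identifier list, a prototype
 * list, and a GNU forward parameter declaration of the kind discussed
 * here.
 */
#if 0
int add(a, b)              /* identifier list ...                       */
    int a, b;              /* ... with old-style parameter declarations */
{ return a + b; }

int addp(int a, int b);    /* prototype list                            */

void mat(int n; int m[n][n], int n)  /* forward declaration of n,
                                        terminated by ";"               */
{ (void) m; }
#endif
/*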
Also * note as above that attributes are ignored as the only contents of * the parentheses, or as the only contents after forward * declarations. */ if (c_parser_next_token_is(parser, CPP_CLOSE_PAREN)) { struct c_arg_info *ret = XOBNEW(&parser_obstack, struct c_arg_info); ret->parms = 0; ret->tags = 0; ret->types = 0; ret->others = 0; ret->pending_sizes = 0; ret->had_vla_unspec = 0; c_parser_consume_token(parser); return ret; } if (c_parser_next_token_is(parser, CPP_ELLIPSIS)) { struct c_arg_info *ret = XOBNEW(&parser_obstack, struct c_arg_info); ret->parms = 0; ret->tags = 0; ret->others = 0; ret->pending_sizes = 0; ret->had_vla_unspec = 0; /* Suppress -Wold-style-definition for this case. */ ret->types = error_mark_node; error_at(c_parser_peek_token(parser)->location, "ISO C requires a named argument before %<...%>"); c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_CLOSE_PAREN)) { c_parser_consume_token(parser); return ret; } else { c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); return NULL; } } /* * Nonempty list of parameters, either terminated with semicolon * (forward declarations; recurse) or with close parenthesis (normal * function) or with ", ... )" (variadic function). */ while (true) { /* Parse a parameter. */ struct c_parm *parm = c_parser_parameter_declaration(parser, attrs); attrs = NULL_TREE; if (parm != NULL) { good_parm = true; push_parm_decl(parm); } if (c_parser_next_token_is(parser, CPP_SEMICOLON)) { tree new_attrs; c_parser_consume_token(parser); mark_forward_parm_decls(); new_attrs = c_parser_attributes(parser); return c_parser_parms_list_declarator(parser, new_attrs); } if (c_parser_next_token_is(parser, CPP_CLOSE_PAREN)) { c_parser_consume_token(parser); if (good_parm) return get_parm_info(false); else { struct c_arg_info *ret = XOBNEW(&parser_obstack, struct c_arg_info); ret->parms = 0; ret->tags = 0; ret->types = 0; ret->others = 0; ret->pending_sizes = 0; ret->had_vla_unspec = 0; return ret; } } if (!c_parser_require(parser, CPP_COMMA, "expected %<;%>, %<,%> or %<)%>")) { c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); get_pending_sizes(); return NULL; } if (c_parser_next_token_is(parser, CPP_ELLIPSIS)) { c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_CLOSE_PAREN)) { c_parser_consume_token(parser); if (good_parm) return get_parm_info(true); else { struct c_arg_info *ret = XOBNEW(&parser_obstack, struct c_arg_info); ret->parms = 0; ret->tags = 0; ret->types = 0; ret->others = 0; ret->pending_sizes = 0; ret->had_vla_unspec = 0; return ret; } } else { c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); get_pending_sizes(); return NULL; } } } } /* * Parse a parameter declaration. ATTRS are the attributes at the start * of the declaration if it is the first parameter. */ static struct c_parm * c_parser_parameter_declaration(c_parser * parser, tree attrs) { struct c_declspecs *specs; struct c_declarator *declarator; tree prefix_attrs; tree postfix_attrs = NULL_TREE; bool dummy = false; if (!c_parser_next_token_starts_declspecs(parser)) { /* * ??? In some Objective-C cases '...' isn't applicable so there * should be a different message. 
*/ c_parser_error(parser, "expected declaration specifiers or %<...%>"); c_parser_skip_to_end_of_parameter(parser); return NULL; } specs = build_null_declspecs(); if (attrs) { declspecs_add_attrs(specs, attrs); attrs = NULL_TREE; } c_parser_declspecs(parser, specs, true, true, true); finish_declspecs(specs); pending_xref_error(); prefix_attrs = specs->attrs; specs->attrs = NULL_TREE; declarator = c_parser_declarator(parser, specs->type_seen_p, C_DTR_PARM, &dummy); if (declarator == NULL) { c_parser_skip_until_found(parser, CPP_COMMA, NULL); return NULL; } if (c_parser_next_token_is_keyword(parser, RID_ATTRIBUTE)) postfix_attrs = c_parser_attributes(parser); return build_c_parm(specs, chainon(postfix_attrs, prefix_attrs), declarator); } /* * Parse a string literal in an asm expression. It should not be * translated, and wide string literals are an error although permitted * by the syntax. This is a GNU extension. * * asm-string-literal: string-literal * * ??? At present, following the old parser, the caller needs to have set * lex_untranslated_string to 1. It would be better to follow the C++ * parser rather than using this kludge. */ static tree c_parser_asm_string_literal(c_parser * parser) { tree str; if (c_parser_next_token_is(parser, CPP_STRING)) { str = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); } else if (c_parser_next_token_is(parser, CPP_WSTRING)) { error_at(c_parser_peek_token(parser)->location, "wide string literal in %<asm%>"); str = build_string(1, ""); c_parser_consume_token(parser); } else { c_parser_error(parser, "expected string literal"); str = NULL_TREE; } return str; } /* * Parse a simple asm expression. This is used in restricted contexts, * where a full expression with inputs and outputs does not make sense. * This is a GNU extension. * * simple-asm-expr: asm ( asm-string-literal ) */ static tree c_parser_simple_asm_expr(c_parser * parser) { tree str; gcc_assert(c_parser_next_token_is_keyword(parser, RID_ASM)); /* * ??? Follow the C++ parser rather than using the * lex_untranslated_string kludge. */ parser->lex_untranslated_string = true; c_parser_consume_token(parser); if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { parser->lex_untranslated_string = false; return NULL_TREE; } str = c_parser_asm_string_literal(parser); parser->lex_untranslated_string = false; if (!c_parser_require(parser, CPP_CLOSE_PAREN, "expected %<)%>")) { c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); return NULL_TREE; } return str; } /* * Parse (possibly empty) attributes. This is a GNU extension. * * attributes: empty attributes attribute * * attribute: __attribute__ ( ( attribute-list ) ) * * attribute-list: attrib attribute_list , attrib * * attrib: empty any-word any-word ( identifier ) any-word ( identifier , * nonempty-expr-list ) any-word ( expr-list ) * * where the "identifier" must not be declared as a type, and "any-word" may * be any identifier (including one declared as a type), a reserved word * storage class specifier, type specifier or type qualifier. ??? This * still leaves out most reserved keywords (following the old parser), * shouldn't we include them, and why not allow identifiers declared as * types to start the arguments? */ static tree c_parser_attributes(c_parser * parser) { tree attrs = NULL_TREE; while (c_parser_next_token_is_keyword(parser, RID_ATTRIBUTE)) { /* * ??? Follow the C++ parser rather than using the * lex_untranslated_string kludge. 
*/ parser->lex_untranslated_string = true; c_parser_consume_token(parser); if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { parser->lex_untranslated_string = false; return attrs; } if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { parser->lex_untranslated_string = false; c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); return attrs; } /* Parse the attribute list. */ while (c_parser_next_token_is(parser, CPP_COMMA) || c_parser_next_token_is(parser, CPP_NAME) || c_parser_next_token_is(parser, CPP_KEYWORD)) { tree attr, attr_name, attr_args; VEC(tree, gc) * expr_list; if (c_parser_next_token_is(parser, CPP_COMMA)) { c_parser_consume_token(parser); continue; } if (c_parser_next_token_is(parser, CPP_KEYWORD)) { /* * ??? See comment above about what keywords are accepted * here. */ bool ok; switch (c_parser_peek_token(parser)->keyword) { case RID_STATIC: case RID_UNSIGNED: case RID_LONG: case RID_CONST: case RID_EXTERN: case RID_REGISTER: case RID_TYPEDEF: case RID_SHORT: case RID_INLINE: case RID_VOLATILE: case RID_SIGNED: case RID_AUTO: case RID_RESTRICT: case RID_COMPLEX: case RID_THREAD: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: case RID_BOOL: case RID_FRACT: case RID_ACCUM: case RID_SAT: ok = true; break; default: ok = false; break; } if (!ok) break; /* * Accept __attribute__((__const)) as * __attribute__((const)) etc. */ attr_name = ridpointers[(int)c_parser_peek_token(parser)->keyword]; } else attr_name = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); if (c_parser_next_token_is_not(parser, CPP_OPEN_PAREN)) { attr = build_tree_list(attr_name, NULL_TREE); attrs = chainon(attrs, attr); continue; } c_parser_consume_token(parser); /* * Parse the attribute contents. If they start with an * identifier which is followed by a comma or close * parenthesis, then the arguments start with that * identifier; otherwise they are an expression list. 
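 */
/*
 * Illustration (editor's sketch, not part of the GCC sources; #if 0
 * keeps it out of any build): the attribute argument forms distinguished
 * by the test below.
 */
#if 0
void err(const char *, ...)
    __attribute__((format(printf, 1, 2)));  /* identifier "printf", then
                                               an expression list       */
struct s { char c; } __attribute__((aligned(4 * 4)));  /* plain
                                               expression list          */
int sq(int) __attribute__((const));         /* keyword accepted as an
                                               attribute name           */
void stop(void) __attribute__((noreturn));  /* no arguments             */
#endif
/*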
*/ if (c_parser_next_token_is(parser, CPP_NAME) && c_parser_peek_token(parser)->id_kind == C_ID_ID && ((c_parser_peek_2nd_token(parser)->type == CPP_COMMA) || (c_parser_peek_2nd_token(parser)->type == CPP_CLOSE_PAREN))) { tree arg1 = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_CLOSE_PAREN)) attr_args = build_tree_list(NULL_TREE, arg1); else { tree tree_list; c_parser_consume_token(parser); expr_list = c_parser_expr_list(parser, false, true, NULL); tree_list = build_tree_list_vec(expr_list); attr_args = tree_cons(NULL_TREE, arg1, tree_list); release_tree_vector(expr_list); } } else { if (c_parser_next_token_is(parser, CPP_CLOSE_PAREN)) attr_args = NULL_TREE; else { expr_list = c_parser_expr_list(parser, false, true, NULL); attr_args = build_tree_list_vec(expr_list); release_tree_vector(expr_list); } } attr = build_tree_list(attr_name, attr_args); if (c_parser_next_token_is(parser, CPP_CLOSE_PAREN)) c_parser_consume_token(parser); else { parser->lex_untranslated_string = false; c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } attrs = chainon(attrs, attr); } if (c_parser_next_token_is(parser, CPP_CLOSE_PAREN)) c_parser_consume_token(parser); else { parser->lex_untranslated_string = false; c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } if (c_parser_next_token_is(parser, CPP_CLOSE_PAREN)) c_parser_consume_token(parser); else { parser->lex_untranslated_string = false; c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); return attrs; } parser->lex_untranslated_string = false; } return attrs; } /* * Parse a type name (C90 6.5.5, C99 6.7.6). * * type-name: specifier-qualifier-list abstract-declarator[opt] */ static struct c_type_name * c_parser_type_name(c_parser * parser) { struct c_declspecs *specs = build_null_declspecs(); struct c_declarator *declarator; struct c_type_name *ret; bool dummy = false; c_parser_declspecs(parser, specs, false, true, true); if (!specs->declspecs_seen_p) { c_parser_error(parser, "expected specifier-qualifier-list"); return NULL; } pending_xref_error(); finish_declspecs(specs); declarator = c_parser_declarator(parser, specs->type_seen_p, C_DTR_ABSTRACT, &dummy); if (declarator == NULL) return NULL; ret = XOBNEW(&parser_obstack, struct c_type_name); ret->specs = specs; ret->declarator = declarator; return ret; } /* * Parse an initializer (C90 6.5.7, C99 6.7.8). * * initializer: assignment-expression { initializer-list } { * initializer-list , } * * initializer-list: designation[opt] initializer initializer-list , * designation[opt] initializer * * designation: designator-list = * * designator-list: designator designator-list designator * * designator: array-designator . identifier * * array-designator: [ constant-expression ] * * GNU extensions: * * initializer: { } * * designation: array-designator identifier : * * array-designator: [ constant-expression ... constant-expression ] * * Any expression without commas is accepted in the syntax for the * constant-expressions, with non-constant expressions rejected later. * * This function is only used for top-level initializers; for nested ones, * see c_parser_initval. 
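 */
/*
 * Illustration (editor's sketch, not part of the GCC sources; #if 0
 * keeps it out of any build): the initializer and designator forms
 * listed above.
 */
#if 0
struct point { int x, y; };
struct point p1 = { .x = 1, .y = 2 };  /* C99 member designators        */
int a1[4] = { [2] = 5 };               /* C99 array designator          */
int a2[8] = { [1 ... 3] = 7 };         /* GNU range designator          */
struct point p2 = { x: 1 };            /* obsolete GNU "x:" form        */
int e[2] = { };                        /* GNU empty braces              */
#endif
/*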
*/ static struct c_expr c_parser_initializer(c_parser * parser) { if (c_parser_next_token_is(parser, CPP_OPEN_BRACE)) return c_parser_braced_init(parser, NULL_TREE, false); else { struct c_expr ret; location_t loc = c_parser_peek_token(parser)->location; ret = c_parser_expr_no_commas(parser, NULL); if (TREE_CODE(ret.value) != STRING_CST && TREE_CODE(ret.value) != COMPOUND_LITERAL_EXPR) ret = default_function_array_conversion(loc, ret); return ret; } } /* * Parse a braced initializer list. TYPE is the type specified for a * compound literal, and NULL_TREE for other initializers and for nested * braced lists. NESTED_P is true for nested braced lists, false for the * list of a compound literal or the list that is the top-level * initializer in a declaration. */ static struct c_expr c_parser_braced_init(c_parser * parser, tree type, bool nested_p) { location_t brace_loc = c_parser_peek_token(parser)->location; gcc_assert(c_parser_next_token_is(parser, CPP_OPEN_BRACE)); c_parser_consume_token(parser); if (nested_p) push_init_level(0); else really_start_incremental_init(type); if (c_parser_next_token_is(parser, CPP_CLOSE_BRACE)) { pedwarn(brace_loc, OPT_pedantic, "ISO C forbids empty initializer braces"); } else { /* * Parse a non-empty initializer list, possibly with a trailing * comma. */ while (true) { c_parser_initelt(parser); if (parser->error) break; if (c_parser_next_token_is(parser, CPP_COMMA)) c_parser_consume_token(parser); else break; if (c_parser_next_token_is(parser, CPP_CLOSE_BRACE)) break; } } if (c_parser_next_token_is_not(parser, CPP_CLOSE_BRACE)) { struct c_expr ret; ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; c_parser_skip_until_found(parser, CPP_CLOSE_BRACE, "expected %<}%>"); pop_init_level(0); return ret; } c_parser_consume_token(parser); return pop_init_level(0); } /* Parse a nested initializer, including designators. */ static void c_parser_initelt(c_parser * parser) { /* * Parse any designator or designator list. A single array * designator may have the subsequent "=" omitted in GNU C, but a * longer list or a structure member designator may not. */ if (c_parser_next_token_is(parser, CPP_NAME) && c_parser_peek_2nd_token(parser)->type == CPP_COLON) { /* Old-style structure member designator. */ set_init_label(c_parser_peek_token(parser)->value); /* Use the colon as the error location. */ pedwarn(c_parser_peek_2nd_token(parser)->location, OPT_pedantic, "obsolete use of designated initializer with %<:%>"); c_parser_consume_token(parser); c_parser_consume_token(parser); } else { /* * des_seen is 0 if there have been no designators, 1 if there * has been a single array designator and 2 otherwise. */ int des_seen = 0; /* Location of a designator. */ location_t des_loc = UNKNOWN_LOCATION; /* Quiet warning. 
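 */
/*
 * Illustration (editor's sketch, not part of the GCC sources; #if 0
 * keeps it out of any build): a designator list, i.e. several
 * designators before a single "=", which the loop below accumulates.
 */
#if 0
struct inner { int v[4]; };
struct outer { struct inner a[2]; };
struct outer o = { .a[1].v[2] = 9 };  /* designators ".a", "[1]", ".v",
                                         "[2]", then "=" */
#endif
/*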
*/ while (c_parser_next_token_is(parser, CPP_OPEN_SQUARE) || c_parser_next_token_is(parser, CPP_DOT)) { int des_prev = des_seen; if (!des_seen) des_loc = c_parser_peek_token(parser)->location; if (des_seen < 2) des_seen++; if (c_parser_next_token_is(parser, CPP_DOT)) { des_seen = 2; c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_NAME)) { set_init_label(c_parser_peek_token(parser)->value); c_parser_consume_token(parser); } else { struct c_expr init; init.value = error_mark_node; init.original_code = ERROR_MARK; init.original_type = NULL; c_parser_error(parser, "expected identifier"); c_parser_skip_until_found(parser, CPP_COMMA, NULL); process_init_element(init, false); return; } } else { tree first, second; location_t ellipsis_loc = UNKNOWN_LOCATION; /* Quiet warning. */ /* * ??? Following the old parser, [ objc-receiver * objc-message-args ] is accepted as an initializer, * being distinguished from a designator by what follows * the first assignment expression inside the square * brackets, but after a first array designator a * subsequent square bracket is for Objective-C taken to * start an expression, using the obsolete form of * designated initializer without '=', rather than * possibly being a second level of designation: in LALR * terms, the '[' is shifted rather than reducing * designator to designator-list. */ if (des_prev == 1 && c_dialect_objc()) { des_seen = des_prev; break; } if (des_prev == 0 && c_dialect_objc()) { /* * This might be an array designator or an * Objective-C message expression. If the former, * continue parsing here; if the latter, parse the * remainder of the initializer given the starting * primary-expression. ??? It might make sense to * distinguish when des_prev == 1 as well; see * previous comment. */ tree rec, args; struct c_expr mexpr; c_parser_consume_token(parser); if (c_parser_peek_token(parser)->type == CPP_NAME && ((c_parser_peek_token(parser)->id_kind == C_ID_TYPENAME) || (c_parser_peek_token(parser)->id_kind == C_ID_CLASSNAME))) { /* Type name receiver. */ tree id = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); rec = objc_get_class_reference(id); goto parse_message_args; } first = c_parser_expr_no_commas(parser, NULL).value; if (c_parser_next_token_is(parser, CPP_ELLIPSIS) || c_parser_next_token_is(parser, CPP_CLOSE_SQUARE)) goto array_desig_after_first; /* * Expression receiver. So far only one part without * commas has been parsed; there might be more of the * expression. */ rec = first; while (c_parser_next_token_is(parser, CPP_COMMA)) { struct c_expr next; location_t comma_loc, exp_loc; comma_loc = c_parser_peek_token(parser)->location; c_parser_consume_token(parser); exp_loc = c_parser_peek_token(parser)->location; next = c_parser_expr_no_commas(parser, NULL); next = default_function_array_conversion(exp_loc, next); rec = build_compound_expr(comma_loc, rec, next.value); } parse_message_args: /* Now parse the objc-message-args. */ args = c_parser_objc_message_args(parser); c_parser_skip_until_found(parser, CPP_CLOSE_SQUARE, "expected %<]%>"); mexpr.value = objc_build_message_expr(build_tree_list(rec, args)); mexpr.original_code = ERROR_MARK; mexpr.original_type = NULL; /* * Now parse and process the remainder of the * initializer, starting with this message expression * as a primary-expression. 
*/ c_parser_initval(parser, &mexpr); return; } c_parser_consume_token(parser); first = c_parser_expr_no_commas(parser, NULL).value; array_desig_after_first: if (c_parser_next_token_is(parser, CPP_ELLIPSIS)) { ellipsis_loc = c_parser_peek_token(parser)->location; c_parser_consume_token(parser); second = c_parser_expr_no_commas(parser, NULL).value; } else second = NULL_TREE; if (c_parser_next_token_is(parser, CPP_CLOSE_SQUARE)) { c_parser_consume_token(parser); set_init_index(first, second); if (second) pedwarn(ellipsis_loc, OPT_pedantic, "ISO C forbids specifying range of elements to initialize"); } else c_parser_skip_until_found(parser, CPP_CLOSE_SQUARE, "expected %<]%>"); } } if (des_seen >= 1) { if (c_parser_next_token_is(parser, CPP_EQ)) { if (!flag_isoc99) pedwarn(des_loc, OPT_pedantic, "ISO C90 forbids specifying subobject to initialize"); c_parser_consume_token(parser); } else { if (des_seen == 1) pedwarn(c_parser_peek_token(parser)->location, OPT_pedantic, "obsolete use of designated initializer without %<=%>"); else { struct c_expr init; init.value = error_mark_node; init.original_code = ERROR_MARK; init.original_type = NULL; c_parser_error(parser, "expected %<=%>"); c_parser_skip_until_found(parser, CPP_COMMA, NULL); process_init_element(init, false); return; } } } } c_parser_initval(parser, NULL); } /* * Parse a nested initializer; as c_parser_initializer but parses * initializers within braced lists, after any designators have been * applied. If AFTER is not NULL then it is an Objective-C message * expression which is the primary-expression starting the initializer. */ static void c_parser_initval(c_parser * parser, struct c_expr *after) { struct c_expr init; gcc_assert(!after || c_dialect_objc()); if (c_parser_next_token_is(parser, CPP_OPEN_BRACE) && !after) init = c_parser_braced_init(parser, NULL_TREE, true); else { location_t loc = c_parser_peek_token(parser)->location; init = c_parser_expr_no_commas(parser, after); if (init.value != NULL_TREE && TREE_CODE(init.value) != STRING_CST && TREE_CODE(init.value) != COMPOUND_LITERAL_EXPR) init = default_function_array_conversion(loc, init); } process_init_element(init, false); } /* * Parse a compound statement (possibly a function body) (C90 6.6.2, C99 * 6.8.2). * * compound-statement: { block-item-list[opt] } { label-declarations * block-item-list } * * block-item-list: block-item block-item-list block-item * * block-item: nested-declaration statement * * nested-declaration: declaration * * GNU extensions: * * compound-statement: { label-declarations block-item-list } * * nested-declaration: __extension__ nested-declaration * nested-function-definition * * label-declarations: label-declaration label-declarations * label-declaration * * label-declaration: __label__ identifier-list ; * * Allowing the mixing of declarations and code is new in C99. The GNU * syntax also permits (not shown above) labels at the end of compound * statements, which yield an error. We don't allow labels on * declarations; this might seem like a natural extension, but there * would be a conflict between attributes on the label and prefix * attributes on the declaration. ??? The syntax follows the old parser * in requiring something after label declarations. Although they are * erroneous if the labels declared aren't defined, is it useful for the * syntax to be this way? 
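 */
/*
 * Illustration (editor's sketch, not part of the GCC sources; #if 0
 * keeps it out of any build): a label declaration followed by mixed
 * declarations and code, both discussed above.
 */
#if 0
void demo(void)
{
  __label__ retry;   /* label-declaration, a GNU extension              */
  int i = 0;
retry:
  i++;               /* a statement ...                                 */
  int j = i;         /* ... followed by a declaration: C99 only         */
  if (j < 3)
    goto retry;
}
#endif
/*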
* * OpenMP: * * block-item: openmp-directive * * openmp-directive: barrier-directive flush-directive */ static tree c_parser_compound_statement(c_parser * parser) { tree stmt; location_t brace_loc; brace_loc = c_parser_peek_token(parser)->location; if (!c_parser_require(parser, CPP_OPEN_BRACE, "expected %<{%>")) { /* * Ensure a scope is entered and left anyway to avoid confusion * if we have just prepared to enter a function body. */ stmt = c_begin_compound_stmt(true); c_end_compound_stmt(brace_loc, stmt, true); return error_mark_node; } stmt = c_begin_compound_stmt(true); c_parser_compound_statement_nostart(parser); return c_end_compound_stmt(brace_loc, stmt, true); } /* * Parse a compound statement except for the opening brace. This is used * for parsing both compound statements and statement expressions (which * follow different paths to handling the opening). */ static void c_parser_compound_statement_nostart(c_parser * parser) { bool last_stmt = false; bool last_label = false; bool save_valid_for_pragma = valid_location_for_stdc_pragma_p(); location_t label_loc = UNKNOWN_LOCATION; /* Quiet warning. */ if (c_parser_next_token_is(parser, CPP_CLOSE_BRACE)) { c_parser_consume_token(parser); return; } mark_valid_location_for_stdc_pragma(true); if (c_parser_next_token_is_keyword(parser, RID_LABEL)) { /* * Read zero or more forward-declarations for labels that nested * functions can jump to. */ mark_valid_location_for_stdc_pragma(false); while (c_parser_next_token_is_keyword(parser, RID_LABEL)) { label_loc = c_parser_peek_token(parser)->location; c_parser_consume_token(parser); /* * Any identifiers, including those declared as type names, * are OK here. */ while (true) { tree label; if (c_parser_next_token_is_not(parser, CPP_NAME)) { c_parser_error(parser, "expected identifier"); break; } label = declare_label(c_parser_peek_token(parser)->value); C_DECLARED_LABEL_FLAG(label) = 1; add_stmt(build_stmt(label_loc, DECL_EXPR, label)); c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_COMMA)) c_parser_consume_token(parser); else break; } c_parser_skip_until_found(parser, CPP_SEMICOLON, "expected %<;%>"); } pedwarn(label_loc, OPT_pedantic, "ISO C forbids label declarations"); } /* We must now have at least one statement, label or declaration. */ if (c_parser_next_token_is(parser, CPP_CLOSE_BRACE)) { mark_valid_location_for_stdc_pragma(save_valid_for_pragma); c_parser_error(parser, "expected declaration or statement"); c_parser_consume_token(parser); return; } while (c_parser_next_token_is_not(parser, CPP_CLOSE_BRACE)) { location_t loc = c_parser_peek_token(parser)->location; if (c_parser_next_token_is_keyword(parser, RID_CASE) || c_parser_next_token_is_keyword(parser, RID_DEFAULT) || (c_parser_next_token_is(parser, CPP_NAME) && c_parser_peek_2nd_token(parser)->type == CPP_COLON)) { if (c_parser_next_token_is_keyword(parser, RID_CASE)) label_loc = c_parser_peek_2nd_token(parser)->location; else label_loc = c_parser_peek_token(parser)->location; last_label = true; last_stmt = false; mark_valid_location_for_stdc_pragma(false); c_parser_label(parser); } else if (!last_label && c_parser_next_token_starts_declspecs(parser)) { last_label = false; mark_valid_location_for_stdc_pragma(false); c_parser_declaration_or_fndef(parser, true, true, true, true); if (last_stmt) pedwarn_c90(loc, (pedantic && !flag_isoc99) ? 
OPT_pedantic : OPT_Wdeclaration_after_statement, "ISO C90 forbids mixed declarations and code"); last_stmt = false; } else if (!last_label && c_parser_next_token_is_keyword(parser, RID_EXTENSION)) { /* * __extension__ can start a declaration, but is also an * unary operator that can start an expression. Consume all * but the last of a possible series of __extension__ to * determine which. */ while (c_parser_peek_2nd_token(parser)->type == CPP_KEYWORD && (c_parser_peek_2nd_token(parser)->keyword == RID_EXTENSION)) c_parser_consume_token(parser); if (c_token_starts_declspecs(c_parser_peek_2nd_token(parser))) { int ext; ext = disable_extension_diagnostics(); c_parser_consume_token(parser); last_label = false; mark_valid_location_for_stdc_pragma(false); c_parser_declaration_or_fndef(parser, true, true, true, true); /* * Following the old parser, __extension__ does not * disable this diagnostic. */ restore_extension_diagnostics(ext); if (last_stmt) pedwarn_c90(loc, (pedantic && !flag_isoc99) ? OPT_pedantic : OPT_Wdeclaration_after_statement, "ISO C90 forbids mixed declarations and code"); last_stmt = false; } else goto statement; } else if (c_parser_next_token_is(parser, CPP_PRAGMA)) { /* * External pragmas, and some omp pragmas, are not associated * with regular c code, and so are not to be considered * statements syntactically. This ensures that the user * doesn't put them places that would turn into syntax errors * if the directive were ignored. */ if (c_parser_pragma(parser, pragma_compound)) last_label = false, last_stmt = true; } else if (c_parser_next_token_is(parser, CPP_EOF)) { mark_valid_location_for_stdc_pragma(save_valid_for_pragma); c_parser_error(parser, "expected declaration or statement"); return; } else if (c_parser_next_token_is_keyword(parser, RID_ELSE)) { if (parser->in_if_block) { mark_valid_location_for_stdc_pragma(save_valid_for_pragma); error_at(loc, "" "expected %<}%> before %<else%>"); return; } else { error_at(loc, "%<else%> without a previous %<if%>"); c_parser_consume_token(parser); continue; } } else { statement: last_label = false; last_stmt = true; mark_valid_location_for_stdc_pragma(false); c_parser_statement_after_labels(parser); } parser->error = false; } if (last_label) error_at(label_loc, "label at end of compound statement"); c_parser_consume_token(parser); /* Restore the value we started with. */ mark_valid_location_for_stdc_pragma(save_valid_for_pragma); } /* * Parse a label (C90 6.6.1, C99 6.8.1). * * label: identifier : attributes[opt] case constant-expression : default : * * GNU extensions: * * label: case constant-expression ... constant-expression : * * The use of attributes on labels is a GNU extension. The syntax in GNU C * accepts any expressions without commas, non-constant expressions being * rejected later. 
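 */
/*
 * Illustration (editor's sketch, not part of the GCC sources; #if 0
 * keeps it out of any build, and whether a given attribute is accepted
 * on a label is a semantic question beyond this parser): the label
 * forms from the grammar above.
 */
#if 0
int classify(int x)
{
  switch (x) {
  case 0:                       /* ordinary case label                  */
    return 0;
  case 1 ... 9:                 /* GNU case range                       */
    return 1;
  default:                      /* default label                        */
    break;
  }
done: __attribute__((unused))   /* attributes on a label: GNU extension */
  return -1;
}
#endif
/*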
*/ static void c_parser_label(c_parser * parser) { location_t loc1 = c_parser_peek_token(parser)->location; tree label = NULL_TREE; if (c_parser_next_token_is_keyword(parser, RID_CASE)) { tree exp1, exp2; c_parser_consume_token(parser); exp1 = c_parser_expr_no_commas(parser, NULL).value; if (c_parser_next_token_is(parser, CPP_COLON)) { c_parser_consume_token(parser); label = do_case(loc1, exp1, NULL_TREE); } else if (c_parser_next_token_is(parser, CPP_ELLIPSIS)) { c_parser_consume_token(parser); exp2 = c_parser_expr_no_commas(parser, NULL).value; if (c_parser_require(parser, CPP_COLON, "expected %<:%>")) label = do_case(loc1, exp1, exp2); } else c_parser_error(parser, "expected %<:%> or %<...%>"); } else if (c_parser_next_token_is_keyword(parser, RID_DEFAULT)) { c_parser_consume_token(parser); if (c_parser_require(parser, CPP_COLON, "expected %<:%>")) label = do_case(loc1, NULL_TREE, NULL_TREE); } else { tree name = c_parser_peek_token(parser)->value; tree tlab; tree attrs; location_t loc2 = c_parser_peek_token(parser)->location; gcc_assert(c_parser_next_token_is(parser, CPP_NAME)); c_parser_consume_token(parser); gcc_assert(c_parser_next_token_is(parser, CPP_COLON)); c_parser_consume_token(parser); attrs = c_parser_attributes(parser); tlab = define_label(loc2, name); if (tlab) { decl_attributes(&tlab, attrs, 0); label = add_stmt(build_stmt(loc1, LABEL_EXPR, tlab)); } } if (label) { if (c_parser_next_token_starts_declspecs(parser) && !(c_parser_next_token_is(parser, CPP_NAME) && c_parser_peek_2nd_token(parser)->type == CPP_COLON)) { error_at(c_parser_peek_token(parser)->location, "a label can only be part of a statement and " "a declaration is not a statement"); c_parser_declaration_or_fndef(parser, /* fndef_ok */ false, /* nested */ true, /* empty_ok */ false, /* start_attr_ok */ true); } } } /* * Parse a statement (C90 6.6, C99 6.8). 
* * statement: labeled-statement compound-statement expression-statement * selection-statement iteration-statement jump-statement * * labeled-statement: label statement * * expression-statement: expression[opt] ; * * selection-statement: if-statement switch-statement * * iteration-statement: while-statement do-statement for-statement * * jump-statement: goto identifier ; continue ; break ; return * expression[opt] ; * * GNU extensions: * * statement: asm-statement * * jump-statement: goto * expression ; * * Objective-C: * * statement: objc-throw-statement objc-try-catch-statement * objc-synchronized-statement * * objc-throw-statement: @throw expression ; @throw ; * * OpenMP: * * statement: openmp-construct * * openmp-construct: parallel-construct for-construct sections-construct * single-construct parallel-for-construct parallel-sections-construct * master-construct critical-construct atomic-construct ordered-construct * * parallel-construct: parallel-directive structured-block * * for-construct: for-directive iteration-statement * * sections-construct: sections-directive section-scope * * single-construct: single-directive structured-block * * parallel-for-construct: parallel-for-directive iteration-statement * * parallel-sections-construct: parallel-sections-directive section-scope * * master-construct: master-directive structured-block * * critical-construct: critical-directive structured-block * * atomic-construct: atomic-directive expression-statement * * ordered-construct: ordered-directive structured-block */ static void c_parser_statement(c_parser * parser) { while (c_parser_next_token_is_keyword(parser, RID_CASE) || c_parser_next_token_is_keyword(parser, RID_DEFAULT) || (c_parser_next_token_is(parser, CPP_NAME) && c_parser_peek_2nd_token(parser)->type == CPP_COLON)) c_parser_label(parser); c_parser_statement_after_labels(parser); } /* Parse a statement, other than a labeled statement. 
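 */
/*
 * Illustration (editor's sketch, not part of the GCC sources; #if 0
 * keeps it out of any build): one of the OpenMP statement forms listed
 * above, a parallel-for-construct, i.e. a parallel-for-directive
 * followed by an iteration-statement.
 */
#if 0
int sum(int n)
{
  int i, s = 0;
#pragma omp parallel for reduction(+:s)
  for (i = 0; i < n; i++)
    s += i;
  return s;
}
#endif
/*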
*/ static void c_parser_statement_after_labels(c_parser * parser) { location_t loc = c_parser_peek_token(parser)->location; tree stmt = NULL_TREE; bool in_if_block = parser->in_if_block; parser->in_if_block = false; switch (c_parser_peek_token(parser)->type) { case CPP_OPEN_BRACE: add_stmt(c_parser_compound_statement(parser)); break; case CPP_KEYWORD: switch (c_parser_peek_token(parser)->keyword) { case RID_IF: c_parser_if_statement(parser); break; case RID_SWITCH: c_parser_switch_statement(parser); break; case RID_WHILE: c_parser_while_statement(parser); break; case RID_DO: c_parser_do_statement(parser); break; case RID_FOR: c_parser_for_statement(parser); break; case RID_GOTO: c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_NAME)) { stmt = c_finish_goto_label(loc, c_parser_peek_token(parser)->value); c_parser_consume_token(parser); } else if (c_parser_next_token_is(parser, CPP_MULT)) { c_parser_consume_token(parser); stmt = c_finish_goto_ptr(loc, c_parser_expression(parser).value); } else c_parser_error(parser, "expected identifier or %<*%>"); goto expect_semicolon; case RID_CONTINUE: c_parser_consume_token(parser); stmt = c_finish_bc_stmt(loc, &c_cont_label, false); goto expect_semicolon; case RID_BREAK: c_parser_consume_token(parser); stmt = c_finish_bc_stmt(loc, &c_break_label, true); goto expect_semicolon; case RID_RETURN: c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_SEMICOLON)) { stmt = c_finish_return(loc, NULL_TREE, NULL_TREE); c_parser_consume_token(parser); } else { struct c_expr expr = c_parser_expression_conv(parser); stmt = c_finish_return(loc, expr.value, expr.original_type); goto expect_semicolon; } break; case RID_ASM: stmt = c_parser_asm_statement(parser); break; case RID_THROW: gcc_assert(c_dialect_objc()); c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_SEMICOLON)) { stmt = objc_build_throw_stmt(loc, NULL_TREE); c_parser_consume_token(parser); } else { tree expr = c_parser_expression(parser).value; expr = c_fully_fold(expr, false, NULL); stmt = objc_build_throw_stmt(loc, expr); goto expect_semicolon; } break; case RID_TRY: gcc_assert(c_dialect_objc()); c_parser_objc_try_catch_statement(parser); break; case RID_AT_SYNCHRONIZED: gcc_assert(c_dialect_objc()); c_parser_objc_synchronized_statement(parser); break; default: goto expr_stmt; } break; case CPP_SEMICOLON: c_parser_consume_token(parser); break; case CPP_CLOSE_PAREN: case CPP_CLOSE_SQUARE: /* * Avoid infinite loop in error recovery: * c_parser_skip_until_found stops at a closing nesting delimiter * without consuming it, but here we need to consume it to * proceed further. */ c_parser_error(parser, "expected statement"); c_parser_consume_token(parser); break; case CPP_PRAGMA: c_parser_pragma(parser, pragma_stmt); break; default: expr_stmt: stmt = c_finish_expr_stmt(loc, c_parser_expression_conv(parser).value); expect_semicolon: c_parser_skip_until_found(parser, CPP_SEMICOLON, "expected %<;%>"); break; } /* * Two cases cannot and do not have line numbers associated: If stmt * is degenerate, such as "2;", then stmt is an INTEGER_CST, which * cannot hold line numbers. But that's OK because the statement * will either be changed to a MODIFY_EXPR during gimplification of * the statement expr, or discarded. If stmt was compound, but * without new variables, we will have skipped the creation of a BIND * and will have a bare STATEMENT_LIST. But that's OK because * (recursively) all of the component statements should already have * line numbers assigned. 
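 */
/*
 * Illustration (editor's sketch, not part of the GCC sources; #if 0
 * keeps it out of any build): the two goto forms handled above,
 * including the GNU computed goto recognized by the CPP_MULT check.
 */
#if 0
void dispatch(int i)
{
  static void *table[] = { &&op_a, &&op_b };  /* GNU labels-as-values   */
  goto *table[i];                             /* goto * expression ;    */
op_a:
  return;
op_b:
  return;
}
#endif
/*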
??? Can we discard no-op statements * earlier? */ if (CAN_HAVE_LOCATION_P(stmt) && EXPR_LOCATION(stmt) == UNKNOWN_LOCATION) SET_EXPR_LOCATION(stmt, loc); parser->in_if_block = in_if_block; } /* Parse the condition from an if, do, while or for statements. */ static tree c_parser_condition(c_parser * parser) { location_t loc = c_parser_peek_token(parser)->location; tree cond; cond = c_parser_expression_conv(parser).value; cond = c_objc_common_truthvalue_conversion(loc, cond); cond = c_fully_fold(cond, false, NULL); if (warn_sequence_point) verify_sequence_points(cond); return cond; } /* * Parse a parenthesized condition from an if, do or while statement. * * condition: ( expression ) */ static tree c_parser_paren_condition(c_parser * parser) { tree cond; if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) return error_mark_node; cond = c_parser_condition(parser); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); return cond; } /* Parse a statement which is a block in C99. */ static tree c_parser_c99_block_statement(c_parser * parser) { tree block = c_begin_compound_stmt(flag_isoc99); location_t loc = c_parser_peek_token(parser)->location; c_parser_statement(parser); return c_end_compound_stmt(loc, block, flag_isoc99); } /* * Parse the body of an if statement. This is just parsing a statement * but (a) it is a block in C99, (b) we track whether the body is an if * statement for the sake of -Wparentheses warnings, (c) we handle an * empty body specially for the sake of -Wempty-body warnings, and (d) we * call parser_compound_statement directly because * c_parser_statement_after_labels resets parser->in_if_block. */ static tree c_parser_if_body(c_parser * parser, bool * if_p) { tree block = c_begin_compound_stmt(flag_isoc99); location_t body_loc = c_parser_peek_token(parser)->location; while (c_parser_next_token_is_keyword(parser, RID_CASE) || c_parser_next_token_is_keyword(parser, RID_DEFAULT) || (c_parser_next_token_is(parser, CPP_NAME) && c_parser_peek_2nd_token(parser)->type == CPP_COLON)) c_parser_label(parser); *if_p = c_parser_next_token_is_keyword(parser, RID_IF); if (c_parser_next_token_is(parser, CPP_SEMICOLON)) { location_t loc = c_parser_peek_token(parser)->location; add_stmt(build_empty_stmt(loc)); c_parser_consume_token(parser); if (!c_parser_next_token_is_keyword(parser, RID_ELSE)) warning_at(loc, OPT_Wempty_body, "suggest braces around empty body in an %<if%> statement"); } else if (c_parser_next_token_is(parser, CPP_OPEN_BRACE)) add_stmt(c_parser_compound_statement(parser)); else c_parser_statement_after_labels(parser); return c_end_compound_stmt(body_loc, block, flag_isoc99); } /* * Parse the else body of an if statement. This is just parsing a * statement but (a) it is a block in C99, (b) we handle an empty body * specially for the sake of -Wempty-body warnings. 
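 */
/*
 * Illustration (editor's sketch, not part of the GCC sources; #if 0
 * keeps it out of any build): bodies that exercise the -Wempty-body
 * handling described above.
 */
#if 0
void check(int x)
{
  if (x)
    ;    /* no suggestion here, because an "else" follows               */
  else
    ;    /* empty else body: braces are suggested                       */
}
#endif
/*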
*/ static tree c_parser_else_body(c_parser * parser) { location_t else_loc = c_parser_peek_token(parser)->location; tree block = c_begin_compound_stmt(flag_isoc99); while (c_parser_next_token_is_keyword(parser, RID_CASE) || c_parser_next_token_is_keyword(parser, RID_DEFAULT) || (c_parser_next_token_is(parser, CPP_NAME) && c_parser_peek_2nd_token(parser)->type == CPP_COLON)) c_parser_label(parser); if (c_parser_next_token_is(parser, CPP_SEMICOLON)) { location_t loc = c_parser_peek_token(parser)->location; warning_at(loc, OPT_Wempty_body, "suggest braces around empty body in an %<else%> statement"); add_stmt(build_empty_stmt(loc)); c_parser_consume_token(parser); } else c_parser_statement_after_labels(parser); return c_end_compound_stmt(else_loc, block, flag_isoc99); } /* * Parse an if statement (C90 6.6.4, C99 6.8.4). * * if-statement: if ( expression ) statement if ( expression ) statement * else statement */ static void c_parser_if_statement(c_parser * parser) { tree block; location_t loc; tree cond; bool first_if = false; tree first_body, second_body; bool in_if_block; gcc_assert(c_parser_next_token_is_keyword(parser, RID_IF)); c_parser_consume_token(parser); block = c_begin_compound_stmt(flag_isoc99); loc = c_parser_peek_token(parser)->location; cond = c_parser_paren_condition(parser); in_if_block = parser->in_if_block; parser->in_if_block = true; first_body = c_parser_if_body(parser, &first_if); parser->in_if_block = in_if_block; if (c_parser_next_token_is_keyword(parser, RID_ELSE)) { c_parser_consume_token(parser); second_body = c_parser_else_body(parser); } else second_body = NULL_TREE; c_finish_if_stmt(loc, cond, first_body, second_body, first_if); add_stmt(c_end_compound_stmt(loc, block, flag_isoc99)); } /* * Parse a switch statement (C90 6.6.4, C99 6.8.4). * * switch-statement: switch (expression) statement */ static void c_parser_switch_statement(c_parser * parser) { tree block, expr, body, save_break; location_t switch_loc = c_parser_peek_token(parser)->location; location_t switch_cond_loc; gcc_assert(c_parser_next_token_is_keyword(parser, RID_SWITCH)); c_parser_consume_token(parser); block = c_begin_compound_stmt(flag_isoc99); if (c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { switch_cond_loc = c_parser_peek_token(parser)->location; expr = c_parser_expression(parser).value; c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else { switch_cond_loc = UNKNOWN_LOCATION; expr = error_mark_node; } c_start_case(switch_loc, switch_cond_loc, expr); save_break = c_break_label; c_break_label = NULL_TREE; body = c_parser_c99_block_statement(parser); c_finish_case(body); if (c_break_label) { location_t here = c_parser_peek_token(parser)->location; tree t = build1(LABEL_EXPR, void_type_node, c_break_label); SET_EXPR_LOCATION(t, here); add_stmt(t); } c_break_label = save_break; add_stmt(c_end_compound_stmt(switch_loc, block, flag_isoc99)); } /* * Parse a while statement (C90 6.6.5, C99 6.8.5). 
* * while-statement: while (expression) statement */ static void c_parser_while_statement(c_parser * parser) { tree block, cond, body, save_break, save_cont; location_t loc; gcc_assert(c_parser_next_token_is_keyword(parser, RID_WHILE)); c_parser_consume_token(parser); block = c_begin_compound_stmt(flag_isoc99); loc = c_parser_peek_token(parser)->location; cond = c_parser_paren_condition(parser); save_break = c_break_label; c_break_label = NULL_TREE; save_cont = c_cont_label; c_cont_label = NULL_TREE; body = c_parser_c99_block_statement(parser); c_finish_loop(loc, cond, NULL, body, c_break_label, c_cont_label, true); add_stmt(c_end_compound_stmt(loc, block, flag_isoc99)); c_break_label = save_break; c_cont_label = save_cont; } /* * Parse a do statement (C90 6.6.5, C99 6.8.5). * * do-statement: do statement while ( expression ) ; */ static void c_parser_do_statement(c_parser * parser) { tree block, cond, body, save_break, save_cont, new_break, new_cont; location_t loc; gcc_assert(c_parser_next_token_is_keyword(parser, RID_DO)); c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_SEMICOLON)) warning_at(c_parser_peek_token(parser)->location, OPT_Wempty_body, "suggest braces around empty body in %<do%> statement"); block = c_begin_compound_stmt(flag_isoc99); loc = c_parser_peek_token(parser)->location; save_break = c_break_label; c_break_label = NULL_TREE; save_cont = c_cont_label; c_cont_label = NULL_TREE; body = c_parser_c99_block_statement(parser); c_parser_require_keyword(parser, RID_WHILE, "expected %<while%>"); new_break = c_break_label; c_break_label = save_break; new_cont = c_cont_label; c_cont_label = save_cont; cond = c_parser_paren_condition(parser); if (!c_parser_require(parser, CPP_SEMICOLON, "expected %<;%>")) c_parser_skip_to_end_of_block_or_statement(parser); c_finish_loop(loc, cond, NULL, body, new_break, new_cont, false); add_stmt(c_end_compound_stmt(loc, block, flag_isoc99)); } /* * Parse a for statement (C90 6.6.5, C99 6.8.5). * * for-statement: for ( expression[opt] ; expression[opt] ; expression[opt] * ) statement for ( nested-declaration expression[opt] ; expression[opt] * ) statement * * The form with a declaration is new in C99. * * ??? In accordance with the old parser, the declaration may be a nested * function, which is then rejected in check_for_loop_decls, but does it * make any sense for this to be included in the grammar? Note in * particular that the nested function does not include a trailing ';', * whereas the "declaration" production includes one. Also, can we reject * bad declarations earlier and cheaper than check_for_loop_decls? */ static void c_parser_for_statement(c_parser * parser) { tree block, cond, incr, save_break, save_cont, body; location_t loc = c_parser_peek_token(parser)->location; location_t for_loc = c_parser_peek_token(parser)->location; gcc_assert(c_parser_next_token_is_keyword(parser, RID_FOR)); c_parser_consume_token(parser); block = c_begin_compound_stmt(flag_isoc99); if (c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { /* Parse the initialization declaration or expression. 
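 */
/*
 * Illustration (editor's sketch, not part of the GCC sources; #if 0
 * keeps it out of any build): the initialization forms distinguished
 * below.
 */
#if 0
void loops(void)
{
  int j;
  for (;;)                     /* empty initialization                  */
    break;
  for (j = 0; j < 4; j++)      /* expression initialization             */
    ;
  for (int i = 0; i < 4; i++)  /* declaration form, new in C99          */
    ;
}
#endif
/*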
*/ if (c_parser_next_token_is(parser, CPP_SEMICOLON)) { c_parser_consume_token(parser); c_finish_expr_stmt(loc, NULL_TREE); } else if (c_parser_next_token_starts_declspecs(parser)) { c_parser_declaration_or_fndef(parser, true, true, true, true); check_for_loop_decls(for_loc); } else if (c_parser_next_token_is_keyword(parser, RID_EXTENSION)) { /* * __extension__ can start a declaration, but is also an * unary operator that can start an expression. Consume all * but the last of a possible series of __extension__ to * determine which. */ while (c_parser_peek_2nd_token(parser)->type == CPP_KEYWORD && (c_parser_peek_2nd_token(parser)->keyword == RID_EXTENSION)) c_parser_consume_token(parser); if (c_token_starts_declspecs(c_parser_peek_2nd_token(parser))) { int ext; ext = disable_extension_diagnostics(); c_parser_consume_token(parser); c_parser_declaration_or_fndef(parser, true, true, true, true); restore_extension_diagnostics(ext); check_for_loop_decls(for_loc); } else goto init_expr; } else { init_expr: c_finish_expr_stmt(loc, c_parser_expression(parser).value); c_parser_skip_until_found(parser, CPP_SEMICOLON, "expected %<;%>"); } /* Parse the loop condition. */ if (c_parser_next_token_is(parser, CPP_SEMICOLON)) { c_parser_consume_token(parser); cond = NULL_TREE; } else { cond = c_parser_condition(parser); c_parser_skip_until_found(parser, CPP_SEMICOLON, "expected %<;%>"); } /* Parse the increment expression. */ if (c_parser_next_token_is(parser, CPP_CLOSE_PAREN)) incr = c_process_expr_stmt(loc, NULL_TREE); else incr = c_process_expr_stmt(loc, c_parser_expression(parser).value); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else { cond = error_mark_node; incr = error_mark_node; } save_break = c_break_label; c_break_label = NULL_TREE; save_cont = c_cont_label; c_cont_label = NULL_TREE; body = c_parser_c99_block_statement(parser); c_finish_loop(loc, cond, incr, body, c_break_label, c_cont_label, true); add_stmt(c_end_compound_stmt(loc, block, flag_isoc99)); c_break_label = save_break; c_cont_label = save_cont; } /* * Parse an asm statement, a GNU extension. This is a full-blown asm * statement with inputs, outputs, clobbers, and volatile tag allowed. * * asm-statement: asm type-qualifier[opt] ( asm-argument ) ; asm * type-qualifier[opt] goto ( asm-goto-argument ) ; * * asm-argument: asm-string-literal asm-string-literal : asm-operands[opt] * asm-string-literal : asm-operands[opt] : asm-operands[opt] * asm-string-literal : asm-operands[opt] : asm-operands[opt] : * asm-clobbers[opt] * * asm-goto-argument: asm-string-literal : : asm-operands[opt] : * asm-clobbers[opt] \ : asm-goto-operands * * Qualifiers other than volatile are accepted in the syntax but warned for. 
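 */
/*
 * Illustration (editor's sketch, not part of the GCC sources; #if 0
 * keeps it out of any build, and the mnemonic assumes x86): the
 * colon-delimited sections parsed below.
 */
#if 0
int twice(int x)
{
  int r;
  asm volatile ("leal (%1,%1), %0"  /* asm-string-literal               */
                : "=r" (r)          /* outputs                          */
                : "r" (x)           /* inputs                           */
                : "cc");            /* clobbers                         */
  return r;
}
#endif
/*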
*/ static tree c_parser_asm_statement(c_parser * parser) { tree quals, str, outputs, inputs, clobbers, labels, ret; bool simple, is_goto; location_t asm_loc = c_parser_peek_token(parser)->location; int section, nsections; gcc_assert(c_parser_next_token_is_keyword(parser, RID_ASM)); c_parser_consume_token(parser); if (c_parser_next_token_is_keyword(parser, RID_VOLATILE)) { quals = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); } else if (c_parser_next_token_is_keyword(parser, RID_CONST) || c_parser_next_token_is_keyword(parser, RID_RESTRICT)) { warning_at(c_parser_peek_token(parser)->location, 0, "%E qualifier ignored on asm", c_parser_peek_token(parser)->value); quals = NULL_TREE; c_parser_consume_token(parser); } else quals = NULL_TREE; is_goto = false; if (c_parser_next_token_is_keyword(parser, RID_GOTO)) { c_parser_consume_token(parser); is_goto = true; } /* * ??? Follow the C++ parser rather than using the * lex_untranslated_string kludge. */ parser->lex_untranslated_string = true; ret = NULL; if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) goto error; str = c_parser_asm_string_literal(parser); if (str == NULL_TREE) goto error_close_paren; simple = true; outputs = NULL_TREE; inputs = NULL_TREE; clobbers = NULL_TREE; labels = NULL_TREE; if (c_parser_next_token_is(parser, CPP_CLOSE_PAREN) && !is_goto) goto done_asm; /* Parse each colon-delimited section of operands. */ nsections = 3 + is_goto; for (section = 0; section < nsections; ++section) { if (!c_parser_require(parser, CPP_COLON, is_goto ? "expected %<:%>" : "expected %<:%> or %<)%>")) goto error_close_paren; /* Once past any colon, we're no longer a simple asm. */ simple = false; if ((!c_parser_next_token_is(parser, CPP_COLON) && !c_parser_next_token_is(parser, CPP_CLOSE_PAREN)) || section == 3) switch (section) { case 0: /* * For asm goto, we don't allow output operands, but * reserve the slot for a future extension that does * allow them. */ if (!is_goto) outputs = c_parser_asm_operands(parser, false); break; case 1: inputs = c_parser_asm_operands(parser, true); break; case 2: clobbers = c_parser_asm_clobbers(parser); break; case 3: labels = c_parser_asm_goto_operands(parser); break; default: gcc_unreachable(); } if (c_parser_next_token_is(parser, CPP_CLOSE_PAREN) && !is_goto) goto done_asm; } done_asm: if (!c_parser_require(parser, CPP_CLOSE_PAREN, "expected %<)%>")) { c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); goto error; } if (!c_parser_require(parser, CPP_SEMICOLON, "expected %<;%>")) c_parser_skip_to_end_of_block_or_statement(parser); ret = build_asm_stmt(quals, build_asm_expr(asm_loc, str, outputs, inputs, clobbers, labels, simple)); error: parser->lex_untranslated_string = false; return ret; error_close_paren: c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); goto error; } /* * Parse asm operands, a GNU extension. If CONVERT_P (for inputs but not * outputs), apply the default conversion of functions and arrays to * pointers. 
* * asm-operands: asm-operand asm-operands , asm-operand * * asm-operand: asm-string-literal ( expression ) [ identifier ] * asm-string-literal ( expression ) */ static tree c_parser_asm_operands(c_parser * parser, bool convert_p) { tree list = NULL_TREE; location_t loc; while (true) { tree name, str; struct c_expr expr; if (c_parser_next_token_is(parser, CPP_OPEN_SQUARE)) { c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_NAME)) { tree id = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); name = build_string(IDENTIFIER_LENGTH(id), IDENTIFIER_POINTER(id)); } else { c_parser_error(parser, "expected identifier"); c_parser_skip_until_found(parser, CPP_CLOSE_SQUARE, NULL); return NULL_TREE; } c_parser_skip_until_found(parser, CPP_CLOSE_SQUARE, "expected %<]%>"); } else name = NULL_TREE; str = c_parser_asm_string_literal(parser); if (str == NULL_TREE) return NULL_TREE; parser->lex_untranslated_string = false; if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { parser->lex_untranslated_string = true; return NULL_TREE; } loc = c_parser_peek_token(parser)->location; expr = c_parser_expression(parser); if (convert_p) expr = default_function_array_conversion(loc, expr); expr.value = c_fully_fold(expr.value, false, NULL); parser->lex_untranslated_string = true; if (!c_parser_require(parser, CPP_CLOSE_PAREN, "expected %<)%>")) { c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); return NULL_TREE; } list = chainon(list, build_tree_list(build_tree_list(name, str), expr.value)); if (c_parser_next_token_is(parser, CPP_COMMA)) c_parser_consume_token(parser); else break; } return list; } /* * Parse asm clobbers, a GNU extension. * * asm-clobbers: asm-string-literal asm-clobbers , asm-string-literal */ static tree c_parser_asm_clobbers(c_parser * parser) { tree list = NULL_TREE; while (true) { tree str = c_parser_asm_string_literal(parser); if (str) list = tree_cons(NULL_TREE, str, list); else return NULL_TREE; if (c_parser_next_token_is(parser, CPP_COMMA)) c_parser_consume_token(parser); else break; } return list; } /* * Parse asm goto labels, a GNU extension. * * asm-goto-operands: identifier asm-goto-operands , identifier */ static tree c_parser_asm_goto_operands(c_parser * parser) { tree list = NULL_TREE; while (true) { tree name, label; if (c_parser_next_token_is(parser, CPP_NAME)) { c_token *tok = c_parser_peek_token(parser); name = tok->value; label = lookup_label_for_goto(tok->location, name); c_parser_consume_token(parser); TREE_USED(label) = 1; } else { c_parser_error(parser, "expected identifier"); return NULL_TREE; } name = build_string(IDENTIFIER_LENGTH(name), IDENTIFIER_POINTER(name)); list = tree_cons(name, label, list); if (c_parser_next_token_is(parser, CPP_COMMA)) c_parser_consume_token(parser); else return nreverse(list); } } /* * Parse an expression other than a compound expression; that is, an * assignment expression (C90 6.3.16, C99 6.5.16). If AFTER is not NULL * then it is an Objective-C message expression which is the * primary-expression starting the expression as an initializer. * * assignment-expression: conditional-expression unary-expression * assignment-operator assignment-expression * * assignment-operator: one of = *= /= %= += -= <<= >>= &= ^= |= * * In GNU C we accept any conditional expression on the LHS and diagnose the * invalid lvalue rather than producing a syntax error. 
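 */
/*
 * Illustration (editor's sketch, not part of the GCC sources; #if 0
 * keeps it out of any build): assignment groups right-to-left, and each
 * compound operator maps to the tree code chosen in the switch below.
 */
#if 0
void assign_demo(void)
{
  int a, b, c = 1;
  a = b = c;   /* right-associative: parsed as a = (b = c)              */
  a += 2;      /* CPP_PLUS_EQ -> PLUS_EXPR                              */
  a <<= 1;     /* CPP_LSHIFT_EQ -> LSHIFT_EXPR                          */
}
#endif
/*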
*/ static struct c_expr c_parser_expr_no_commas(c_parser * parser, struct c_expr *after) { struct c_expr lhs, rhs, ret; enum tree_code code; location_t op_location, exp_location; gcc_assert(!after || c_dialect_objc()); lhs = c_parser_conditional_expression(parser, after); op_location = c_parser_peek_token(parser)->location; switch (c_parser_peek_token(parser)->type) { case CPP_EQ: code = NOP_EXPR; break; case CPP_MULT_EQ: code = MULT_EXPR; break; case CPP_DIV_EQ: code = TRUNC_DIV_EXPR; break; case CPP_MOD_EQ: code = TRUNC_MOD_EXPR; break; case CPP_PLUS_EQ: code = PLUS_EXPR; break; case CPP_MINUS_EQ: code = MINUS_EXPR; break; case CPP_LSHIFT_EQ: code = LSHIFT_EXPR; break; case CPP_RSHIFT_EQ: code = RSHIFT_EXPR; break; case CPP_AND_EQ: code = BIT_AND_EXPR; break; case CPP_XOR_EQ: code = BIT_XOR_EXPR; break; case CPP_OR_EQ: code = BIT_IOR_EXPR; break; default: return lhs; } c_parser_consume_token(parser); exp_location = c_parser_peek_token(parser)->location; rhs = c_parser_expr_no_commas(parser, NULL); rhs = default_function_array_conversion(exp_location, rhs); ret.value = build_modify_expr(op_location, lhs.value, lhs.original_type, code, exp_location, rhs.value, rhs.original_type); if (code == NOP_EXPR) ret.original_code = MODIFY_EXPR; else { TREE_NO_WARNING(ret.value) = 1; ret.original_code = ERROR_MARK; } ret.original_type = NULL; return ret; } /* * Parse a conditional expression (C90 6.3.15, C99 6.5.15). If AFTER is * not NULL then it is an Objective-C message expression which is the * primary-expression starting the expression as an initializer. * * conditional-expression: logical-OR-expression logical-OR-expression ? * expression : conditional-expression * * GNU extensions: * * conditional-expression: logical-OR-expression ? : conditional-expression */ static struct c_expr c_parser_conditional_expression(c_parser * parser, struct c_expr *after) { struct c_expr cond, exp1, exp2, ret; location_t cond_loc, colon_loc; gcc_assert(!after || c_dialect_objc()); cond = c_parser_binary_expression(parser, after); if (c_parser_next_token_is_not(parser, CPP_QUERY)) return cond; cond_loc = c_parser_peek_token(parser)->location; cond = default_function_array_conversion(cond_loc, cond); c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_COLON)) { tree eptype = NULL_TREE; pedwarn(c_parser_peek_token(parser)->location, OPT_pedantic, "ISO C forbids omitting the middle term of a ?: expression"); if (TREE_CODE(cond.value) == EXCESS_PRECISION_EXPR) { eptype = TREE_TYPE(cond.value); cond.value = TREE_OPERAND(cond.value, 0); } /* Make sure first operand is calculated only once. 
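 */
/*
 * Illustration (editor's sketch, not part of the GCC sources; #if 0
 * keeps it out of any build): the GNU conditional with omitted middle
 * operand handled here; c_save_expr below is what makes the shared
 * first operand safe to use twice.
 */
#if 0
int first_nonzero(int p, int q)
{
  return p ? : q;  /* like p ? p : q, but p is evaluated only once      */
}
#endif
/*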
*/ exp1.value = c_save_expr(default_conversion(cond.value)); if (eptype) exp1.value = build1(EXCESS_PRECISION_EXPR, eptype, exp1.value); exp1.original_type = NULL; cond.value = c_objc_common_truthvalue_conversion(cond_loc, exp1.value); c_inhibit_evaluation_warnings += cond.value == truthvalue_true_node; } else { cond.value = c_objc_common_truthvalue_conversion (cond_loc, default_conversion(cond.value)); c_inhibit_evaluation_warnings += cond.value == truthvalue_false_node; exp1 = c_parser_expression_conv(parser); c_inhibit_evaluation_warnings += ((cond.value == truthvalue_true_node) - (cond.value == truthvalue_false_node)); } colon_loc = c_parser_peek_token(parser)->location; if (!c_parser_require(parser, CPP_COLON, "expected %<:%>")) { c_inhibit_evaluation_warnings -= cond.value == truthvalue_true_node; ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } { location_t exp2_loc = c_parser_peek_token(parser)->location; exp2 = c_parser_conditional_expression(parser, NULL); exp2 = default_function_array_conversion(exp2_loc, exp2); } c_inhibit_evaluation_warnings -= cond.value == truthvalue_true_node; ret.value = build_conditional_expr(colon_loc, cond.value, cond.original_code == C_MAYBE_CONST_EXPR, exp1.value, exp1.original_type, exp2.value, exp2.original_type); ret.original_code = ERROR_MARK; if (exp1.value == error_mark_node || exp2.value == error_mark_node) ret.original_type = NULL; else { tree t1, t2; /* * If both sides are enum type, the default conversion will have * made the type of the result be an integer type. We want to * remember the enum types we started with. */ t1 = exp1.original_type ? exp1.original_type : TREE_TYPE(exp1.value); t2 = exp2.original_type ? exp2.original_type : TREE_TYPE(exp2.value); ret.original_type = ((t1 != error_mark_node && t2 != error_mark_node && (TYPE_MAIN_VARIANT(t1) == TYPE_MAIN_VARIANT(t2))) ? t1 : NULL); } return ret; } /* * Parse a binary expression; that is, a logical-OR-expression (C90 * 6.3.5-6.3.14, C99 6.5.5-6.5.14). If AFTER is not NULL then it is an * Objective-C message expression which is the primary-expression * starting the expression as an initializer. 
* * multiplicative-expression: cast-expression multiplicative-expression * * cast-expression multiplicative-expression / cast-expression * multiplicative-expression % cast-expression * * additive-expression: multiplicative-expression additive-expression + * multiplicative-expression additive-expression - * multiplicative-expression * * shift-expression: additive-expression shift-expression << * additive-expression shift-expression >> additive-expression * * relational-expression: shift-expression relational-expression < * shift-expression relational-expression > shift-expression * relational-expression <= shift-expression relational-expression >= * shift-expression * * equality-expression: relational-expression equality-expression == * relational-expression equality-expression != relational-expression * * AND-expression: equality-expression AND-expression & equality-expression * * exclusive-OR-expression: AND-expression exclusive-OR-expression ^ * AND-expression * * inclusive-OR-expression: exclusive-OR-expression inclusive-OR-expression * | exclusive-OR-expression * * logical-AND-expression: inclusive-OR-expression logical-AND-expression && * inclusive-OR-expression * * logical-OR-expression: logical-AND-expression logical-OR-expression || * logical-AND-expression */ static struct c_expr c_parser_binary_expression(c_parser * parser, struct c_expr *after) { /* * A binary expression is parsed using operator-precedence parsing, * with the operands being cast expressions. All the binary * operators are left-associative. Thus a binary expression is of * form: * * E0 op1 E1 op2 E2 ... * * which we represent on a stack. On the stack, the precedence levels * are strictly increasing. When a new operator is encountered of * higher precedence than that at the top of the stack, it is pushed; * its LHS is the top expression, and its RHS is everything parsed * until it is popped. When a new operator is encountered with * precedence less than or equal to that at the top of the stack, * triples E[i-1] op[i] E[i] are popped and replaced by the result of * the operation until the operator at the top of the stack has lower * precedence than the new operator or there is only one element on * the stack; then the top expression is the LHS of the new operator. * In the case of logical AND and OR expressions, we also need to * adjust c_inhibit_evaluation_warnings as appropriate when the * operators are pushed and popped. */ /* * The precedence levels, where 0 is a dummy lowest level used for * the bottom of the stack. */ enum prec { PREC_NONE, PREC_LOGOR, PREC_LOGAND, PREC_BITOR, PREC_BITXOR, PREC_BITAND, PREC_EQ, PREC_REL, PREC_SHIFT, PREC_ADD, PREC_MULT, NUM_PRECS }; struct { /* The expression at this stack level. */ struct c_expr expr; /* * The precedence of the operator on its left, PREC_NONE at the * bottom of the stack. */ enum prec prec; /* The operation on its left. */ enum tree_code op; /* The source location of this operation. */ location_t loc; } stack[NUM_PRECS]; int sp; /* Location of the binary operator. */ location_t binary_loc = UNKNOWN_LOCATION; /* Quiet warning. 
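* (binary_loc is always assigned before it is read once a binary operator has been seen.) * * As a worked illustration, parsing 'a + b * c - d': '+ b' is pushed above 'a'; '*' binds tighter, so '* c' is pushed on top; at '-', first 'b * c' and then 'a + (b * c)' are popped, and '-' is pushed with that result as its LHS.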
*/ #define POP \ do { \ switch (stack[sp].op) \ { \ case TRUTH_ANDIF_EXPR: \ c_inhibit_evaluation_warnings -= (stack[sp - 1].expr.value \ == truthvalue_false_node); \ break; \ case TRUTH_ORIF_EXPR: \ c_inhibit_evaluation_warnings -= (stack[sp - 1].expr.value \ == truthvalue_true_node); \ break; \ default: \ break; \ } \ stack[sp - 1].expr \ = default_function_array_conversion (stack[sp - 1].loc, \ stack[sp - 1].expr); \ stack[sp].expr \ = default_function_array_conversion (stack[sp].loc, stack[sp].expr); \ stack[sp - 1].expr = parser_build_binary_op (stack[sp].loc, \ stack[sp].op, \ stack[sp - 1].expr, \ stack[sp].expr); \ sp--; \ } while (0) gcc_assert(!after || c_dialect_objc()); stack[0].loc = c_parser_peek_token(parser)->location; stack[0].expr = c_parser_cast_expression(parser, after); stack[0].prec = PREC_NONE; sp = 0; while (true) { enum prec oprec; enum tree_code ocode; if (parser->error) goto out; switch (c_parser_peek_token(parser)->type) { case CPP_MULT: oprec = PREC_MULT; ocode = MULT_EXPR; break; case CPP_DIV: oprec = PREC_MULT; ocode = TRUNC_DIV_EXPR; break; case CPP_MOD: oprec = PREC_MULT; ocode = TRUNC_MOD_EXPR; break; case CPP_PLUS: oprec = PREC_ADD; ocode = PLUS_EXPR; break; case CPP_MINUS: oprec = PREC_ADD; ocode = MINUS_EXPR; break; case CPP_LSHIFT: oprec = PREC_SHIFT; ocode = LSHIFT_EXPR; break; case CPP_RSHIFT: oprec = PREC_SHIFT; ocode = RSHIFT_EXPR; break; case CPP_LESS: oprec = PREC_REL; ocode = LT_EXPR; break; case CPP_GREATER: oprec = PREC_REL; ocode = GT_EXPR; break; case CPP_LESS_EQ: oprec = PREC_REL; ocode = LE_EXPR; break; case CPP_GREATER_EQ: oprec = PREC_REL; ocode = GE_EXPR; break; case CPP_EQ_EQ: oprec = PREC_EQ; ocode = EQ_EXPR; break; case CPP_NOT_EQ: oprec = PREC_EQ; ocode = NE_EXPR; break; case CPP_AND: oprec = PREC_BITAND; ocode = BIT_AND_EXPR; break; case CPP_XOR: oprec = PREC_BITXOR; ocode = BIT_XOR_EXPR; break; case CPP_OR: oprec = PREC_BITOR; ocode = BIT_IOR_EXPR; break; case CPP_AND_AND: oprec = PREC_LOGAND; ocode = TRUTH_ANDIF_EXPR; break; case CPP_OR_OR: oprec = PREC_LOGOR; ocode = TRUTH_ORIF_EXPR; break; default: /* * Not a binary operator, so end of the binary expression. */ goto out; } binary_loc = c_parser_peek_token(parser)->location; c_parser_consume_token(parser); while (oprec <= stack[sp].prec) POP; switch (ocode) { case TRUTH_ANDIF_EXPR: stack[sp].expr = default_function_array_conversion(stack[sp].loc, stack[sp].expr); stack[sp].expr.value = c_objc_common_truthvalue_conversion (stack[sp].loc, default_conversion(stack[sp].expr.value)); c_inhibit_evaluation_warnings += (stack[sp].expr.value == truthvalue_false_node); break; case TRUTH_ORIF_EXPR: stack[sp].expr = default_function_array_conversion(stack[sp].loc, stack[sp].expr); stack[sp].expr.value = c_objc_common_truthvalue_conversion (stack[sp].loc, default_conversion(stack[sp].expr.value)); c_inhibit_evaluation_warnings += (stack[sp].expr.value == truthvalue_true_node); break; default: break; } sp++; stack[sp].loc = binary_loc; stack[sp].expr = c_parser_cast_expression(parser, NULL); stack[sp].prec = oprec; stack[sp].op = ocode; } out: while (sp > 0) POP; return stack[0].expr; #undef POP } /* * Parse a cast expression (C90 6.3.4, C99 6.5.4). If AFTER is not NULL * then it is an Objective-C message expression which is the * primary-expression starting the expression as an initializer. 
* * cast-expression: unary-expression ( type-name ) unary-expression */ static struct c_expr c_parser_cast_expression(c_parser * parser, struct c_expr *after) { location_t cast_loc = c_parser_peek_token(parser)->location; gcc_assert(!after || c_dialect_objc()); if (after) return c_parser_postfix_expression_after_primary(parser, cast_loc, *after); /* * If the expression begins with a parenthesized type name, it may be * either a cast or a compound literal; we need to see whether the * next character is '{' to tell the difference. If not, it is a * unary expression. */ if (c_parser_next_token_is(parser, CPP_OPEN_PAREN) && c_token_starts_typename(c_parser_peek_2nd_token(parser))) { struct c_type_name *type_name; struct c_expr ret; struct c_expr expr; c_parser_consume_token(parser); type_name = c_parser_type_name(parser); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (type_name == NULL) { ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } /* Save casted types in the function's used types hash table. */ used_types_insert(type_name->specs->type); if (c_parser_next_token_is(parser, CPP_OPEN_BRACE)) return c_parser_postfix_expression_after_paren_type(parser, type_name, cast_loc); { location_t expr_loc = c_parser_peek_token(parser)->location; expr = c_parser_cast_expression(parser, NULL); expr = default_function_array_conversion(expr_loc, expr); } ret.value = c_cast_expr(cast_loc, type_name, expr.value); ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } else return c_parser_unary_expression(parser); } /* * Parse a unary expression (C90 6.3.3, C99 6.5.3). * * unary-expression: postfix-expression ++ unary-expression -- * unary-expression unary-operator cast-expression sizeof * unary-expression sizeof ( type-name ) * * unary-operator: one of & * + - ~ ! * * GNU extensions: * * unary-expression: __alignof__ unary-expression __alignof__ ( type-name ) * && identifier * * unary-operator: one of __extension__ __real__ __imag__ * * In addition, the GNU syntax treats ++ and -- as unary operators, so they * may be applied to cast expressions with errors for non-lvalues given * later. 
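* For example, ++(int) x is accepted by this grammar and only rejected afterwards with an lvalue diagnostic.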
*/ static struct c_expr c_parser_unary_expression(c_parser * parser) { int ext; struct c_expr ret, op; location_t op_loc = c_parser_peek_token(parser)->location; location_t exp_loc; ret.original_code = ERROR_MARK; ret.original_type = NULL; switch (c_parser_peek_token(parser)->type) { case CPP_PLUS_PLUS: c_parser_consume_token(parser); exp_loc = c_parser_peek_token(parser)->location; op = c_parser_cast_expression(parser, NULL); op = default_function_array_conversion(exp_loc, op); return parser_build_unary_op(op_loc, PREINCREMENT_EXPR, op); case CPP_MINUS_MINUS: c_parser_consume_token(parser); exp_loc = c_parser_peek_token(parser)->location; op = c_parser_cast_expression(parser, NULL); op = default_function_array_conversion(exp_loc, op); return parser_build_unary_op(op_loc, PREDECREMENT_EXPR, op); case CPP_AND: c_parser_consume_token(parser); return parser_build_unary_op(op_loc, ADDR_EXPR, c_parser_cast_expression(parser, NULL)); case CPP_MULT: c_parser_consume_token(parser); exp_loc = c_parser_peek_token(parser)->location; op = c_parser_cast_expression(parser, NULL); op = default_function_array_conversion(exp_loc, op); ret.value = build_indirect_ref(op_loc, op.value, RO_UNARY_STAR); return ret; case CPP_PLUS: if (!c_dialect_objc() && !in_system_header) warning_at(op_loc, OPT_Wtraditional, "traditional C rejects the unary plus operator"); c_parser_consume_token(parser); exp_loc = c_parser_peek_token(parser)->location; op = c_parser_cast_expression(parser, NULL); op = default_function_array_conversion(exp_loc, op); return parser_build_unary_op(op_loc, CONVERT_EXPR, op); case CPP_MINUS: c_parser_consume_token(parser); exp_loc = c_parser_peek_token(parser)->location; op = c_parser_cast_expression(parser, NULL); op = default_function_array_conversion(exp_loc, op); return parser_build_unary_op(op_loc, NEGATE_EXPR, op); case CPP_COMPL: c_parser_consume_token(parser); exp_loc = c_parser_peek_token(parser)->location; op = c_parser_cast_expression(parser, NULL); op = default_function_array_conversion(exp_loc, op); return parser_build_unary_op(op_loc, BIT_NOT_EXPR, op); case CPP_NOT: c_parser_consume_token(parser); exp_loc = c_parser_peek_token(parser)->location; op = c_parser_cast_expression(parser, NULL); op = default_function_array_conversion(exp_loc, op); return parser_build_unary_op(op_loc, TRUTH_NOT_EXPR, op); case CPP_AND_AND: /* Refer to the address of a label as a pointer. 
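* This is the GNU labels-as-values extension, as in 'void *p = &&retry;' followed later by 'goto *p;'.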
*/ c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_NAME)) { ret.value = finish_label_address_expr (c_parser_peek_token(parser)->value, op_loc); c_parser_consume_token(parser); } else { c_parser_error(parser, "expected identifier"); ret.value = error_mark_node; } return ret; case CPP_KEYWORD: switch (c_parser_peek_token(parser)->keyword) { case RID_SIZEOF: return c_parser_sizeof_expression(parser); case RID_ALIGNOF: return c_parser_alignof_expression(parser); case RID_EXTENSION: c_parser_consume_token(parser); ext = disable_extension_diagnostics(); ret = c_parser_cast_expression(parser, NULL); restore_extension_diagnostics(ext); return ret; case RID_REALPART: c_parser_consume_token(parser); exp_loc = c_parser_peek_token(parser)->location; op = c_parser_cast_expression(parser, NULL); op = default_function_array_conversion(exp_loc, op); return parser_build_unary_op(op_loc, REALPART_EXPR, op); case RID_IMAGPART: c_parser_consume_token(parser); exp_loc = c_parser_peek_token(parser)->location; op = c_parser_cast_expression(parser, NULL); op = default_function_array_conversion(exp_loc, op); return parser_build_unary_op(op_loc, IMAGPART_EXPR, op); default: return c_parser_postfix_expression(parser); } default: return c_parser_postfix_expression(parser); } } /* Parse a sizeof expression. */ static struct c_expr c_parser_sizeof_expression(c_parser * parser) { struct c_expr expr; location_t expr_loc; gcc_assert(c_parser_next_token_is_keyword(parser, RID_SIZEOF)); c_parser_consume_token(parser); c_inhibit_evaluation_warnings++; in_sizeof++; if (c_parser_next_token_is(parser, CPP_OPEN_PAREN) && c_token_starts_typename(c_parser_peek_2nd_token(parser))) { /* * Either sizeof ( type-name ) or sizeof unary-expression * starting with a compound literal. */ struct c_type_name *type_name; c_parser_consume_token(parser); expr_loc = c_parser_peek_token(parser)->location; type_name = c_parser_type_name(parser); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (type_name == NULL) { struct c_expr ret; c_inhibit_evaluation_warnings--; in_sizeof--; ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } if (c_parser_next_token_is(parser, CPP_OPEN_BRACE)) { expr = c_parser_postfix_expression_after_paren_type(parser, type_name, expr_loc); goto sizeof_expr; } /* sizeof ( type-name ). */ c_inhibit_evaluation_warnings--; in_sizeof--; return c_expr_sizeof_type(expr_loc, type_name); } else { expr_loc = c_parser_peek_token(parser)->location; expr = c_parser_unary_expression(parser); sizeof_expr: c_inhibit_evaluation_warnings--; in_sizeof--; if (TREE_CODE(expr.value) == COMPONENT_REF && DECL_C_BIT_FIELD(TREE_OPERAND(expr.value, 1))) error_at(expr_loc, "%<sizeof%> applied to a bit-field"); return c_expr_sizeof_expr(expr_loc, expr); } } /* Parse an alignof expression. */ static struct c_expr c_parser_alignof_expression(c_parser * parser) { struct c_expr expr; location_t loc = c_parser_peek_token(parser)->location; gcc_assert(c_parser_next_token_is_keyword(parser, RID_ALIGNOF)); c_parser_consume_token(parser); c_inhibit_evaluation_warnings++; in_alignof++; if (c_parser_next_token_is(parser, CPP_OPEN_PAREN) && c_token_starts_typename(c_parser_peek_2nd_token(parser))) { /* * Either __alignof__ ( type-name ) or __alignof__ * unary-expression starting with a compound literal. 
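* E.g. __alignof__ (long double) versus __alignof__ (struct s){ 0 }; only the '{' after the ')' tells the two apart, exactly as for sizeof above.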
*/ location_t loc; struct c_type_name *type_name; struct c_expr ret; c_parser_consume_token(parser); loc = c_parser_peek_token(parser)->location; type_name = c_parser_type_name(parser); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (type_name == NULL) { struct c_expr ret; c_inhibit_evaluation_warnings--; in_alignof--; ret.value = error_mark_node; ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } if (c_parser_next_token_is(parser, CPP_OPEN_BRACE)) { expr = c_parser_postfix_expression_after_paren_type(parser, type_name, loc); goto alignof_expr; } /* alignof ( type-name ). */ c_inhibit_evaluation_warnings--; in_alignof--; ret.value = c_alignof(loc, groktypename(type_name, NULL, NULL)); ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } else { struct c_expr ret; expr = c_parser_unary_expression(parser); alignof_expr: c_inhibit_evaluation_warnings--; in_alignof--; ret.value = c_alignof_expr(loc, expr.value); ret.original_code = ERROR_MARK; ret.original_type = NULL; return ret; } } /* * Parse a postfix expression (C90 6.3.1-6.3.2, C99 6.5.1-6.5.2). * * postfix-expression: primary-expression postfix-expression [ expression ] * postfix-expression ( argument-expression-list[opt] ) * postfix-expression . identifier postfix-expression -> identifier * postfix-expression ++ postfix-expression -- ( type-name ) { * initializer-list } ( type-name ) { initializer-list , } * * argument-expression-list: argument-expression argument-expression-list , * argument-expression * * primary-expression: identifier constant string-literal ( expression ) * * GNU extensions: * * primary-expression: __func__ (treated as a keyword in GNU C) __FUNCTION__ * __PRETTY_FUNCTION__ ( compound-statement ) __builtin_va_arg ( * assignment-expression , type-name ) __builtin_offsetof ( type-name , * offsetof-member-designator ) __builtin_choose_expr ( * assignment-expression , assignment-expression , assignment-expression * ) __builtin_types_compatible_p ( type-name , type-name ) * * offsetof-member-designator: identifier offsetof-member-designator . 
* identifier offsetof-member-designator [ expression ] * * Objective-C: * * primary-expression: [ objc-receiver objc-message-args ] @selector ( * objc-selector-arg ) @protocol ( identifier ) @encode ( type-name ) * objc-string-literal */ static struct c_expr c_parser_postfix_expression(c_parser * parser) { struct c_expr expr, e1, e2, e3; struct c_type_name *t1, *t2; location_t loc = c_parser_peek_token(parser)->location; expr.original_code = ERROR_MARK; expr.original_type = NULL; switch (c_parser_peek_token(parser)->type) { case CPP_NUMBER: expr.value = c_parser_peek_token(parser)->value; loc = c_parser_peek_token(parser)->location; c_parser_consume_token(parser); if (TREE_CODE(expr.value) == FIXED_CST && !targetm.fixed_point_supported_p()) { error_at(loc, "fixed-point types not supported for this target"); expr.value = error_mark_node; } break; case CPP_CHAR: case CPP_CHAR16: case CPP_CHAR32: case CPP_WCHAR: expr.value = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); break; case CPP_STRING: case CPP_STRING16: case CPP_STRING32: case CPP_WSTRING: case CPP_UTF8STRING: expr.value = c_parser_peek_token(parser)->value; expr.original_code = STRING_CST; c_parser_consume_token(parser); break; case CPP_OBJC_STRING: gcc_assert(c_dialect_objc()); expr.value = objc_build_string_object(c_parser_peek_token(parser)->value); c_parser_consume_token(parser); break; case CPP_NAME: if (c_parser_peek_token(parser)->id_kind != C_ID_ID) { c_parser_error(parser, "expected expression"); expr.value = error_mark_node; break; } { tree id = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); expr.value = build_external_ref(loc, id, (c_parser_peek_token(parser)->type == CPP_OPEN_PAREN), &expr.original_type); } break; case CPP_OPEN_PAREN: /* * A parenthesized expression, statement expression or compound * literal. */ if (c_parser_peek_2nd_token(parser)->type == CPP_OPEN_BRACE) { /* A statement expression. */ tree stmt; location_t brace_loc; c_parser_consume_token(parser); brace_loc = c_parser_peek_token(parser)->location; c_parser_consume_token(parser); if (cur_stmt_list == NULL) { error_at(loc, "braced-group within expression allowed " "only inside a function"); parser->error = true; c_parser_skip_until_found(parser, CPP_CLOSE_BRACE, NULL); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } stmt = c_begin_stmt_expr(); c_parser_compound_statement_nostart(parser); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); pedwarn(loc, OPT_pedantic, "ISO C forbids braced-groups within expressions"); expr.value = c_finish_stmt_expr(brace_loc, stmt); } else if (c_token_starts_typename(c_parser_peek_2nd_token(parser))) { /* * A compound literal. ??? Can we actually get here rather * than going directly to * c_parser_postfix_expression_after_paren_type from * elsewhere? */ location_t loc; struct c_type_name *type_name; c_parser_consume_token(parser); loc = c_parser_peek_token(parser)->location; type_name = c_parser_type_name(parser); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (type_name == NULL) { expr.value = error_mark_node; } else expr = c_parser_postfix_expression_after_paren_type(parser, type_name, loc); } else { /* A parenthesized expression. 
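* (a plain parenthesized expression, as opposed to the ({ ... }) statement expression handled above)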
*/ c_parser_consume_token(parser); expr = c_parser_expression(parser); if (TREE_CODE(expr.value) == MODIFY_EXPR) TREE_NO_WARNING(expr.value) = 1; if (expr.original_code != C_MAYBE_CONST_EXPR) expr.original_code = ERROR_MARK; /* Don't change EXPR.ORIGINAL_TYPE. */ c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); } break; case CPP_KEYWORD: switch (c_parser_peek_token(parser)->keyword) { case RID_FUNCTION_NAME: case RID_PRETTY_FUNCTION_NAME: case RID_C99_FUNCTION_NAME: expr.value = fname_decl(loc, c_parser_peek_token(parser)->keyword, c_parser_peek_token(parser)->value); c_parser_consume_token(parser); break; case RID_VA_ARG: c_parser_consume_token(parser); if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } e1 = c_parser_expr_no_commas(parser, NULL); e1.value = c_fully_fold(e1.value, false, NULL); if (!c_parser_require(parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } loc = c_parser_peek_token(parser)->location; t1 = c_parser_type_name(parser); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (t1 == NULL) { expr.value = error_mark_node; } else { tree type_expr = NULL_TREE; expr.value = c_build_va_arg(loc, e1.value, groktypename(t1, &type_expr, NULL)); if (type_expr) { expr.value = build2(C_MAYBE_CONST_EXPR, TREE_TYPE(expr.value), type_expr, expr.value); C_MAYBE_CONST_EXPR_NON_CONST(expr.value) = true; } } break; case RID_OFFSETOF: c_parser_consume_token(parser); if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } t1 = c_parser_type_name(parser); if (t1 == NULL) { expr.value = error_mark_node; break; } if (!c_parser_require(parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } { tree type = groktypename(t1, NULL, NULL); tree offsetof_ref; if (type == error_mark_node) offsetof_ref = error_mark_node; else { offsetof_ref = build1(INDIRECT_REF, type, null_pointer_node); SET_EXPR_LOCATION(offsetof_ref, loc); } /* * Parse the second argument to __builtin_offsetof. We * must have one identifier, and beyond that we want to * accept sub structure and sub array references. 
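* For example, __builtin_offsetof (struct s, a.b[2].c) reaches the loop below with 'a' as the leading identifier and '.b', '[2]' and '.c' as the remaining designators.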
*/ if (c_parser_next_token_is(parser, CPP_NAME)) { offsetof_ref = build_component_ref (loc, offsetof_ref, c_parser_peek_token(parser)->value); c_parser_consume_token(parser); while (c_parser_next_token_is(parser, CPP_DOT) || c_parser_next_token_is(parser, CPP_OPEN_SQUARE) || c_parser_next_token_is(parser, CPP_DEREF)) { if (c_parser_next_token_is(parser, CPP_DEREF)) { loc = c_parser_peek_token(parser)->location; offsetof_ref = build_array_ref(loc, offsetof_ref, integer_zero_node); goto do_dot; } else if (c_parser_next_token_is(parser, CPP_DOT)) { do_dot: c_parser_consume_token(parser); if (c_parser_next_token_is_not(parser, CPP_NAME)) { c_parser_error(parser, "expected identifier"); break; } offsetof_ref = build_component_ref (loc, offsetof_ref, c_parser_peek_token(parser)->value); c_parser_consume_token(parser); } else { tree idx; loc = c_parser_peek_token(parser)->location; c_parser_consume_token(parser); idx = c_parser_expression(parser).value; idx = c_fully_fold(idx, false, NULL); c_parser_skip_until_found(parser, CPP_CLOSE_SQUARE, "expected %<]%>"); offsetof_ref = build_array_ref(loc, offsetof_ref, idx); } } } else c_parser_error(parser, "expected identifier"); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); expr.value = fold_offsetof(offsetof_ref, NULL_TREE); } break; case RID_CHOOSE_EXPR: c_parser_consume_token(parser); if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } loc = c_parser_peek_token(parser)->location; e1 = c_parser_expr_no_commas(parser, NULL); if (!c_parser_require(parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } e2 = c_parser_expr_no_commas(parser, NULL); if (!c_parser_require(parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } e3 = c_parser_expr_no_commas(parser, NULL); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); { tree c; c = e1.value; if (TREE_CODE(c) != INTEGER_CST || !INTEGRAL_TYPE_P(TREE_TYPE(c))) error_at(loc, "first argument to %<__builtin_choose_expr%> not" " a constant"); constant_expression_warning(c); expr = integer_zerop(c) ? e3 : e2; } break; case RID_TYPES_COMPATIBLE_P: c_parser_consume_token(parser); if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } t1 = c_parser_type_name(parser); if (t1 == NULL) { expr.value = error_mark_node; break; } if (!c_parser_require(parser, CPP_COMMA, "expected %<,%>")) { c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } t2 = c_parser_type_name(parser); if (t2 == NULL) { expr.value = error_mark_node; break; } c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); { tree e1, e2; e1 = TYPE_MAIN_VARIANT(groktypename(t1, NULL, NULL)); e2 = TYPE_MAIN_VARIANT(groktypename(t2, NULL, NULL)); expr.value = comptypes(e1, e2) ? 
build_int_cst(NULL_TREE, 1) : build_int_cst(NULL_TREE, 0); } break; case RID_AT_SELECTOR: gcc_assert(c_dialect_objc()); c_parser_consume_token(parser); if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } { tree sel = c_parser_objc_selector_arg(parser); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); expr.value = objc_build_selector_expr(loc, sel); } break; case RID_AT_PROTOCOL: gcc_assert(c_dialect_objc()); c_parser_consume_token(parser); if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } if (c_parser_next_token_is_not(parser, CPP_NAME)) { c_parser_error(parser, "expected identifier"); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); expr.value = error_mark_node; break; } { tree id = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); expr.value = objc_build_protocol_expr(id); } break; case RID_AT_ENCODE: /* Extension to support C-structures in the archiver. */ gcc_assert(c_dialect_objc()); c_parser_consume_token(parser); if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr.value = error_mark_node; break; } t1 = c_parser_type_name(parser); if (t1 == NULL) { expr.value = error_mark_node; c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); break; } c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); { tree type = groktypename(t1, NULL, NULL); expr.value = objc_build_encode_expr(type); } break; default: c_parser_error(parser, "expected expression"); expr.value = error_mark_node; break; } break; case CPP_OPEN_SQUARE: if (c_dialect_objc()) { tree receiver, args; c_parser_consume_token(parser); receiver = c_parser_objc_receiver(parser); args = c_parser_objc_message_args(parser); c_parser_skip_until_found(parser, CPP_CLOSE_SQUARE, "expected %<]%>"); expr.value = objc_build_message_expr(build_tree_list(receiver, args)); break; } /* Else fall through to report error. */ default: c_parser_error(parser, "expected expression"); expr.value = error_mark_node; break; } return c_parser_postfix_expression_after_primary(parser, loc, expr); } /* * Parse a postfix expression after a parenthesized type name: the * brace-enclosed initializer of a compound literal, possibly followed by * some postfix operators. This is separate because it is not possible * to tell until after the type name whether a cast expression has a cast * or a compound literal, or whether the operand of sizeof is a * parenthesized type name or starts with a compound literal. TYPE_LOC * is the location where TYPE_NAME starts--the location of the first * token after the parentheses around the type name. 
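* For example, the compound literal (struct s){ 1, 2 }, whether it appears as an expression or as a sizeof operand, arrives here to have its brace-enclosed initializer parsed.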
*/ static struct c_expr c_parser_postfix_expression_after_paren_type(c_parser * parser, struct c_type_name *type_name, location_t type_loc) { tree type; struct c_expr init; bool non_const; struct c_expr expr; location_t start_loc; tree type_expr = NULL_TREE; bool type_expr_const = true; check_compound_literal_type(type_loc, type_name); start_init(NULL_TREE, NULL, 0); type = groktypename(type_name, &type_expr, &type_expr_const); start_loc = c_parser_peek_token(parser)->location; if (type != error_mark_node && C_TYPE_VARIABLE_SIZE(type)) { error_at(type_loc, "compound literal has variable size"); type = error_mark_node; } init = c_parser_braced_init(parser, type, false); finish_init(); maybe_warn_string_init(type, init); if (type != error_mark_node && !ADDR_SPACE_GENERIC_P(TYPE_ADDR_SPACE(type)) && current_function_decl) { error("compound literal qualified by address-space qualifier"); type = error_mark_node; } if (!flag_isoc99) pedwarn(start_loc, OPT_pedantic, "ISO C90 forbids compound literals"); non_const = ((init.value && TREE_CODE(init.value) == CONSTRUCTOR) ? CONSTRUCTOR_NON_CONST(init.value) : init.original_code == C_MAYBE_CONST_EXPR); non_const |= !type_expr_const; expr.value = build_compound_literal(start_loc, type, init.value, non_const); expr.original_code = ERROR_MARK; expr.original_type = NULL; if (type_expr) { if (TREE_CODE(expr.value) == C_MAYBE_CONST_EXPR) { gcc_assert(C_MAYBE_CONST_EXPR_PRE(expr.value) == NULL_TREE); C_MAYBE_CONST_EXPR_PRE(expr.value) = type_expr; } else { gcc_assert(!non_const); expr.value = build2(C_MAYBE_CONST_EXPR, type, type_expr, expr.value); } } return c_parser_postfix_expression_after_primary(parser, start_loc, expr); } /* * Parse a postfix expression after the initial primary or compound * literal; that is, parse a series of postfix operators. * * EXPR_LOC is the location of the primary expression. */ static struct c_expr c_parser_postfix_expression_after_primary(c_parser * parser, location_t expr_loc, struct c_expr expr) { struct c_expr orig_expr; tree ident, idx; VEC(tree, gc) * exprlist; VEC(tree, gc) * origtypes; while (true) { location_t op_loc = c_parser_peek_token(parser)->location; switch (c_parser_peek_token(parser)->type) { case CPP_OPEN_SQUARE: /* Array reference. */ c_parser_consume_token(parser); idx = c_parser_expression(parser).value; c_parser_skip_until_found(parser, CPP_CLOSE_SQUARE, "expected %<]%>"); expr.value = build_array_ref(op_loc, expr.value, idx); expr.original_code = ERROR_MARK; expr.original_type = NULL; break; case CPP_OPEN_PAREN: /* Function call. */ c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_CLOSE_PAREN)) exprlist = NULL; else exprlist = c_parser_expr_list(parser, true, false, &origtypes); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); orig_expr = expr; /* * FIXME diagnostics: Ideally we want the FUNCNAME, not the * "(" after the FUNCNAME, which is what we have now. */ expr.value = build_function_call_vec(op_loc, expr.value, exprlist, origtypes); expr.original_code = ERROR_MARK; if (TREE_CODE(expr.value) == INTEGER_CST && TREE_CODE(orig_expr.value) == FUNCTION_DECL && DECL_BUILT_IN_CLASS(orig_expr.value) == BUILT_IN_NORMAL && DECL_FUNCTION_CODE(orig_expr.value) == BUILT_IN_CONSTANT_P) expr.original_code = C_MAYBE_CONST_EXPR; expr.original_type = NULL; if (exprlist != NULL) { release_tree_vector(exprlist); release_tree_vector(origtypes); } break; case CPP_DOT: /* Structure element reference. 
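* (the s.field form; the p->field form is the CPP_DEREF case below)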
*/ c_parser_consume_token(parser); expr = default_function_array_conversion(expr_loc, expr); if (c_parser_next_token_is(parser, CPP_NAME)) ident = c_parser_peek_token(parser)->value; else { c_parser_error(parser, "expected identifier"); expr.value = error_mark_node; expr.original_code = ERROR_MARK; expr.original_type = NULL; return expr; } c_parser_consume_token(parser); expr.value = build_component_ref(op_loc, expr.value, ident); expr.original_code = ERROR_MARK; if (TREE_CODE(expr.value) != COMPONENT_REF) expr.original_type = NULL; else { /* Remember the original type of a bitfield. */ tree field = TREE_OPERAND(expr.value, 1); if (TREE_CODE(field) != FIELD_DECL) expr.original_type = NULL; else expr.original_type = DECL_BIT_FIELD_TYPE(field); } break; case CPP_DEREF: /* Structure element reference. */ c_parser_consume_token(parser); expr = default_function_array_conversion(expr_loc, expr); if (c_parser_next_token_is(parser, CPP_NAME)) ident = c_parser_peek_token(parser)->value; else { c_parser_error(parser, "expected identifier"); expr.value = error_mark_node; expr.original_code = ERROR_MARK; expr.original_type = NULL; return expr; } c_parser_consume_token(parser); expr.value = build_component_ref(op_loc, build_indirect_ref(op_loc, expr.value, RO_ARROW), ident); expr.original_code = ERROR_MARK; if (TREE_CODE(expr.value) != COMPONENT_REF) expr.original_type = NULL; else { /* Remember the original type of a bitfield. */ tree field = TREE_OPERAND(expr.value, 1); if (TREE_CODE(field) != FIELD_DECL) expr.original_type = NULL; else expr.original_type = DECL_BIT_FIELD_TYPE(field); } break; case CPP_PLUS_PLUS: /* Postincrement. */ c_parser_consume_token(parser); expr = default_function_array_conversion(expr_loc, expr); expr.value = build_unary_op(op_loc, POSTINCREMENT_EXPR, expr.value, 0); expr.original_code = ERROR_MARK; expr.original_type = NULL; break; case CPP_MINUS_MINUS: /* Postdecrement. */ c_parser_consume_token(parser); expr = default_function_array_conversion(expr_loc, expr); expr.value = build_unary_op(op_loc, POSTDECREMENT_EXPR, expr.value, 0); expr.original_code = ERROR_MARK; expr.original_type = NULL; break; default: return expr; } } } /* * Parse an expression (C90 6.3.17, C99 6.5.17). * * expression: assignment-expression expression , assignment-expression */ static struct c_expr c_parser_expression(c_parser * parser) { struct c_expr expr; expr = c_parser_expr_no_commas(parser, NULL); while (c_parser_next_token_is(parser, CPP_COMMA)) { struct c_expr next; location_t loc = c_parser_peek_token(parser)->location; location_t expr_loc; c_parser_consume_token(parser); expr_loc = c_parser_peek_token(parser)->location; next = c_parser_expr_no_commas(parser, NULL); next = default_function_array_conversion(expr_loc, next); expr.value = build_compound_expr(loc, expr.value, next.value); expr.original_code = COMPOUND_EXPR; expr.original_type = next.original_type; } return expr; } /* * Parse an expression and convert functions or arrays to pointers. */ static struct c_expr c_parser_expression_conv(c_parser * parser) { struct c_expr expr; location_t loc = c_parser_peek_token(parser)->location; expr = c_parser_expression(parser); expr = default_function_array_conversion(loc, expr); return expr; } /* * Parse a non-empty list of expressions. If CONVERT_P, convert * functions and arrays to pointers. If FOLD_P, fold the expressions. 
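* (This is, among other things, what parses function-call argument lists above.)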
* * nonempty-expr-list: assignment-expression nonempty-expr-list , * assignment-expression */ static VEC(tree, gc) * c_parser_expr_list(c_parser * parser, bool convert_p, bool fold_p, VEC(tree, gc) ** p_orig_types) { VEC(tree, gc) * ret; VEC(tree, gc) * orig_types; struct c_expr expr; location_t loc = c_parser_peek_token(parser)->location; ret = make_tree_vector(); if (p_orig_types == NULL) orig_types = NULL; else orig_types = make_tree_vector(); expr = c_parser_expr_no_commas(parser, NULL); if (convert_p) expr = default_function_array_conversion(loc, expr); if (fold_p) expr.value = c_fully_fold(expr.value, false, NULL); VEC_quick_push(tree, ret, expr.value); if (orig_types != NULL) VEC_quick_push(tree, orig_types, expr.original_type); while (c_parser_next_token_is(parser, CPP_COMMA)) { c_parser_consume_token(parser); loc = c_parser_peek_token(parser)->location; expr = c_parser_expr_no_commas(parser, NULL); if (convert_p) expr = default_function_array_conversion(loc, expr); if (fold_p) expr.value = c_fully_fold(expr.value, false, NULL); VEC_safe_push(tree, gc, ret, expr.value); if (orig_types != NULL) VEC_safe_push(tree, gc, orig_types, expr.original_type); } if (orig_types != NULL) *p_orig_types = orig_types; return ret; } /* Parse Objective-C-specific constructs. */ /* * Parse an objc-class-definition. * * objc-class-definition: @interface identifier objc-superclass[opt] * objc-protocol-refs[opt] objc-class-instance-variables[opt] * objc-methodprotolist @end @implementation identifier * objc-superclass[opt] objc-class-instance-variables[opt] @interface * identifier ( identifier ) objc-protocol-refs[opt] objc-methodprotolist * @end @implementation identifier ( identifier ) * * objc-superclass: : identifier * * "@interface identifier (" must start "@interface identifier ( identifier * ) ...": objc-methodprotolist in the first production may not start * with a parenthesized identifier as a declarator of a data definition * with no declaration specifiers if the objc-superclass, * objc-protocol-refs and objc-class-instance-variables are omitted. 
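* * For illustration: @interface Foo : Super <P1, P2> { int ivar; } - (void) doIt; @end, or the category forms @interface Foo (Extra) ... @end and @implementation Foo (Extra).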
*/ static void c_parser_objc_class_definition(c_parser * parser) { bool iface_p; tree id1; tree superclass; if (c_parser_next_token_is_keyword(parser, RID_AT_INTERFACE)) iface_p = true; else if (c_parser_next_token_is_keyword(parser, RID_AT_IMPLEMENTATION)) iface_p = false; else gcc_unreachable(); c_parser_consume_token(parser); if (c_parser_next_token_is_not(parser, CPP_NAME)) { c_parser_error(parser, "expected identifier"); return; } id1 = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_OPEN_PAREN)) { tree id2; tree proto = NULL_TREE; c_parser_consume_token(parser); if (c_parser_next_token_is_not(parser, CPP_NAME)) { c_parser_error(parser, "expected identifier"); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); return; } id2 = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (!iface_p) { objc_start_category_implementation(id1, id2); return; } if (c_parser_next_token_is(parser, CPP_LESS)) proto = c_parser_objc_protocol_refs(parser); objc_start_category_interface(id1, id2, proto); c_parser_objc_methodprotolist(parser); c_parser_require_keyword(parser, RID_AT_END, "expected %<@end%>"); objc_finish_interface(); return; } if (c_parser_next_token_is(parser, CPP_COLON)) { c_parser_consume_token(parser); if (c_parser_next_token_is_not(parser, CPP_NAME)) { c_parser_error(parser, "expected identifier"); return; } superclass = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); } else superclass = NULL_TREE; if (iface_p) { tree proto = NULL_TREE; if (c_parser_next_token_is(parser, CPP_LESS)) proto = c_parser_objc_protocol_refs(parser); objc_start_class_interface(id1, superclass, proto); } else objc_start_class_implementation(id1, superclass); if (c_parser_next_token_is(parser, CPP_OPEN_BRACE)) c_parser_objc_class_instance_variables(parser); if (iface_p) { objc_continue_interface(); c_parser_objc_methodprotolist(parser); c_parser_require_keyword(parser, RID_AT_END, "expected %<@end%>"); objc_finish_interface(); } else { objc_continue_implementation(); return; } } /* * Parse objc-class-instance-variables. * * objc-class-instance-variables: { objc-instance-variable-decl-list[opt] } * * objc-instance-variable-decl-list: objc-visibility-spec * objc-instance-variable-decl ; ; objc-instance-variable-decl-list * objc-visibility-spec objc-instance-variable-decl-list * objc-instance-variable-decl ; objc-instance-variable-decl-list ; * * objc-visibility-spec: @private @protected @public * * objc-instance-variable-decl: struct-declaration */ static void c_parser_objc_class_instance_variables(c_parser * parser) { gcc_assert(c_parser_next_token_is(parser, CPP_OPEN_BRACE)); c_parser_consume_token(parser); while (c_parser_next_token_is_not(parser, CPP_EOF)) { tree decls; /* Parse any stray semicolon. */ if (c_parser_next_token_is(parser, CPP_SEMICOLON)) { pedwarn(c_parser_peek_token(parser)->location, OPT_pedantic, "extra semicolon in struct or union specified"); c_parser_consume_token(parser); continue; } /* Stop if at the end of the instance variables. */ if (c_parser_next_token_is(parser, CPP_CLOSE_BRACE)) { c_parser_consume_token(parser); break; } /* Parse any objc-visibility-spec. 
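* (@private, @protected or @public; these map below to the visibility codes 2, 0 and 1 respectively)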
*/ if (c_parser_next_token_is_keyword(parser, RID_PRIVATE)) { c_parser_consume_token(parser); objc_set_visibility(2); continue; } else if (c_parser_next_token_is_keyword(parser, RID_PROTECTED)) { c_parser_consume_token(parser); objc_set_visibility(0); continue; } else if (c_parser_next_token_is_keyword(parser, RID_PUBLIC)) { c_parser_consume_token(parser); objc_set_visibility(1); continue; } else if (c_parser_next_token_is(parser, CPP_PRAGMA)) { c_parser_pragma(parser, pragma_external); continue; } /* Parse some comma-separated declarations. */ decls = c_parser_struct_declaration(parser); { /* * Comma-separated instance variables are chained together in * reverse order; add them one by one. */ tree ivar = nreverse(decls); for (; ivar; ivar = TREE_CHAIN(ivar)) objc_add_instance_variable(copy_node(ivar)); } c_parser_skip_until_found(parser, CPP_SEMICOLON, "expected %<;%>"); } } /* * Parse an objc-class-declaration. * * objc-class-declaration: @class identifier-list ; */ static void c_parser_objc_class_declaration(c_parser * parser) { tree list = NULL_TREE; gcc_assert(c_parser_next_token_is_keyword(parser, RID_CLASS)); c_parser_consume_token(parser); /* * Any identifiers, including those declared as type names, are OK * here. */ while (true) { tree id; if (c_parser_next_token_is_not(parser, CPP_NAME)) { c_parser_error(parser, "expected identifier"); break; } id = c_parser_peek_token(parser)->value; list = chainon(list, build_tree_list(NULL_TREE, id)); c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_COMMA)) c_parser_consume_token(parser); else break; } c_parser_skip_until_found(parser, CPP_SEMICOLON, "expected %<;%>"); objc_declare_class(list); } /* * Parse an objc-alias-declaration. * * objc-alias-declaration: @compatibility_alias identifier identifier ; */ static void c_parser_objc_alias_declaration(c_parser * parser) { tree id1, id2; gcc_assert(c_parser_next_token_is_keyword(parser, RID_AT_ALIAS)); c_parser_consume_token(parser); if (c_parser_next_token_is_not(parser, CPP_NAME)) { c_parser_error(parser, "expected identifier"); c_parser_skip_until_found(parser, CPP_SEMICOLON, NULL); return; } id1 = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); if (c_parser_next_token_is_not(parser, CPP_NAME)) { c_parser_error(parser, "expected identifier"); c_parser_skip_until_found(parser, CPP_SEMICOLON, NULL); return; } id2 = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); c_parser_skip_until_found(parser, CPP_SEMICOLON, "expected %<;%>"); objc_declare_alias(id1, id2); } /* * Parse an objc-protocol-definition. * * objc-protocol-definition: @protocol identifier objc-protocol-refs[opt] * objc-methodprotolist @end @protocol identifier-list ; * * "@protocol identifier ;" should be resolved as "@protocol identifier-list * ;": objc-methodprotolist may not start with a semicolon in the first * alternative if objc-protocol-refs are omitted. */ static void c_parser_objc_protocol_definition(c_parser * parser) { gcc_assert(c_parser_next_token_is_keyword(parser, RID_AT_PROTOCOL)); c_parser_consume_token(parser); if (c_parser_next_token_is_not(parser, CPP_NAME)) { c_parser_error(parser, "expected identifier"); return; } if (c_parser_peek_2nd_token(parser)->type == CPP_COMMA || c_parser_peek_2nd_token(parser)->type == CPP_SEMICOLON) { tree list = NULL_TREE; /* * Any identifiers, including those declared as type names, are * OK here. 
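* This is the forward-declaration form, e.g. @protocol P1, P2; which is analogous to @class A, B; above.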
*/ while (true) { tree id; if (c_parser_next_token_is_not(parser, CPP_NAME)) { c_parser_error(parser, "expected identifier"); break; } id = c_parser_peek_token(parser)->value; list = chainon(list, build_tree_list(NULL_TREE, id)); c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_COMMA)) c_parser_consume_token(parser); else break; } c_parser_skip_until_found(parser, CPP_SEMICOLON, "expected %<;%>"); objc_declare_protocols(list); } else { tree id = c_parser_peek_token(parser)->value; tree proto = NULL_TREE; c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_LESS)) proto = c_parser_objc_protocol_refs(parser); parser->objc_pq_context = true; objc_start_protocol(id, proto); c_parser_objc_methodprotolist(parser); c_parser_require_keyword(parser, RID_AT_END, "expected %<@end%>"); parser->objc_pq_context = false; objc_finish_interface(); } } /* * Parse an objc-method-type. * * objc-method-type: + - */ static enum tree_code c_parser_objc_method_type(c_parser * parser) { switch (c_parser_peek_token(parser)->type) { case CPP_PLUS: c_parser_consume_token(parser); return PLUS_EXPR; case CPP_MINUS: c_parser_consume_token(parser); return MINUS_EXPR; default: gcc_unreachable(); } } /* * Parse an objc-method-definition. * * objc-method-definition: objc-method-type objc-method-decl ;[opt] * compound-statement */ static void c_parser_objc_method_definition(c_parser * parser) { enum tree_code type = c_parser_objc_method_type(parser); tree decl; objc_set_method_type(type); parser->objc_pq_context = true; decl = c_parser_objc_method_decl(parser); if (c_parser_next_token_is(parser, CPP_SEMICOLON)) { c_parser_consume_token(parser); pedwarn(c_parser_peek_token(parser)->location, OPT_pedantic, "extra semicolon in method definition specified"); } if (!c_parser_next_token_is(parser, CPP_OPEN_BRACE)) { c_parser_error(parser, "expected %<{%>"); return; } parser->objc_pq_context = false; objc_start_method_definition(decl); add_stmt(c_parser_compound_statement(parser)); objc_finish_method_definition(current_function_decl); } /* * Parse an objc-methodprotolist. * * objc-methodprotolist: empty objc-methodprotolist objc-methodproto * objc-methodprotolist declaration objc-methodprotolist ; * * The declaration is a data definition, which may be missing declaration * specifiers under the same rules and diagnostics as other data * definitions outside functions, and the stray semicolon is diagnosed * the same way as a stray semicolon outside a function. */ static void c_parser_objc_methodprotolist(c_parser * parser) { while (true) { /* The list is terminated by @end. */ switch (c_parser_peek_token(parser)->type) { case CPP_SEMICOLON: pedwarn(c_parser_peek_token(parser)->location, OPT_pedantic, "ISO C does not allow extra %<;%> outside of a function"); c_parser_consume_token(parser); break; case CPP_PLUS: case CPP_MINUS: c_parser_objc_methodproto(parser); break; case CPP_PRAGMA: c_parser_pragma(parser, pragma_external); break; case CPP_EOF: return; default: if (c_parser_next_token_is_keyword(parser, RID_AT_END)) return; c_parser_declaration_or_fndef(parser, false, true, false, true); break; } } } /* * Parse an objc-methodproto. * * objc-methodproto: objc-method-type objc-method-decl ; */ static void c_parser_objc_methodproto(c_parser * parser) { enum tree_code type = c_parser_objc_method_type(parser); tree decl; objc_set_method_type(type); /* Remember protocol qualifiers in prototypes. 
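* (so that a prototype such as - (void) send: (in int) x; treats 'in' as a qualifier rather than an identifier)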
*/ parser->objc_pq_context = true; decl = c_parser_objc_method_decl(parser); /* Forget protocol qualifiers here. */ parser->objc_pq_context = false; objc_add_method_declaration(decl); c_parser_skip_until_found(parser, CPP_SEMICOLON, "expected %<;%>"); } /* * Parse an objc-method-decl. * * objc-method-decl: ( objc-type-name ) objc-selector objc-selector ( * objc-type-name ) objc-keyword-selector objc-optparmlist * objc-keyword-selector objc-optparmlist * * objc-keyword-selector: objc-keyword-decl objc-keyword-selector * objc-keyword-decl * * objc-keyword-decl: objc-selector : ( objc-type-name ) identifier * objc-selector : identifier : ( objc-type-name ) identifier : * identifier * * objc-optparmlist: objc-optparms objc-optellipsis * * objc-optparms: empty objc-opt-parms , parameter-declaration * * objc-optellipsis: empty , ... */ static tree c_parser_objc_method_decl(c_parser * parser) { tree type = NULL_TREE; tree sel; tree parms = NULL_TREE; bool ellipsis = false; if (c_parser_next_token_is(parser, CPP_OPEN_PAREN)) { c_parser_consume_token(parser); type = c_parser_objc_type_name(parser); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); } sel = c_parser_objc_selector(parser); /* * If there is no selector, or a colon follows, we have an * objc-keyword-selector. If there is a selector, and a colon does * not follow, that selector ends the objc-method-decl. */ if (!sel || c_parser_next_token_is(parser, CPP_COLON)) { tree tsel = sel; tree list = NULL_TREE; while (true) { tree atype = NULL_TREE, id, keyworddecl; if (!c_parser_require(parser, CPP_COLON, "expected %<:%>")) break; if (c_parser_next_token_is(parser, CPP_OPEN_PAREN)) { c_parser_consume_token(parser); atype = c_parser_objc_type_name(parser); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); } if (c_parser_next_token_is_not(parser, CPP_NAME)) { c_parser_error(parser, "expected identifier"); return error_mark_node; } id = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); keyworddecl = objc_build_keyword_decl(tsel, atype, id); list = chainon(list, keyworddecl); tsel = c_parser_objc_selector(parser); if (!tsel && c_parser_next_token_is_not(parser, CPP_COLON)) break; } /* * Parse the optional parameter list. Optional Objective-C * method parameters follow the C syntax, and may include '...' * to denote a variable number of arguments. */ parms = make_node(TREE_LIST); while (c_parser_next_token_is(parser, CPP_COMMA)) { struct c_parm *parm; c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_ELLIPSIS)) { ellipsis = true; c_parser_consume_token(parser); break; } parm = c_parser_parameter_declaration(parser, NULL_TREE); if (parm == NULL) break; parms = chainon(parms, build_tree_list(NULL_TREE, grokparm(parm))); } sel = list; } return objc_build_method_signature(type, sel, parms, ellipsis); } /* * Parse an objc-type-name. 
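* * (the parenthesized types in a method declaration, e.g. the (oneway void) and (out char *) parts of a signature)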
* * objc-type-name: objc-type-qualifiers[opt] type-name * objc-type-qualifiers[opt] * * objc-type-qualifiers: objc-type-qualifier objc-type-qualifiers * objc-type-qualifier * * objc-type-qualifier: one of in out inout bycopy byref oneway */ static tree c_parser_objc_type_name(c_parser * parser) { tree quals = NULL_TREE; struct c_type_name *type_name = NULL; tree type = NULL_TREE; while (true) { c_token *token = c_parser_peek_token(parser); if (token->type == CPP_KEYWORD && (token->keyword == RID_IN || token->keyword == RID_OUT || token->keyword == RID_INOUT || token->keyword == RID_BYCOPY || token->keyword == RID_BYREF || token->keyword == RID_ONEWAY)) { quals = chainon(quals, build_tree_list(NULL_TREE, token->value)); c_parser_consume_token(parser); } else break; } if (c_parser_next_token_starts_typename(parser)) type_name = c_parser_type_name(parser); if (type_name) type = groktypename(type_name, NULL, NULL); return build_tree_list(quals, type); } /* * Parse objc-protocol-refs. * * objc-protocol-refs: < identifier-list > */ static tree c_parser_objc_protocol_refs(c_parser * parser) { tree list = NULL_TREE; gcc_assert(c_parser_next_token_is(parser, CPP_LESS)); c_parser_consume_token(parser); /* * Any identifiers, including those declared as type names, are OK * here. */ while (true) { tree id; if (c_parser_next_token_is_not(parser, CPP_NAME)) { c_parser_error(parser, "expected identifier"); break; } id = c_parser_peek_token(parser)->value; list = chainon(list, build_tree_list(NULL_TREE, id)); c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_COMMA)) c_parser_consume_token(parser); else break; } c_parser_require(parser, CPP_GREATER, "expected %<>%>"); return list; } /* * Parse an objc-try-catch-statement. * * objc-try-catch-statement: @try compound-statement objc-catch-list[opt] * @try compound-statement objc-catch-list[opt] @finally * compound-statement * * objc-catch-list: @catch ( parameter-declaration ) compound-statement * objc-catch-list @catch ( parameter-declaration ) compound-statement */ static void c_parser_objc_try_catch_statement(c_parser * parser) { location_t loc; tree stmt; gcc_assert(c_parser_next_token_is_keyword(parser, RID_TRY)); c_parser_consume_token(parser); loc = c_parser_peek_token(parser)->location; stmt = c_parser_compound_statement(parser); objc_begin_try_stmt(loc, stmt); while (c_parser_next_token_is_keyword(parser, RID_CATCH)) { struct c_parm *parm; c_parser_consume_token(parser); if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) break; parm = c_parser_parameter_declaration(parser, NULL_TREE); if (parm == NULL) { c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, NULL); break; } c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); objc_begin_catch_clause(grokparm(parm)); if (c_parser_require(parser, CPP_OPEN_BRACE, "expected %<{%>")) c_parser_compound_statement_nostart(parser); objc_finish_catch_clause(); } if (c_parser_next_token_is_keyword(parser, RID_AT_FINALLY)) { location_t finloc; tree finstmt; c_parser_consume_token(parser); finloc = c_parser_peek_token(parser)->location; finstmt = c_parser_compound_statement(parser); objc_build_finally_clause(finloc, finstmt); } objc_finish_try_stmt(); } /* * Parse an objc-synchronized-statement. 
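* * (e.g. @synchronized (self) { ... }, which serializes execution of the body on the given object)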
* * objc-synchronized-statement: @synchronized ( expression ) * compound-statement */ static void c_parser_objc_synchronized_statement(c_parser * parser) { location_t loc; tree expr, stmt; gcc_assert(c_parser_next_token_is_keyword(parser, RID_AT_SYNCHRONIZED)); c_parser_consume_token(parser); loc = c_parser_peek_token(parser)->location; if (c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { expr = c_parser_expression(parser).value; expr = c_fully_fold(expr, false, NULL); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else expr = error_mark_node; stmt = c_parser_compound_statement(parser); objc_build_synchronized(loc, expr, stmt); } /* * Parse an objc-selector; return NULL_TREE without an error if the next * token is not an objc-selector. * * objc-selector: identifier one of enum struct union if else while do for * switch case default break continue return goto asm sizeof typeof * __alignof unsigned long const short volatile signed restrict _Complex * in out inout bycopy byref oneway int char float double void _Bool * * ??? Why this selection of keywords but not, for example, storage class * specifiers? */ static tree c_parser_objc_selector(c_parser * parser) { c_token *token = c_parser_peek_token(parser); tree value = token->value; if (token->type == CPP_NAME) { c_parser_consume_token(parser); return value; } if (token->type != CPP_KEYWORD) return NULL_TREE; switch (token->keyword) { case RID_ENUM: case RID_STRUCT: case RID_UNION: case RID_IF: case RID_ELSE: case RID_WHILE: case RID_DO: case RID_FOR: case RID_SWITCH: case RID_CASE: case RID_DEFAULT: case RID_BREAK: case RID_CONTINUE: case RID_RETURN: case RID_GOTO: case RID_ASM: case RID_SIZEOF: case RID_TYPEOF: case RID_ALIGNOF: case RID_UNSIGNED: case RID_LONG: case RID_CONST: case RID_SHORT: case RID_VOLATILE: case RID_SIGNED: case RID_RESTRICT: case RID_COMPLEX: case RID_IN: case RID_OUT: case RID_INOUT: case RID_BYCOPY: case RID_BYREF: case RID_ONEWAY: case RID_INT: case RID_CHAR: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: case RID_BOOL: c_parser_consume_token(parser); return value; default: return NULL_TREE; } } /* * Parse an objc-selector-arg. * * objc-selector-arg: objc-selector objc-keywordname-list * * objc-keywordname-list: objc-keywordname objc-keywordname-list * objc-keywordname * * objc-keywordname: objc-selector : : */ static tree c_parser_objc_selector_arg(c_parser * parser) { tree sel = c_parser_objc_selector(parser); tree list = NULL_TREE; if (sel && c_parser_next_token_is_not(parser, CPP_COLON)) return sel; while (true) { if (!c_parser_require(parser, CPP_COLON, "expected %<:%>")) return list; list = chainon(list, build_tree_list(sel, NULL_TREE)); sel = c_parser_objc_selector(parser); if (!sel && c_parser_next_token_is_not(parser, CPP_COLON)) break; } return list; } /* * Parse an objc-receiver. * * objc-receiver: expression class-name type-name */ static tree c_parser_objc_receiver(c_parser * parser) { if (c_parser_peek_token(parser)->type == CPP_NAME && (c_parser_peek_token(parser)->id_kind == C_ID_TYPENAME || c_parser_peek_token(parser)->id_kind == C_ID_CLASSNAME)) { tree id = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); return objc_get_class_reference(id); } return c_fully_fold(c_parser_expression(parser).value, false, NULL); } /* * Parse objc-message-args. 
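* * (everything after the receiver in [obj setX: 1 y: 2], i.e. the selector keywords and their argument expressions)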
* * objc-message-args: objc-selector objc-keywordarg-list * * objc-keywordarg-list: objc-keywordarg objc-keywordarg-list * objc-keywordarg * * objc-keywordarg: objc-selector : objc-keywordexpr : objc-keywordexpr */ static tree c_parser_objc_message_args(c_parser * parser) { tree sel = c_parser_objc_selector(parser); tree list = NULL_TREE; if (sel && c_parser_next_token_is_not(parser, CPP_COLON)) return sel; while (true) { tree keywordexpr; if (!c_parser_require(parser, CPP_COLON, "expected %<:%>")) return error_mark_node; keywordexpr = c_parser_objc_keywordexpr(parser); list = chainon(list, build_tree_list(sel, keywordexpr)); sel = c_parser_objc_selector(parser); if (!sel && c_parser_next_token_is_not(parser, CPP_COLON)) break; } return list; } /* * Parse an objc-keywordexpr. * * objc-keywordexpr: nonempty-expr-list */ static tree c_parser_objc_keywordexpr(c_parser * parser) { tree ret; VEC(tree, gc) * expr_list = c_parser_expr_list(parser, true, true, NULL); if (VEC_length(tree, expr_list) == 1) { /* * Just return the expression, remove a level of indirection. */ ret = VEC_index(tree, expr_list, 0); } else { /* We have a comma expression, we will collapse later. */ ret = build_tree_list_vec(expr_list); } release_tree_vector(expr_list); return ret; } /* * Handle pragmas. Some OpenMP pragmas are associated with, and * therefore should be considered, statements. ALLOW_STMT is true if * we're within the context of a function and such pragmas are to be * allowed. Returns true if we actually parsed such a pragma. */ static bool c_parser_pragma(c_parser * parser, enum pragma_context context) { unsigned int id; id = c_parser_peek_token(parser)->pragma_kind; gcc_assert(id != PRAGMA_NONE); switch (id) { case PRAGMA_OMP_BARRIER: if (context != pragma_compound) { if (context == pragma_stmt) c_parser_error(parser, "%<#pragma omp barrier%> may only be " "used in compound statements"); goto bad_stmt; } c_parser_omp_barrier(parser); return false; case PRAGMA_OMP_FLUSH: if (context != pragma_compound) { if (context == pragma_stmt) c_parser_error(parser, "%<#pragma omp flush%> may only be " "used in compound statements"); goto bad_stmt; } c_parser_omp_flush(parser); return false; case PRAGMA_OMP_TASKWAIT: if (context != pragma_compound) { if (context == pragma_stmt) c_parser_error(parser, "%<#pragma omp taskwait%> may only be " "used in compound statements"); goto bad_stmt; } c_parser_omp_taskwait(parser); return false; case PRAGMA_OMP_THREADPRIVATE: c_parser_omp_threadprivate(parser); return false; case PRAGMA_OMP_SECTION: error_at(c_parser_peek_token(parser)->location, "%<#pragma omp section%> may only be used in " "%<#pragma omp sections%> construct"); c_parser_skip_until_found(parser, CPP_PRAGMA_EOL, NULL); return false; case PRAGMA_GCC_PCH_PREPROCESS: c_parser_error(parser, "%<#pragma GCC pch_preprocess%> must be first"); c_parser_skip_until_found(parser, CPP_PRAGMA_EOL, NULL); return false; default: if (id < PRAGMA_FIRST_EXTERNAL) { if (context == pragma_external) { bad_stmt: c_parser_error(parser, "expected declaration specifiers"); c_parser_skip_until_found(parser, CPP_PRAGMA_EOL, NULL); return false; } c_parser_omp_construct(parser); return true; } break; } c_parser_consume_pragma(parser); c_invoke_pragma_handler(id); /* * Skip to EOL, but suppress any error message. Those will have been * generated by the handler routine through calling error, as opposed * to calling c_parser_error. 
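 * Setting parser->error below achieves the suppression:
 * c_parser_error returns immediately while the flag is set.
 *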
*/ parser->error = true; c_parser_skip_to_pragma_eol(parser); return false; } /* The interface the pragma parsers have to the lexer. */ enum cpp_ttype pragma_lex(tree * value) { c_token *tok = c_parser_peek_token(the_parser); enum cpp_ttype ret = tok->type; *value = tok->value; if (ret == CPP_PRAGMA_EOL || ret == CPP_EOF) ret = CPP_EOF; else { if (ret == CPP_KEYWORD) ret = CPP_NAME; c_parser_consume_token(the_parser); } return ret; } static void c_parser_pragma_pch_preprocess(c_parser * parser) { tree name = NULL; c_parser_consume_pragma(parser); if (c_parser_next_token_is(parser, CPP_STRING)) { name = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); } else c_parser_error(parser, "expected string literal"); c_parser_skip_to_pragma_eol(parser); if (name) c_common_pch_pragma(parse_in, TREE_STRING_POINTER(name)); } /* OpenMP 2.5 parsing routines. */ /* * Returns name of the next clause. If the clause is not recognized * PRAGMA_OMP_CLAUSE_NONE is returned and the token is not consumed. * Otherwise appropriate pragma_omp_clause is returned and the token is * consumed. */ static pragma_omp_clause c_parser_omp_clause_name(c_parser * parser) { pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE; if (c_parser_next_token_is_keyword(parser, RID_IF)) result = PRAGMA_OMP_CLAUSE_IF; else if (c_parser_next_token_is_keyword(parser, RID_DEFAULT)) result = PRAGMA_OMP_CLAUSE_DEFAULT; else if (c_parser_next_token_is(parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER(c_parser_peek_token(parser)->value); switch (p[0]) { case 'c': if (!strcmp("collapse", p)) result = PRAGMA_OMP_CLAUSE_COLLAPSE; else if (!strcmp("copyin", p)) result = PRAGMA_OMP_CLAUSE_COPYIN; else if (!strcmp("copyprivate", p)) result = PRAGMA_OMP_CLAUSE_COPYPRIVATE; break; case 'f': if (!strcmp("firstprivate", p)) result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE; break; case 'l': if (!strcmp("lastprivate", p)) result = PRAGMA_OMP_CLAUSE_LASTPRIVATE; break; case 'n': if (!strcmp("nowait", p)) result = PRAGMA_OMP_CLAUSE_NOWAIT; else if (!strcmp("num_threads", p)) result = PRAGMA_OMP_CLAUSE_NUM_THREADS; break; case 'o': if (!strcmp("ordered", p)) result = PRAGMA_OMP_CLAUSE_ORDERED; break; case 'p': if (!strcmp("private", p)) result = PRAGMA_OMP_CLAUSE_PRIVATE; break; case 'r': if (!strcmp("reduction", p)) result = PRAGMA_OMP_CLAUSE_REDUCTION; break; case 's': if (!strcmp("schedule", p)) result = PRAGMA_OMP_CLAUSE_SCHEDULE; else if (!strcmp("shared", p)) result = PRAGMA_OMP_CLAUSE_SHARED; break; case 'u': if (!strcmp("untied", p)) result = PRAGMA_OMP_CLAUSE_UNTIED; break; } } if (result != PRAGMA_OMP_CLAUSE_NONE) c_parser_consume_token(parser); return result; } /* Validate that a clause of the given type does not already exist. */ static void check_no_duplicate_clause(tree clauses, enum omp_clause_code code, const char *name) { tree c; for (c = clauses; c; c = OMP_CLAUSE_CHAIN(c)) if (OMP_CLAUSE_CODE(c) == code) { location_t loc = OMP_CLAUSE_LOCATION(c); error_at(loc, "too many %qs clauses", name); break; } } /* * OpenMP 2.5: variable-list: identifier variable-list , identifier * * If KIND is nonzero, create the appropriate node and install the decl in * OMP_CLAUSE_DECL and add the node to the head of the list. If KIND is * nonzero, CLAUSE_LOC is the location of the clause. * * If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE; return * the list created. 
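 * For example, for the clause text private (a, b, c), KIND is
 * OMP_CLAUSE_PRIVATE and one clause node per identifier is pushed
 * onto LIST.
 *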
*/ static tree c_parser_omp_variable_list(c_parser * parser, location_t clause_loc, enum omp_clause_code kind, tree list) { if (c_parser_next_token_is_not(parser, CPP_NAME) || c_parser_peek_token(parser)->id_kind != C_ID_ID) c_parser_error(parser, "expected identifier"); while (c_parser_next_token_is(parser, CPP_NAME) && c_parser_peek_token(parser)->id_kind == C_ID_ID) { tree t = lookup_name(c_parser_peek_token(parser)->value); if (t == NULL_TREE) undeclared_variable(c_parser_peek_token(parser)->location, c_parser_peek_token(parser)->value); else if (t == error_mark_node) ; else if (kind != 0) { tree u = build_omp_clause(clause_loc, kind); OMP_CLAUSE_DECL(u) = t; OMP_CLAUSE_CHAIN(u) = list; list = u; } else list = tree_cons(t, NULL_TREE, list); c_parser_consume_token(parser); if (c_parser_next_token_is_not(parser, CPP_COMMA)) break; c_parser_consume_token(parser); } return list; } /* * Similarly, but expect leading and trailing parenthesis. This is a * very common case for omp clauses. */ static tree c_parser_omp_var_list_parens(c_parser * parser, enum omp_clause_code kind, tree list) { /* The clauses location. */ location_t loc = c_parser_peek_token(parser)->location; if (c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { list = c_parser_omp_variable_list(parser, loc, kind, list); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); } return list; } /* * OpenMP 3.0: collapse ( constant-expression ) */ static tree c_parser_omp_clause_collapse(c_parser * parser, tree list) { tree c, num = error_mark_node; HOST_WIDE_INT n; location_t loc; check_no_duplicate_clause(list, OMP_CLAUSE_COLLAPSE, "collapse"); loc = c_parser_peek_token(parser)->location; if (c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { num = c_parser_expr_no_commas(parser, NULL).value; c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); } if (num == error_mark_node) return list; if (!INTEGRAL_TYPE_P(TREE_TYPE(num)) || !host_integerp(num, 0) || (n = tree_low_cst(num, 0)) <= 0 || (int)n != n) { error_at(loc, "collapse argument needs positive constant integer expression"); return list; } c = build_omp_clause(loc, OMP_CLAUSE_COLLAPSE); OMP_CLAUSE_COLLAPSE_EXPR(c) = num; OMP_CLAUSE_CHAIN(c) = list; return c; } /* * OpenMP 2.5: copyin ( variable-list ) */ static tree c_parser_omp_clause_copyin(c_parser * parser, tree list) { return c_parser_omp_var_list_parens(parser, OMP_CLAUSE_COPYIN, list); } /* * OpenMP 2.5: copyprivate ( variable-list ) */ static tree c_parser_omp_clause_copyprivate(c_parser * parser, tree list) { return c_parser_omp_var_list_parens(parser, OMP_CLAUSE_COPYPRIVATE, list); } /* * OpenMP 2.5: default ( shared | none ) */ static tree c_parser_omp_clause_default(c_parser * parser, tree list) { enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED; location_t loc = c_parser_peek_token(parser)->location; tree c; if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) return list; if (c_parser_next_token_is(parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER(c_parser_peek_token(parser)->value); switch (p[0]) { case 'n': if (strcmp("none", p) != 0) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_NONE; break; case 's': if (strcmp("shared", p) != 0) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_SHARED; break; default: goto invalid_kind; } c_parser_consume_token(parser); } else { invalid_kind: c_parser_error(parser, "expected %<none%> or %<shared%>"); } c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (kind == 
OMP_CLAUSE_DEFAULT_UNSPECIFIED) return list; check_no_duplicate_clause(list, OMP_CLAUSE_DEFAULT, "default"); c = build_omp_clause(loc, OMP_CLAUSE_DEFAULT); OMP_CLAUSE_CHAIN(c) = list; OMP_CLAUSE_DEFAULT_KIND(c) = kind; return c; } /* * OpenMP 2.5: firstprivate ( variable-list ) */ static tree c_parser_omp_clause_firstprivate(c_parser * parser, tree list) { return c_parser_omp_var_list_parens(parser, OMP_CLAUSE_FIRSTPRIVATE, list); } /* * OpenMP 2.5: if ( expression ) */ static tree c_parser_omp_clause_if(c_parser * parser, tree list) { location_t loc = c_parser_peek_token(parser)->location; if (c_parser_next_token_is(parser, CPP_OPEN_PAREN)) { tree t = c_parser_paren_condition(parser); tree c; check_no_duplicate_clause(list, OMP_CLAUSE_IF, "if"); c = build_omp_clause(loc, OMP_CLAUSE_IF); OMP_CLAUSE_IF_EXPR(c) = t; OMP_CLAUSE_CHAIN(c) = list; list = c; } else c_parser_error(parser, "expected %<(%>"); return list; } /* * OpenMP 2.5: lastprivate ( variable-list ) */ static tree c_parser_omp_clause_lastprivate(c_parser * parser, tree list) { return c_parser_omp_var_list_parens(parser, OMP_CLAUSE_LASTPRIVATE, list); } /* * OpenMP 2.5: nowait */ static tree c_parser_omp_clause_nowait(c_parser * parser ATTRIBUTE_UNUSED, tree list) { tree c; location_t loc = c_parser_peek_token(parser)->location; check_no_duplicate_clause(list, OMP_CLAUSE_NOWAIT, "nowait"); c = build_omp_clause(loc, OMP_CLAUSE_NOWAIT); OMP_CLAUSE_CHAIN(c) = list; return c; } /* * OpenMP 2.5: num_threads ( expression ) */ static tree c_parser_omp_clause_num_threads(c_parser * parser, tree list) { location_t num_threads_loc = c_parser_peek_token(parser)->location; if (c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { location_t expr_loc = c_parser_peek_token(parser)->location; tree c, t = c_parser_expression(parser).value; t = c_fully_fold(t, false, NULL); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (!INTEGRAL_TYPE_P(TREE_TYPE(t))) { c_parser_error(parser, "expected integer expression"); return list; } /* * Attempt to statically determine when the number isn't * positive. 
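 * For example, num_threads (0): the LE_EXPR built below folds to
 * true, the "must be positive" warning is emitted, and the value is
 * forced to 1.
 *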
*/ c = fold_build2_loc(expr_loc, LE_EXPR, boolean_type_node, t, build_int_cst(TREE_TYPE(t), 0)); if (CAN_HAVE_LOCATION_P(c)) SET_EXPR_LOCATION(c, expr_loc); if (c == boolean_true_node) { warning_at(expr_loc, 0, "%<num_threads%> value must be positive"); t = integer_one_node; } check_no_duplicate_clause(list, OMP_CLAUSE_NUM_THREADS, "num_threads"); c = build_omp_clause(num_threads_loc, OMP_CLAUSE_NUM_THREADS); OMP_CLAUSE_NUM_THREADS_EXPR(c) = t; OMP_CLAUSE_CHAIN(c) = list; list = c; } return list; } /* * OpenMP 2.5: ordered */ static tree c_parser_omp_clause_ordered(c_parser * parser, tree list) { tree c; check_no_duplicate_clause(list, OMP_CLAUSE_ORDERED, "ordered"); c = build_omp_clause(c_parser_peek_token(parser)->location, OMP_CLAUSE_ORDERED); OMP_CLAUSE_CHAIN(c) = list; return c; } /* * OpenMP 2.5: private ( variable-list ) */ static tree c_parser_omp_clause_private(c_parser * parser, tree list) { return c_parser_omp_var_list_parens(parser, OMP_CLAUSE_PRIVATE, list); } /* * OpenMP 2.5: reduction ( reduction-operator : variable-list ) * * reduction-operator: One of: + * - & ^ | && || */ static tree c_parser_omp_clause_reduction(c_parser * parser, tree list) { location_t clause_loc = c_parser_peek_token(parser)->location; if (c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) { enum tree_code code; switch (c_parser_peek_token(parser)->type) { case CPP_PLUS: code = PLUS_EXPR; break; case CPP_MULT: code = MULT_EXPR; break; case CPP_MINUS: code = MINUS_EXPR; break; case CPP_AND: code = BIT_AND_EXPR; break; case CPP_XOR: code = BIT_XOR_EXPR; break; case CPP_OR: code = BIT_IOR_EXPR; break; case CPP_AND_AND: code = TRUTH_ANDIF_EXPR; break; case CPP_OR_OR: code = TRUTH_ORIF_EXPR; break; default: c_parser_error(parser, "expected %<+%>, %<*%>, %<-%>, %<&%>, " "%<^%>, %<|%>, %<&&%>, or %<||%>"); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, 0); return list; } c_parser_consume_token(parser); if (c_parser_require(parser, CPP_COLON, "expected %<:%>")) { tree nl, c; nl = c_parser_omp_variable_list(parser, clause_loc, OMP_CLAUSE_REDUCTION, list); for (c = nl; c != list; c = OMP_CLAUSE_CHAIN(c)) OMP_CLAUSE_REDUCTION_CODE(c) = code; list = nl; } c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); } return list; } /* * OpenMP 2.5: schedule ( schedule-kind ) schedule ( schedule-kind , * expression ) * * schedule-kind: static | dynamic | guided | runtime | auto */ static tree c_parser_omp_clause_schedule(c_parser * parser, tree list) { tree c, t; location_t loc = c_parser_peek_token(parser)->location; if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) return list; c = build_omp_clause(loc, OMP_CLAUSE_SCHEDULE); if (c_parser_next_token_is(parser, CPP_NAME)) { tree kind = c_parser_peek_token(parser)->value; const char *p = IDENTIFIER_POINTER(kind); switch (p[0]) { case 'd': if (strcmp("dynamic", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND(c) = OMP_CLAUSE_SCHEDULE_DYNAMIC; break; case 'g': if (strcmp("guided", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND(c) = OMP_CLAUSE_SCHEDULE_GUIDED; break; case 'r': if (strcmp("runtime", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND(c) = OMP_CLAUSE_SCHEDULE_RUNTIME; break; default: goto invalid_kind; } } else if (c_parser_next_token_is_keyword(parser, RID_STATIC)) OMP_CLAUSE_SCHEDULE_KIND(c) = OMP_CLAUSE_SCHEDULE_STATIC; else if (c_parser_next_token_is_keyword(parser, RID_AUTO)) OMP_CLAUSE_SCHEDULE_KIND(c) = OMP_CLAUSE_SCHEDULE_AUTO; else goto invalid_kind; c_parser_consume_token(parser); if 
(c_parser_next_token_is(parser, CPP_COMMA)) { location_t here; c_parser_consume_token(parser); here = c_parser_peek_token(parser)->location; t = c_parser_expr_no_commas(parser, NULL).value; t = c_fully_fold(t, false, NULL); if (OMP_CLAUSE_SCHEDULE_KIND(c) == OMP_CLAUSE_SCHEDULE_RUNTIME) error_at(here, "schedule %<runtime%> does not take " "a %<chunk_size%> parameter"); else if (OMP_CLAUSE_SCHEDULE_KIND(c) == OMP_CLAUSE_SCHEDULE_AUTO) error_at(here, "schedule %<auto%> does not take " "a %<chunk_size%> parameter"); else if (TREE_CODE(TREE_TYPE(t)) == INTEGER_TYPE) OMP_CLAUSE_SCHEDULE_CHUNK_EXPR(c) = t; else c_parser_error(parser, "expected integer expression"); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<,%> or %<)%>"); check_no_duplicate_clause(list, OMP_CLAUSE_SCHEDULE, "schedule"); OMP_CLAUSE_CHAIN(c) = list; return c; invalid_kind: c_parser_error(parser, "invalid schedule kind"); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, 0); return list; } /* * OpenMP 2.5: shared ( variable-list ) */ static tree c_parser_omp_clause_shared(c_parser * parser, tree list) { return c_parser_omp_var_list_parens(parser, OMP_CLAUSE_SHARED, list); } /* * OpenMP 3.0: untied */ static tree c_parser_omp_clause_untied(c_parser * parser ATTRIBUTE_UNUSED, tree list) { tree c; /* FIXME: Should we allow duplicates? */ check_no_duplicate_clause(list, OMP_CLAUSE_UNTIED, "untied"); c = build_omp_clause(c_parser_peek_token(parser)->location, OMP_CLAUSE_UNTIED); OMP_CLAUSE_CHAIN(c) = list; return c; } /* * Parse all OpenMP clauses. The set clauses allowed by the directive is * a bitmask in MASK. Return the list of clauses found; the result of * clause default goes in *pdefault. */ static tree c_parser_omp_all_clauses(c_parser * parser, unsigned int mask, const char *where) { tree clauses = NULL; bool first = true; while (c_parser_next_token_is_not(parser, CPP_PRAGMA_EOL)) { location_t here; pragma_omp_clause c_kind; const char *c_name; tree prev = clauses; if (!first && c_parser_next_token_is(parser, CPP_COMMA)) c_parser_consume_token(parser); first = false; here = c_parser_peek_token(parser)->location; c_kind = c_parser_omp_clause_name(parser); switch (c_kind) { case PRAGMA_OMP_CLAUSE_COLLAPSE: clauses = c_parser_omp_clause_collapse(parser, clauses); c_name = "collapse"; break; case PRAGMA_OMP_CLAUSE_COPYIN: clauses = c_parser_omp_clause_copyin(parser, clauses); c_name = "copyin"; break; case PRAGMA_OMP_CLAUSE_COPYPRIVATE: clauses = c_parser_omp_clause_copyprivate(parser, clauses); c_name = "copyprivate"; break; case PRAGMA_OMP_CLAUSE_DEFAULT: clauses = c_parser_omp_clause_default(parser, clauses); c_name = "default"; break; case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE: clauses = c_parser_omp_clause_firstprivate(parser, clauses); c_name = "firstprivate"; break; case PRAGMA_OMP_CLAUSE_IF: clauses = c_parser_omp_clause_if(parser, clauses); c_name = "if"; break; case PRAGMA_OMP_CLAUSE_LASTPRIVATE: clauses = c_parser_omp_clause_lastprivate(parser, clauses); c_name = "lastprivate"; break; case PRAGMA_OMP_CLAUSE_NOWAIT: clauses = c_parser_omp_clause_nowait(parser, clauses); c_name = "nowait"; break; case PRAGMA_OMP_CLAUSE_NUM_THREADS: clauses = c_parser_omp_clause_num_threads(parser, clauses); c_name = "num_threads"; break; case PRAGMA_OMP_CLAUSE_ORDERED: clauses = c_parser_omp_clause_ordered(parser, clauses); c_name = "ordered"; break; case PRAGMA_OMP_CLAUSE_PRIVATE: clauses = c_parser_omp_clause_private(parser, clauses); c_name = 
"private"; break; case PRAGMA_OMP_CLAUSE_REDUCTION: clauses = c_parser_omp_clause_reduction(parser, clauses); c_name = "reduction"; break; case PRAGMA_OMP_CLAUSE_SCHEDULE: clauses = c_parser_omp_clause_schedule(parser, clauses); c_name = "schedule"; break; case PRAGMA_OMP_CLAUSE_SHARED: clauses = c_parser_omp_clause_shared(parser, clauses); c_name = "shared"; break; case PRAGMA_OMP_CLAUSE_UNTIED: clauses = c_parser_omp_clause_untied(parser, clauses); c_name = "untied"; break; default: c_parser_error(parser, "expected %<#pragma omp%> clause"); goto saw_error; } if (((mask >> c_kind) & 1) == 0 && !parser->error) { /* * Remove the invalid clause(s) from the list to avoid * confusing the rest of the compiler. */ clauses = prev; error_at(here, "%qs is not valid for %qs", c_name, where); } } saw_error: c_parser_skip_to_pragma_eol(parser); return c_finish_omp_clauses(clauses); } /* * OpenMP 2.5: structured-block: statement * * In practice, we're also interested in adding the statement to an outer * node. So it is convenient if we work around the fact that * c_parser_statement calls add_stmt. */ static tree c_parser_omp_structured_block(c_parser * parser) { tree stmt = push_stmt_list(); c_parser_statement(parser); return pop_stmt_list(stmt); } /* * OpenMP 2.5: # pragma omp atomic new-line expression-stmt * * expression-stmt: x binop= expr | x++ | ++x | x-- | --x binop: +, *, -, /, * &, ^, |, <<, >> * * where x is an lvalue expression with scalar type. * * LOC is the location of the #pragma token. */ static void c_parser_omp_atomic(location_t loc, c_parser * parser) { tree lhs, rhs; tree stmt; enum tree_code code; struct c_expr rhs_expr; c_parser_skip_to_pragma_eol(parser); lhs = c_parser_unary_expression(parser).value; lhs = c_fully_fold(lhs, false, NULL); switch (TREE_CODE(lhs)) { case ERROR_MARK: saw_error: c_parser_skip_to_end_of_block_or_statement(parser); return; case PREINCREMENT_EXPR: case POSTINCREMENT_EXPR: lhs = TREE_OPERAND(lhs, 0); code = PLUS_EXPR; rhs = integer_one_node; break; case PREDECREMENT_EXPR: case POSTDECREMENT_EXPR: lhs = TREE_OPERAND(lhs, 0); code = MINUS_EXPR; rhs = integer_one_node; break; case COMPOUND_EXPR: if (TREE_CODE(TREE_OPERAND(lhs, 0)) == SAVE_EXPR && TREE_CODE(TREE_OPERAND(lhs, 1)) == COMPOUND_EXPR && TREE_CODE(TREE_OPERAND(TREE_OPERAND(lhs, 1), 0)) == MODIFY_EXPR && TREE_OPERAND(TREE_OPERAND(lhs, 1), 1) == TREE_OPERAND(lhs, 0) && TREE_CODE(TREE_TYPE(TREE_OPERAND(TREE_OPERAND (TREE_OPERAND(lhs, 1), 0), 0))) == BOOLEAN_TYPE) /* Undo effects of boolean_increment for post {in,de}crement. */ lhs = TREE_OPERAND(TREE_OPERAND(lhs, 1), 0); /* FALLTHRU */ case MODIFY_EXPR: if (TREE_CODE(lhs) == MODIFY_EXPR && TREE_CODE(TREE_TYPE(TREE_OPERAND(lhs, 0))) == BOOLEAN_TYPE) { /* Undo effects of boolean_increment. */ if (integer_onep(TREE_OPERAND(lhs, 1))) { /* This is pre or post increment. */ rhs = TREE_OPERAND(lhs, 1); lhs = TREE_OPERAND(lhs, 0); code = NOP_EXPR; break; } if (TREE_CODE(TREE_OPERAND(lhs, 1)) == TRUTH_NOT_EXPR && TREE_OPERAND(lhs, 0) == TREE_OPERAND(TREE_OPERAND(lhs, 1), 0)) { /* This is pre or post decrement. 
*/ rhs = TREE_OPERAND(lhs, 1); lhs = TREE_OPERAND(lhs, 0); code = NOP_EXPR; break; } } /* FALLTHRU */ default: switch (c_parser_peek_token(parser)->type) { case CPP_MULT_EQ: code = MULT_EXPR; break; case CPP_DIV_EQ: code = TRUNC_DIV_EXPR; break; case CPP_PLUS_EQ: code = PLUS_EXPR; break; case CPP_MINUS_EQ: code = MINUS_EXPR; break; case CPP_LSHIFT_EQ: code = LSHIFT_EXPR; break; case CPP_RSHIFT_EQ: code = RSHIFT_EXPR; break; case CPP_AND_EQ: code = BIT_AND_EXPR; break; case CPP_OR_EQ: code = BIT_IOR_EXPR; break; case CPP_XOR_EQ: code = BIT_XOR_EXPR; break; default: c_parser_error(parser, "invalid operator for %<#pragma omp atomic%>"); goto saw_error; } c_parser_consume_token(parser); { location_t rhs_loc = c_parser_peek_token(parser)->location; rhs_expr = c_parser_expression(parser); rhs_expr = default_function_array_conversion(rhs_loc, rhs_expr); } rhs = rhs_expr.value; rhs = c_fully_fold(rhs, false, NULL); break; } stmt = c_finish_omp_atomic(loc, code, lhs, rhs); if (stmt != error_mark_node) add_stmt(stmt); c_parser_skip_until_found(parser, CPP_SEMICOLON, "expected %<;%>"); } /* * OpenMP 2.5: # pragma omp barrier new-line */ static void c_parser_omp_barrier(c_parser * parser) { location_t loc = c_parser_peek_token(parser)->location; c_parser_consume_pragma(parser); c_parser_skip_to_pragma_eol(parser); c_finish_omp_barrier(loc); } /* * OpenMP 2.5: # pragma omp critical [(name)] new-line structured-block * * LOC is the location of the #pragma itself. */ static tree c_parser_omp_critical(location_t loc, c_parser * parser) { tree stmt, name = NULL; if (c_parser_next_token_is(parser, CPP_OPEN_PAREN)) { c_parser_consume_token(parser); if (c_parser_next_token_is(parser, CPP_NAME)) { name = c_parser_peek_token(parser)->value; c_parser_consume_token(parser); c_parser_require(parser, CPP_CLOSE_PAREN, "expected %<)%>"); } else c_parser_error(parser, "expected identifier"); } else if (c_parser_next_token_is_not(parser, CPP_PRAGMA_EOL)) c_parser_error(parser, "expected %<(%> or end of line"); c_parser_skip_to_pragma_eol(parser); stmt = c_parser_omp_structured_block(parser); return c_finish_omp_critical(loc, stmt, name); } /* * OpenMP 2.5: # pragma omp flush flush-vars[opt] new-line * * flush-vars: ( variable-list ) */ static void c_parser_omp_flush(c_parser * parser) { location_t loc = c_parser_peek_token(parser)->location; c_parser_consume_pragma(parser); if (c_parser_next_token_is(parser, CPP_OPEN_PAREN)) c_parser_omp_var_list_parens(parser, OMP_CLAUSE_ERROR, NULL); else if (c_parser_next_token_is_not(parser, CPP_PRAGMA_EOL)) c_parser_error(parser, "expected %<(%> or end of line"); c_parser_skip_to_pragma_eol(parser); c_finish_omp_flush(loc); } /* * Parse the restricted form of the for statement allowed by OpenMP. The * real trick here is to determine the loop control variable early so * that we can push a new decl if necessary to make it private. LOC is * the location of the OMP in "#pragma omp". 
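 * For example, the canonical loop for (i = 0; i < n; i++) is
 * accepted; the condition must use <, <=, > or >=, and anything else
 * is wrapped so that c_finish_omp_for can report it with the right
 * location.
 *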
*/ static tree c_parser_omp_for_loop(location_t loc, c_parser * parser, tree clauses, tree * par_clauses) { tree decl, cond, incr, save_break, save_cont, body, init, stmt, cl; tree declv, condv, incrv, initv, for_block = NULL, ret = NULL; bool fail = false, open_brace_parsed = false; int i, collapse = 1, nbraces = 0; location_t for_loc; for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN(cl)) if (OMP_CLAUSE_CODE(cl) == OMP_CLAUSE_COLLAPSE) collapse = tree_low_cst(OMP_CLAUSE_COLLAPSE_EXPR(cl), 0); gcc_assert(collapse >= 1); declv = make_tree_vec(collapse); initv = make_tree_vec(collapse); condv = make_tree_vec(collapse); incrv = make_tree_vec(collapse); if (!c_parser_next_token_is_keyword(parser, RID_FOR)) { c_parser_error(parser, "for statement expected"); return NULL; } for_loc = c_parser_peek_token(parser)->location; c_parser_consume_token(parser); for (i = 0; i < collapse; i++) { int bracecount = 0; if (!c_parser_require(parser, CPP_OPEN_PAREN, "expected %<(%>")) goto pop_scopes; /* Parse the initialization declaration or expression. */ if (c_parser_next_token_starts_declspecs(parser)) { if (i > 0) for_block = tree_cons(NULL, c_begin_compound_stmt(true), for_block); c_parser_declaration_or_fndef(parser, true, true, true, true); decl = check_for_loop_decls(for_loc); if (decl == NULL) goto error_init; if (DECL_INITIAL(decl) == error_mark_node) decl = error_mark_node; init = decl; } else if (c_parser_next_token_is(parser, CPP_NAME) && c_parser_peek_2nd_token(parser)->type == CPP_EQ) { struct c_expr decl_exp; struct c_expr init_exp; location_t init_loc; decl_exp = c_parser_postfix_expression(parser); decl = decl_exp.value; c_parser_require(parser, CPP_EQ, "expected %<=%>"); init_loc = c_parser_peek_token(parser)->location; init_exp = c_parser_expr_no_commas(parser, NULL); init_exp = default_function_array_conversion(init_loc, init_exp); init = build_modify_expr(init_loc, decl, decl_exp.original_type, NOP_EXPR, init_loc, init_exp.value, init_exp.original_type); init = c_process_expr_stmt(init_loc, init); c_parser_skip_until_found(parser, CPP_SEMICOLON, "expected %<;%>"); } else { error_init: c_parser_error(parser, "expected iteration declaration or initialization"); c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); fail = true; goto parse_next; } /* Parse the loop condition. */ cond = NULL_TREE; if (c_parser_next_token_is_not(parser, CPP_SEMICOLON)) { location_t cond_loc = c_parser_peek_token(parser)->location; struct c_expr cond_expr = c_parser_binary_expression(parser, NULL); cond = cond_expr.value; cond = c_objc_common_truthvalue_conversion(cond_loc, cond); cond = c_fully_fold(cond, false, NULL); switch (cond_expr.original_code) { case GT_EXPR: case GE_EXPR: case LT_EXPR: case LE_EXPR: break; default: /* * Can't be cond = error_mark_node, because we want to * preserve the location until c_finish_omp_for. */ cond = build1(NOP_EXPR, boolean_type_node, error_mark_node); break; } protected_set_expr_location(cond, cond_loc); } c_parser_skip_until_found(parser, CPP_SEMICOLON, "expected %<;%>"); /* Parse the increment expression. 
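 * For example, the i++ of for (i = 0; i < n; i++); when the
 * increment is omitted, INCR stays NULL_TREE.
 *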
*/ incr = NULL_TREE; if (c_parser_next_token_is_not(parser, CPP_CLOSE_PAREN)) { location_t incr_loc = c_parser_peek_token(parser)->location; incr = c_process_expr_stmt(incr_loc, c_parser_expression(parser).value); } c_parser_skip_until_found(parser, CPP_CLOSE_PAREN, "expected %<)%>"); if (decl == NULL || decl == error_mark_node || init == error_mark_node) fail = true; else { TREE_VEC_ELT(declv, i) = decl; TREE_VEC_ELT(initv, i) = init; TREE_VEC_ELT(condv, i) = cond; TREE_VEC_ELT(incrv, i) = incr; } parse_next: if (i == collapse - 1) break; /* * FIXME: OpenMP 3.0 draft isn't very clear on what exactly is * allowed in between the collapsed for loops to be still * considered perfectly nested. Hopefully the final version * clarifies this. For now handle (multiple) {'s and empty * statements. */ do { if (c_parser_next_token_is_keyword(parser, RID_FOR)) { c_parser_consume_token(parser); break; } else if (c_parser_next_token_is(parser, CPP_OPEN_BRACE)) { c_parser_consume_token(parser); bracecount++; } else if (bracecount && c_parser_next_token_is(parser, CPP_SEMICOLON)) c_parser_consume_token(parser); else { c_parser_error(parser, "not enough perfectly nested loops"); if (bracecount) { open_brace_parsed = true; bracecount--; } fail = true; collapse = 0; break; } } while (1); nbraces += bracecount; } save_break = c_break_label; c_break_label = size_one_node; save_cont = c_cont_label; c_cont_label = NULL_TREE; body = push_stmt_list(); if (open_brace_parsed) { location_t here = c_parser_peek_token(parser)->location; stmt = c_begin_compound_stmt(true); c_parser_compound_statement_nostart(parser); add_stmt(c_end_compound_stmt(here, stmt, true)); } else add_stmt(c_parser_c99_block_statement(parser)); if (c_cont_label) { tree t = build1(LABEL_EXPR, void_type_node, c_cont_label); SET_EXPR_LOCATION(t, loc); add_stmt(t); } body = pop_stmt_list(body); c_break_label = save_break; c_cont_label = save_cont; while (nbraces) { if (c_parser_next_token_is(parser, CPP_CLOSE_BRACE)) { c_parser_consume_token(parser); nbraces--; } else if (c_parser_next_token_is(parser, CPP_SEMICOLON)) c_parser_consume_token(parser); else { c_parser_error(parser, "collapsed loops not perfectly nested"); while (nbraces) { location_t here = c_parser_peek_token(parser)->location; stmt = c_begin_compound_stmt(true); add_stmt(body); c_parser_compound_statement_nostart(parser); body = c_end_compound_stmt(here, stmt, true); nbraces--; } goto pop_scopes; } } /* * Only bother calling c_finish_omp_for if we haven't already * generated an error from the initialization parsing. */ if (!fail) { stmt = c_finish_omp_for(loc, declv, initv, condv, incrv, body, NULL); if (stmt) { if (par_clauses != NULL) { tree *c; for (c = par_clauses; *c;) if (OMP_CLAUSE_CODE(*c) != OMP_CLAUSE_FIRSTPRIVATE && OMP_CLAUSE_CODE(*c) != OMP_CLAUSE_LASTPRIVATE) c = &OMP_CLAUSE_CHAIN(*c); else { for (i = 0; i < collapse; i++) if (TREE_VEC_ELT(declv, i) == OMP_CLAUSE_DECL(*c)) break; if (i == collapse) c = &OMP_CLAUSE_CHAIN(*c); else if (OMP_CLAUSE_CODE(*c) == OMP_CLAUSE_FIRSTPRIVATE) { error_at(loc, "iteration variable %qD should not be firstprivate", OMP_CLAUSE_DECL(*c)); *c = OMP_CLAUSE_CHAIN(*c); } else { /* * Copy lastprivate (decl) clause to * OMP_FOR_CLAUSES, change it to shared * (decl) in OMP_PARALLEL_CLAUSES. 
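 * For example, for #pragma omp parallel for lastprivate (x), the
 * loop keeps lastprivate (x) while the enclosing parallel region
 * sees shared (x).
 *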
*/ tree l = build_omp_clause(OMP_CLAUSE_LOCATION(*c), OMP_CLAUSE_LASTPRIVATE); OMP_CLAUSE_DECL(l) = OMP_CLAUSE_DECL(*c); OMP_CLAUSE_CHAIN(l) = clauses; clauses = l; OMP_CLAUSE_SET_CODE(*c, OMP_CLAUSE_SHARED); } } } OMP_FOR_CLAUSES(stmt) = clauses; } ret = stmt; } pop_scopes: while (for_block) { /* * FIXME diagnostics: LOC below should be the actual location of * this particular for block. We need to build a list of * locations to go along with FOR_BLOCK. */ stmt = c_end_compound_stmt(loc, TREE_VALUE(for_block), true); add_stmt(stmt); for_block = TREE_CHAIN(for_block); } return ret; } /* * OpenMP 2.5: #pragma omp for for-clause[optseq] new-line for-loop * * LOC is the location of the #pragma token. */ #define OMP_FOR_CLAUSE_MASK \ ( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (1u << PRAGMA_OMP_CLAUSE_ORDERED) \ | (1u << PRAGMA_OMP_CLAUSE_SCHEDULE) \ | (1u << PRAGMA_OMP_CLAUSE_COLLAPSE) \ | (1u << PRAGMA_OMP_CLAUSE_NOWAIT)) static tree c_parser_omp_for(location_t loc, c_parser * parser) { tree block, clauses, ret; clauses = c_parser_omp_all_clauses(parser, OMP_FOR_CLAUSE_MASK, "#pragma omp for"); block = c_begin_compound_stmt(true); ret = c_parser_omp_for_loop(loc, parser, clauses, NULL); block = c_end_compound_stmt(loc, block, true); add_stmt(block); return ret; } /* * OpenMP 2.5: # pragma omp master new-line structured-block * * LOC is the location of the #pragma token. */ static tree c_parser_omp_master(location_t loc, c_parser * parser) { c_parser_skip_to_pragma_eol(parser); return c_finish_omp_master(loc, c_parser_omp_structured_block(parser)); } /* * OpenMP 2.5: # pragma omp ordered new-line structured-block * * LOC is the location of the #pragma itself. */ static tree c_parser_omp_ordered(location_t loc, c_parser * parser) { c_parser_skip_to_pragma_eol(parser); return c_finish_omp_ordered(loc, c_parser_omp_structured_block(parser)); } /* * OpenMP 2.5: * * section-scope: { section-sequence } * * section-sequence: section-directive[opt] structured-block * section-sequence section-directive structured-block * * SECTIONS_LOC is the location of the #pragma omp sections. */ static tree c_parser_omp_sections_scope(location_t sections_loc, c_parser * parser) { tree stmt, substmt; bool error_suppress = false; location_t loc; loc = c_parser_peek_token(parser)->location; if (!c_parser_require(parser, CPP_OPEN_BRACE, "expected %<{%>")) { /* Avoid skipping until the end of the block. 
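 * (c_parser_require has already reported the missing %<{%>;
 * clearing parser->error below re-enables later diagnostics.)
 *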
*/ parser->error = false; return NULL_TREE; } stmt = push_stmt_list(); if (c_parser_peek_token(parser)->pragma_kind != PRAGMA_OMP_SECTION) { substmt = push_stmt_list(); while (1) { c_parser_statement(parser); if (c_parser_peek_token(parser)->pragma_kind == PRAGMA_OMP_SECTION) break; if (c_parser_next_token_is(parser, CPP_CLOSE_BRACE)) break; if (c_parser_next_token_is(parser, CPP_EOF)) break; } substmt = pop_stmt_list(substmt); substmt = build1(OMP_SECTION, void_type_node, substmt); SET_EXPR_LOCATION(substmt, loc); add_stmt(substmt); } while (1) { if (c_parser_next_token_is(parser, CPP_CLOSE_BRACE)) break; if (c_parser_next_token_is(parser, CPP_EOF)) break; loc = c_parser_peek_token(parser)->location; if (c_parser_peek_token(parser)->pragma_kind == PRAGMA_OMP_SECTION) { c_parser_consume_pragma(parser); c_parser_skip_to_pragma_eol(parser); error_suppress = false; } else if (!error_suppress) { error_at(loc, "expected %<#pragma omp section%> or %<}%>"); error_suppress = true; } substmt = c_parser_omp_structured_block(parser); substmt = build1(OMP_SECTION, void_type_node, substmt); SET_EXPR_LOCATION(substmt, loc); add_stmt(substmt); } c_parser_skip_until_found(parser, CPP_CLOSE_BRACE, "expected %<#pragma omp section%> or %<}%>"); substmt = pop_stmt_list(stmt); stmt = make_node(OMP_SECTIONS); SET_EXPR_LOCATION(stmt, sections_loc); TREE_TYPE(stmt) = void_type_node; OMP_SECTIONS_BODY(stmt) = substmt; return add_stmt(stmt); } /* * OpenMP 2.5: # pragma omp sections sections-clause[optseq] newline * sections-scope * * LOC is the location of the #pragma token. */ #define OMP_SECTIONS_CLAUSE_MASK \ ( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (1u << PRAGMA_OMP_CLAUSE_NOWAIT)) static tree c_parser_omp_sections(location_t loc, c_parser * parser) { tree block, clauses, ret; clauses = c_parser_omp_all_clauses(parser, OMP_SECTIONS_CLAUSE_MASK, "#pragma omp sections"); block = c_begin_compound_stmt(true); ret = c_parser_omp_sections_scope(loc, parser); if (ret) OMP_SECTIONS_CLAUSES(ret) = clauses; block = c_end_compound_stmt(loc, block, true); add_stmt(block); return ret; } /* * OpenMP 2.5: # pragma parallel parallel-clause new-line # pragma * parallel for parallel-for-clause new-line # pragma parallel sections * parallel-sections-clause new-line * * LOC is the location of the #pragma token. 
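 * Illustrative inputs: #pragma omp parallel num_threads (4),
 * #pragma omp parallel for schedule (static) and
 * #pragma omp parallel sections; the combined forms also accept the
 * respective worksharing clauses, minus nowait.
 *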
*/ #define OMP_PARALLEL_CLAUSE_MASK \ ( (1u << PRAGMA_OMP_CLAUSE_IF) \ | (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_DEFAULT) \ | (1u << PRAGMA_OMP_CLAUSE_SHARED) \ | (1u << PRAGMA_OMP_CLAUSE_COPYIN) \ | (1u << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (1u << PRAGMA_OMP_CLAUSE_NUM_THREADS)) static tree c_parser_omp_parallel(location_t loc, c_parser * parser) { enum pragma_kind p_kind = PRAGMA_OMP_PARALLEL; const char *p_name = "#pragma omp parallel"; tree stmt, clauses, par_clause, ws_clause, block; unsigned int mask = OMP_PARALLEL_CLAUSE_MASK; if (c_parser_next_token_is_keyword(parser, RID_FOR)) { c_parser_consume_token(parser); p_kind = PRAGMA_OMP_PARALLEL_FOR; p_name = "#pragma omp parallel for"; mask |= OMP_FOR_CLAUSE_MASK; mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT); } else if (c_parser_next_token_is(parser, CPP_NAME)) { const char *p = IDENTIFIER_POINTER(c_parser_peek_token(parser)->value); if (strcmp(p, "sections") == 0) { c_parser_consume_token(parser); p_kind = PRAGMA_OMP_PARALLEL_SECTIONS; p_name = "#pragma omp parallel sections"; mask |= OMP_SECTIONS_CLAUSE_MASK; mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT); } } clauses = c_parser_omp_all_clauses(parser, mask, p_name); switch (p_kind) { case PRAGMA_OMP_PARALLEL: block = c_begin_omp_parallel(); c_parser_statement(parser); stmt = c_finish_omp_parallel(loc, clauses, block); break; case PRAGMA_OMP_PARALLEL_FOR: block = c_begin_omp_parallel(); c_split_parallel_clauses(loc, clauses, &par_clause, &ws_clause); c_parser_omp_for_loop(loc, parser, ws_clause, &par_clause); stmt = c_finish_omp_parallel(loc, par_clause, block); OMP_PARALLEL_COMBINED(stmt) = 1; break; case PRAGMA_OMP_PARALLEL_SECTIONS: block = c_begin_omp_parallel(); c_split_parallel_clauses(loc, clauses, &par_clause, &ws_clause); stmt = c_parser_omp_sections_scope(loc, parser); if (stmt) OMP_SECTIONS_CLAUSES(stmt) = ws_clause; stmt = c_finish_omp_parallel(loc, par_clause, block); OMP_PARALLEL_COMBINED(stmt) = 1; break; default: gcc_unreachable(); } return stmt; } /* * OpenMP 2.5: # pragma omp single single-clause[optseq] new-line * structured-block * * LOC is the location of the #pragma. */ #define OMP_SINGLE_CLAUSE_MASK \ ( (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_COPYPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_NOWAIT)) static tree c_parser_omp_single(location_t loc, c_parser * parser) { tree stmt = make_node(OMP_SINGLE); SET_EXPR_LOCATION(stmt, loc); TREE_TYPE(stmt) = void_type_node; OMP_SINGLE_CLAUSES(stmt) = c_parser_omp_all_clauses(parser, OMP_SINGLE_CLAUSE_MASK, "#pragma omp single"); OMP_SINGLE_BODY(stmt) = c_parser_omp_structured_block(parser); return add_stmt(stmt); } /* * OpenMP 3.0: # pragma omp task task-clause[optseq] new-line * * LOC is the location of the #pragma. 
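 * An illustrative (made-up) input:
 * #pragma omp task untied if (depth < 8) firstprivate (depth)
 * where all clause names come from the mask below.
 *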
*/ #define OMP_TASK_CLAUSE_MASK \ ( (1u << PRAGMA_OMP_CLAUSE_IF) \ | (1u << PRAGMA_OMP_CLAUSE_UNTIED) \ | (1u << PRAGMA_OMP_CLAUSE_DEFAULT) \ | (1u << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (1u << PRAGMA_OMP_CLAUSE_SHARED)) static tree c_parser_omp_task(location_t loc, c_parser * parser) { tree clauses, block; clauses = c_parser_omp_all_clauses(parser, OMP_TASK_CLAUSE_MASK, "#pragma omp task"); block = c_begin_omp_task(); c_parser_statement(parser); return c_finish_omp_task(loc, clauses, block); } /* * OpenMP 3.0: # pragma omp taskwait new-line */ static void c_parser_omp_taskwait(c_parser * parser) { location_t loc = c_parser_peek_token(parser)->location; c_parser_consume_pragma(parser); c_parser_skip_to_pragma_eol(parser); c_finish_omp_taskwait(loc); } /* Main entry point to parsing most OpenMP pragmas. */ static void c_parser_omp_construct(c_parser * parser) { enum pragma_kind p_kind; location_t loc; tree stmt; loc = c_parser_peek_token(parser)->location; p_kind = c_parser_peek_token(parser)->pragma_kind; c_parser_consume_pragma(parser); switch (p_kind) { case PRAGMA_OMP_ATOMIC: c_parser_omp_atomic(loc, parser); return; case PRAGMA_OMP_CRITICAL: stmt = c_parser_omp_critical(loc, parser); break; case PRAGMA_OMP_FOR: stmt = c_parser_omp_for(loc, parser); break; case PRAGMA_OMP_MASTER: stmt = c_parser_omp_master(loc, parser); break; case PRAGMA_OMP_ORDERED: stmt = c_parser_omp_ordered(loc, parser); break; case PRAGMA_OMP_PARALLEL: stmt = c_parser_omp_parallel(loc, parser); break; case PRAGMA_OMP_SECTIONS: stmt = c_parser_omp_sections(loc, parser); break; case PRAGMA_OMP_SINGLE: stmt = c_parser_omp_single(loc, parser); break; case PRAGMA_OMP_TASK: stmt = c_parser_omp_task(loc, parser); break; default: gcc_unreachable(); } if (stmt) gcc_assert(EXPR_LOCATION(stmt) != UNKNOWN_LOCATION); } /* * OpenMP 2.5: # pragma omp threadprivate (variable-list) */ static void c_parser_omp_threadprivate(c_parser * parser) { tree vars, t; location_t loc; c_parser_consume_pragma(parser); loc = c_parser_peek_token(parser)->location; vars = c_parser_omp_var_list_parens(parser, OMP_CLAUSE_ERROR, NULL); /* Mark every variable in VARS to be assigned thread local storage. */ for (t = vars; t; t = TREE_CHAIN(t)) { tree v = TREE_PURPOSE(t); /* * FIXME diagnostics: Ideally we should keep individual locations * for all the variables in the var list to make the following * errors more precise. Perhaps c_parser_omp_var_list_parens() * should construct a list of locations to go along with the var * list. */ /* * If V had already been marked threadprivate, it doesn't matter * whether it had been used prior to this point. */ if (TREE_CODE(v) != VAR_DECL) error_at(loc, "%qD is not a variable", v); else if (TREE_USED(v) && !C_DECL_THREADPRIVATE_P(v)) error_at(loc, "%qE declared %<threadprivate%> after first use", v); else if (!TREE_STATIC(v) && !DECL_EXTERNAL(v)) error_at(loc, "automatic variable %qE cannot be %<threadprivate%>", v); else if (TREE_TYPE(v) == error_mark_node) ; else if (!COMPLETE_TYPE_P(TREE_TYPE(v))) error_at(loc, "%<threadprivate%> %qE has incomplete type", v); else { if (!DECL_THREAD_LOCAL_P(v)) { DECL_TLS_MODEL(v) = decl_default_tls_model(v); /* * If rtl has been already set for this var, call * make_decl_rtl once again, so that encode_section_info * has a chance to look at the new decl flags. */ if (DECL_RTL_SET_P(v)) make_decl_rtl(v); } C_DECL_THREADPRIVATE_P(v) = 1; } } c_parser_skip_to_pragma_eol(parser); } /* Parse a single source file. 
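 * (The parser object starts out on the stack because handling
 * #pragma GCC pch_preprocess may trigger garbage collection;
 * GC-managed storage is only allocated afterwards.)
 *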
*/ void c_parse_file(void) { /* * Use local storage to begin. If the first token is a pragma, parse * it. If it is #pragma GCC pch_preprocess, then this will load a PCH * file which will cause garbage collection. */ c_parser tparser; memset(&tparser, 0, sizeof tparser); the_parser = &tparser; if (c_parser_peek_token(&tparser)->pragma_kind == PRAGMA_GCC_PCH_PREPROCESS) c_parser_pragma_pch_preprocess(&tparser); the_parser = GGC_NEW(c_parser); *the_parser = tparser; /* Initialize EH, if we've been told to do so. */ if (flag_exceptions) using_eh_for_cleanups(); c_parser_translation_unit(the_parser); the_parser = NULL; } #include "gt-c-parser.h" #ifdef __cplusplus } /* extern "C" */ #endif
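/*
 * Illustrative appendix (added for exposition; not part of the
 * original c-parser.c).  A small translation unit exercising several
 * of the OpenMP constructs whose parsers appear above.  All names
 * here are made up, and the guard macro OMP_PARSE_EXAMPLES is
 * hypothetical; compile the snippet on its own with -fopenmp to try
 * it.
 */
#ifdef OMP_PARSE_EXAMPLES
static int calls;
#pragma omp threadprivate (calls)	/* c_parser_omp_threadprivate */

double
omp_parse_examples (int n, const double *a)
{
  double sum = 0.0;
  int i;

  /* Worksharing loop with reduction and schedule clauses.  */
  #pragma omp parallel for reduction (+:sum) schedule (dynamic, 16)
  for (i = 0; i < n; i++)
    sum += a[i];

  /* The "x binop= expr" form accepted by c_parser_omp_atomic.  */
  #pragma omp atomic
  calls += 1;

  /* A sections construct; each section is one structured block.  */
  #pragma omp sections
  {
    #pragma omp section
    sum *= 2.0;
    #pragma omp section
    sum += 1.0;
  }
  return sum;
}
#endif /* OMP_PARSE_EXAMPLES */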
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/OpenMPClause.h" #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. /// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. 
IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFENVHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. 
This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; /// Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. 
/// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc, Priority Prio) { if (!Locs.empty() && Locs.back().isActive(P)) { if (Locs.back().Priority <= Prio) { Locs.back().TemplateName = TemplateName; Locs.back().LessLoc = LessLoc; Locs.back().Priority = Prio; } } else { Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount, P.BracketCount, P.BraceCount}); } } /// Mark the current potential missing template location as having been /// handled (this happens if we pass a "corresponding" '>' or '>>' token /// or leave a bracket scope). void clear(Parser &P) { while (!Locs.empty() && Locs.back().isActiveOrNested(P)) Locs.pop_back(); } /// Get the current enclosing expression that might hve been intended to be /// a template name. Loc *getCurrent(Parser &P) { if (!Locs.empty() && Locs.back().isActive(P)) return &Locs.back(); return nullptr; } }; AngleBracketTracker AngleBrackets; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; /// Whether to skip parsing of function bodies. /// /// This option can be used, for example, to speed up searches for /// declarations/definitions when indexing. bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; /// Flags describing a context in which we're parsing a statement. enum class ParsedStmtContext { /// This context permits declarations in language modes where declarations /// are not statements. AllowDeclarationsInC = 0x1, /// This context permits standalone OpenMP directives. AllowStandaloneOpenMPDirectives = 0x2, /// This context is at the top level of a GNU statement expression. InStmtExpr = 0x4, /// The context of a regular substatement. SubStmt = 0, /// The context of a compound-statement. Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives, LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr) }; /// Act on an expression statement that might be the last statement in a /// GNU statement expression. Checks whether we are actually at the end of /// a statement expression and builds a suitable expression statement. StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx); public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. 
  // Type forwarding.  All of these are statically 'void*', but they may all be
  // different actual classes based on the actions in place.
  typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
  typedef OpaquePtr<TemplateName> TemplateTy;

  typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;

  typedef Sema::FullExprArg FullExprArg;

  // Parsing methods.

  /// Initialize - Warm up the parser.
  ///
  void Initialize();

  /// Parse the first top-level declaration in a translation unit.
  bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);

  /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
  /// the EOF was encountered.
  bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
  bool ParseTopLevelDecl() {
    DeclGroupPtrTy Result;
    return ParseTopLevelDecl(Result);
  }

  /// ConsumeToken - Consume the current 'peek token' and lex the next one.
  /// This does not work with special tokens: string literals, code completion,
  /// annotation tokens and balanced tokens must be handled using the specific
  /// consume methods.
  /// Returns the location of the consumed token.
  SourceLocation ConsumeToken() {
    assert(!isTokenSpecial() &&
           "Should consume special tokens with Consume*Token");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  bool TryConsumeToken(tok::TokenKind Expected) {
    if (Tok.isNot(Expected))
      return false;
    assert(!isTokenSpecial() &&
           "Should consume special tokens with Consume*Token");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return true;
  }

  bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
    if (!TryConsumeToken(Expected))
      return false;
    Loc = PrevTokLocation;
    return true;
  }

  /// ConsumeAnyToken - Dispatch to the right Consume* method based on the
  /// current token type.  This should only be used in cases where the type of
  /// the token really isn't known, e.g. in error recovery.
  SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
    if (isTokenParen())
      return ConsumeParen();
    if (isTokenBracket())
      return ConsumeBracket();
    if (isTokenBrace())
      return ConsumeBrace();
    if (isTokenStringLiteral())
      return ConsumeStringToken();
    if (Tok.is(tok::code_completion))
      return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
                                      : handleUnexpectedCodeCompletionToken();
    if (Tok.isAnnotation())
      return ConsumeAnnotationToken();
    return ConsumeToken();
  }

  SourceLocation getEndOfPreviousToken() {
    return PP.getLocForEndOfToken(PrevTokLocation);
  }

  /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
  /// to the given nullability kind.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
    return Actions.getNullabilityKeyword(nullability);
  }

private:
  //===--------------------------------------------------------------------===//
  // Low-Level token peeking and consumption methods.
  //

  /// isTokenParen - Return true if the cur token is '(' or ')'.
  bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); }
  /// isTokenBracket - Return true if the cur token is '[' or ']'.
  bool isTokenBracket() const {
    return Tok.isOneOf(tok::l_square, tok::r_square);
  }
  /// isTokenBrace - Return true if the cur token is '{' or '}'.
  bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); }
  /// isTokenStringLiteral - True if this token is a string-literal.
  bool isTokenStringLiteral() const {
    return tok::isStringLiteral(Tok.getKind());
  }
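  // Illustrative usage sketch (editorial addition, not part of the original
  // header): the consume helpers above typically drive comma-separated list
  // parsing; 'ParseElement' is a hypothetical helper.
  //
  //   do {
  //     if (ParseElement())                  // true => error already emitted
  //       return true;
  //   } while (TryConsumeToken(tok::comma));
  //
  // ConsumeToken() asserts on special tokens, so error-recovery paths that
  // don't know the token kind use ConsumeAnyToken() instead.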
  /// isTokenSpecial - True if this token requires special consumption methods.
  bool isTokenSpecial() const {
    return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
           isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
  }

  /// Returns true if the current token is '=' or is a type of '='.
  /// For typos, give a fixit to '='.
  bool isTokenEqualOrEqualTypo();

  /// Return the current token to the token stream and make the given
  /// token the current token.
  void UnconsumeToken(Token &Consumed) {
    Token Next = Tok;
    PP.EnterToken(Consumed, /*IsReinject*/ true);
    PP.Lex(Tok);
    PP.EnterToken(Next, /*IsReinject*/ true);
  }

  SourceLocation ConsumeAnnotationToken() {
    assert(Tok.isAnnotation() && "wrong consume method");
    SourceLocation Loc = Tok.getLocation();
    PrevTokLocation = Tok.getAnnotationEndLoc();
    PP.Lex(Tok);
    return Loc;
  }

  /// ConsumeParen - This consume method keeps the paren count up-to-date.
  ///
  SourceLocation ConsumeParen() {
    assert(isTokenParen() && "wrong consume method");
    if (Tok.getKind() == tok::l_paren)
      ++ParenCount;
    else if (ParenCount) {
      AngleBrackets.clear(*this);
      --ParenCount; // Don't let unbalanced )'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBracket - This consume method keeps the bracket count up-to-date.
  ///
  SourceLocation ConsumeBracket() {
    assert(isTokenBracket() && "wrong consume method");
    if (Tok.getKind() == tok::l_square)
      ++BracketCount;
    else if (BracketCount) {
      AngleBrackets.clear(*this);
      --BracketCount; // Don't let unbalanced ]'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBrace - This consume method keeps the brace count up-to-date.
  ///
  SourceLocation ConsumeBrace() {
    assert(isTokenBrace() && "wrong consume method");
    if (Tok.getKind() == tok::l_brace)
      ++BraceCount;
    else if (BraceCount) {
      AngleBrackets.clear(*this);
      --BraceCount; // Don't let unbalanced }'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeStringToken - Consume the current 'peek token', lexing a new one
  /// and returning the token kind.  This method is specific to strings, as it
  /// handles string literal concatenation, as per C99 5.1.1.2, translation
  /// phase #6.
  SourceLocation ConsumeStringToken() {
    assert(isTokenStringLiteral() &&
           "Should only consume string literals with this method");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// Consume the current code-completion token.
  ///
  /// This routine can be called to consume the code-completion token and
  /// continue processing in special cases where \c cutOffParsing() isn't
  /// desired, such as token caching or completion with lookahead.
  SourceLocation ConsumeCodeCompletionToken() {
    assert(Tok.is(tok::code_completion));
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// \brief When we are consuming a code-completion token without having
  /// matched specific position in the grammar, provide code-completion results
  /// based on context.
  ///
  /// \returns the source location of the code-completion token.
  SourceLocation handleUnexpectedCodeCompletionToken();

  /// Abruptly cut off parsing; mainly used when we have reached the
  /// code-completion point.
  void cutOffParsing() {
    if (PP.isCodeCompletionEnabled())
      PP.setCodeCompletionReached();
    // Cut off parsing by acting as if we reached the end-of-file.
    Tok.setKind(tok::eof);
  }
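  // Illustrative sketch (editorial addition): the common pattern for reacting
  // to a code-completion token with cutOffParsing() above.
  // 'CodeCompleteSomething' stands in for one of Sema's CodeComplete* entry
  // points and is hypothetical here.
  //
  //   if (Tok.is(tok::code_completion)) {
  //     Actions.CodeCompleteSomething(getCurScope());
  //     cutOffParsing();                     // act as if we reached EOF
  //     return ExprError();                  // unwind without diagnostics
  //   }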
  /// Determine if we're at the end of the file or at a transition
  /// between modules.
  bool isEofOrEom() {
    tok::TokenKind Kind = Tok.getKind();
    return Kind == tok::eof || Kind == tok::annot_module_begin ||
           Kind == tok::annot_module_end || Kind == tok::annot_module_include;
  }

  /// Checks if the \p Level is valid for use in a fold expression.
  bool isFoldOperator(prec::Level Level) const;

  /// Checks if the \p Kind is a valid operator for fold expressions.
  bool isFoldOperator(tok::TokenKind Kind) const;

  /// Initialize all pragma handlers.
  void initializePragmaHandlers();

  /// Destroy and reset all pragma handlers.
  void resetPragmaHandlers();

  /// Handle the annotation token produced for #pragma unused(...)
  void HandlePragmaUnused();

  /// Handle the annotation token produced for
  /// #pragma GCC visibility...
  void HandlePragmaVisibility();

  /// Handle the annotation token produced for
  /// #pragma pack...
  void HandlePragmaPack();

  /// Handle the annotation token produced for
  /// #pragma ms_struct...
  void HandlePragmaMSStruct();

  /// Handle the annotation token produced for
  /// #pragma comment...
  void HandlePragmaMSComment();

  void HandlePragmaMSPointersToMembers();

  void HandlePragmaMSVtorDisp();

  void HandlePragmaMSPragma();
  bool HandlePragmaMSSection(StringRef PragmaName,
                             SourceLocation PragmaLocation);
  bool HandlePragmaMSSegment(StringRef PragmaName,
                             SourceLocation PragmaLocation);
  bool HandlePragmaMSInitSeg(StringRef PragmaName,
                             SourceLocation PragmaLocation);

  /// Handle the annotation token produced for
  /// #pragma align...
  void HandlePragmaAlign();

  /// Handle the annotation token produced for
  /// #pragma clang __debug dump...
  void HandlePragmaDump();

  /// Handle the annotation token produced for
  /// #pragma weak id...
  void HandlePragmaWeak();

  /// Handle the annotation token produced for
  /// #pragma weak id = id...
  void HandlePragmaWeakAlias();

  /// Handle the annotation token produced for
  /// #pragma redefine_extname...
  void HandlePragmaRedefineExtname();

  /// Handle the annotation token produced for
  /// #pragma STDC FP_CONTRACT...
  void HandlePragmaFPContract();

  /// Handle the annotation token produced for
  /// #pragma STDC FENV_ACCESS...
  void HandlePragmaFEnvAccess();

  /// \brief Handle the annotation token produced for
  /// #pragma clang fp ...
  void HandlePragmaFP();

  /// Handle the annotation token produced for
  /// #pragma OPENCL EXTENSION...
  void HandlePragmaOpenCLExtension();

  /// Handle the annotation token produced for
  /// #pragma clang __debug captured
  StmtResult HandlePragmaCaptured();

  /// Handle the annotation token produced for
  /// #pragma clang loop and #pragma unroll.
  bool HandlePragmaLoopHint(LoopHint &Hint);

  bool ParsePragmaAttributeSubjectMatchRuleSet(
      attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
      SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);

  void HandlePragmaAttribute();

  /// GetLookAheadToken - This peeks ahead N tokens and returns that token
  /// without consuming any tokens.  LookAhead(0) returns 'Tok', LookAhead(1)
  /// returns the token after Tok, etc.
  ///
  /// Note that this differs from the Preprocessor's LookAhead method, because
  /// the Parser always has one token lexed that the preprocessor doesn't.
  ///
  const Token &GetLookAheadToken(unsigned N) {
    if (N == 0 || Tok.is(tok::eof))
      return Tok;
    return PP.LookAhead(N - 1);
  }

public:
  /// NextToken - This peeks ahead one token and returns it without
  /// consuming it.
  const Token &NextToken() { return PP.LookAhead(0); }
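  // Illustrative sketch (editorial addition): one-token lookahead with
  // NextToken() lets the parser commit to a production without consuming
  // anything, e.g. distinguishing a label from an expression statement.
  //
  //   if (Tok.is(tok::identifier) && NextToken().is(tok::colon)) {
  //     // 'identifier :' - parse as a labeled statement.
  //   }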
  /// getTypeAnnotation - Read a parsed type out of an annotation token.
  static ParsedType getTypeAnnotation(const Token &Tok) {
    return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
  }

private:
  static void setTypeAnnotation(Token &Tok, ParsedType T) {
    Tok.setAnnotationValue(T.getAsOpaquePtr());
  }

  /// Read an already-translated primary expression out of an annotation
  /// token.
  static ExprResult getExprAnnotation(const Token &Tok) {
    return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
  }

  /// Set the primary expression corresponding to the given annotation
  /// token.
  static void setExprAnnotation(Token &Tok, ExprResult ER) {
    Tok.setAnnotationValue(ER.getAsOpaquePointer());
  }

public:
  // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
  // find a type name by attempting typo correction.
  bool TryAnnotateTypeOrScopeToken();
  bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
                                                 bool IsNewScope);
  bool TryAnnotateCXXScopeToken(bool EnteringContext = false);

private:
  enum AnnotatedNameKind {
    /// Annotation has failed and emitted an error.
    ANK_Error,
    /// The identifier is a tentatively-declared name.
    ANK_TentativeDecl,
    /// The identifier is a template name. FIXME: Add an annotation for that.
    ANK_TemplateName,
    /// The identifier can't be resolved.
    ANK_Unresolved,
    /// Annotation was successful.
    ANK_Success
  };

  AnnotatedNameKind
  TryAnnotateName(bool IsAddressOfOperand,
                  CorrectionCandidateCallback *CCC = nullptr);

  /// Push a tok::annot_cxxscope token onto the token stream.
  void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);

  /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
  /// replacing them with the non-context-sensitive keywords.  This returns
  /// true if the token was replaced.
  bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec,
                       unsigned &DiagID, bool &isInvalid) {
    if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
      return false;

    if (Tok.getIdentifierInfo() != Ident_vector &&
        Tok.getIdentifierInfo() != Ident_bool &&
        (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
      return false;

    return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
  }

  /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
  /// identifier token, replacing it with the non-context-sensitive __vector.
  /// This returns true if the token was replaced.
  bool TryAltiVecVectorToken() {
    if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
        Tok.getIdentifierInfo() != Ident_vector)
      return false;
    return TryAltiVecVectorTokenOutOfLine();
  }

  bool TryAltiVecVectorTokenOutOfLine();
  bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
                                const char *&PrevSpec, unsigned &DiagID,
                                bool &isInvalid);

  /// Returns true if the current token is the identifier 'instancetype'.
  ///
  /// Should only be used in Objective-C language modes.
  bool isObjCInstancetype() {
    assert(getLangOpts().ObjC);
    if (Tok.isAnnotation())
      return false;
    if (!Ident_instancetype)
      Ident_instancetype = PP.getIdentifierInfo("instancetype");
    return Tok.getIdentifierInfo() == Ident_instancetype;
  }

  /// TryKeywordIdentFallback - For compatibility with system headers using
  /// keywords as identifiers, attempt to convert the current token to an
  /// identifier and optionally disable the keyword for the remainder of the
  /// translation unit.  This returns false if the token was not replaced,
  /// otherwise emits a diagnostic and returns true.
  bool TryKeywordIdentFallback(bool DisableKeyword);
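  // Illustrative sketch (editorial addition): after annotation, a resolved
  // (possibly qualified) type name is folded into one annot_typename token,
  // which callers unpack with getTypeAnnotation() above.
  //
  //   if (TryAnnotateTypeOrScopeToken())
  //     return true;                         // error already diagnosed
  //   if (Tok.is(tok::annot_typename)) {
  //     ParsedType T = getTypeAnnotation(Tok);
  //     ConsumeAnnotationToken();            // eats the whole 'A::B::type'
  //   }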
  /// Get the TemplateIdAnnotation from the token.
  TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);

  /// TentativeParsingAction - An object that is used as a kind of "tentative
  /// parsing transaction". It gets instantiated to mark the token position and
  /// after the token consumption is done, Commit() or Revert() is called to
  /// either "commit the consumed tokens" or revert to the previously marked
  /// token position. Example:
  ///
  ///   TentativeParsingAction TPA(*this);
  ///   ConsumeToken();
  ///   ....
  ///   TPA.Revert();
  ///
  class TentativeParsingAction {
    Parser &P;
    PreferredTypeBuilder PrevPreferredType;
    Token PrevTok;
    size_t PrevTentativelyDeclaredIdentifierCount;
    unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
    bool isActive;

  public:
    explicit TentativeParsingAction(Parser &p) : P(p) {
      PrevPreferredType = P.PreferredType;
      PrevTok = P.Tok;
      PrevTentativelyDeclaredIdentifierCount =
          P.TentativelyDeclaredIdentifiers.size();
      PrevParenCount = P.ParenCount;
      PrevBracketCount = P.BracketCount;
      PrevBraceCount = P.BraceCount;
      P.PP.EnableBacktrackAtThisPos();
      isActive = true;
    }
    void Commit() {
      assert(isActive && "Parsing action was finished!");
      P.TentativelyDeclaredIdentifiers.resize(
          PrevTentativelyDeclaredIdentifierCount);
      P.PP.CommitBacktrackedTokens();
      isActive = false;
    }
    void Revert() {
      assert(isActive && "Parsing action was finished!");
      P.PP.Backtrack();
      P.PreferredType = PrevPreferredType;
      P.Tok = PrevTok;
      P.TentativelyDeclaredIdentifiers.resize(
          PrevTentativelyDeclaredIdentifierCount);
      P.ParenCount = PrevParenCount;
      P.BracketCount = PrevBracketCount;
      P.BraceCount = PrevBraceCount;
      isActive = false;
    }
    ~TentativeParsingAction() {
      assert(!isActive && "Forgot to call Commit or Revert!");
    }
  };

  /// A TentativeParsingAction that automatically reverts in its destructor.
  /// Useful for disambiguation parses that will always be reverted.
  class RevertingTentativeParsingAction
      : private Parser::TentativeParsingAction {
  public:
    RevertingTentativeParsingAction(Parser &P)
        : Parser::TentativeParsingAction(P) {}
    ~RevertingTentativeParsingAction() { Revert(); }
  };

  class UnannotatedTentativeParsingAction;

  /// ObjCDeclContextSwitch - An object used to switch context from
  /// an objective-c decl context to its enclosing decl context and
  /// back.
  class ObjCDeclContextSwitch {
    Parser &P;
    Decl *DC;
    SaveAndRestore<bool> WithinObjCContainer;

  public:
    explicit ObjCDeclContextSwitch(Parser &p)
        : P(p), DC(p.getObjCDeclContext()),
          WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
      if (DC)
        P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
    }
    ~ObjCDeclContextSwitch() {
      if (DC)
        P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
    }
  };

  /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
  /// input.  If so, it is consumed and false is returned.
  ///
  /// If a trivial punctuator misspelling is encountered, a FixIt error
  /// diagnostic is issued and false is returned after recovery.
  ///
  /// If the input is malformed, this emits the specified diagnostic and true
  /// is returned.
  bool ExpectAndConsume(tok::TokenKind ExpectedTok,
                        unsigned Diag = diag::err_expected,
                        StringRef DiagMsg = "");

  /// The parser expects a semicolon and, if present, will consume it.
  ///
  /// If the next token is not a semicolon, this emits the specified
  /// diagnostic, or, if there's just some closing-delimiter noise (e.g., ')'
  /// or ']') prior to the semicolon, consumes that extra token.
  bool ExpectAndConsumeSemi(unsigned DiagID);
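  // Illustrative sketch (editorial addition): because ExpectAndConsume and
  // ExpectAndConsumeSemi return true only on malformed input, call sites read
  // as early exits.  diag::err_expected_semi_after_expr is shown as one
  // plausible diagnostic ID.
  //
  //   if (ExpectAndConsume(tok::l_paren))    // "expected '('" already emitted
  //     return StmtError();
  //   ...
  //   if (ExpectAndConsumeSemi(diag::err_expected_semi_after_expr))
  //     return StmtError();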
  /// The kind of extra semi diagnostic to emit.
  enum ExtraSemiKind {
    OutsideFunction = 0,
    InsideStruct = 1,
    InstanceVariableList = 2,
    AfterMemberFunctionDefinition = 3
  };

  /// Consume any extra semi-colons until the end of the line.
  void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);

  /// Return false if the next token is an identifier. An 'expected identifier'
  /// error is emitted otherwise.
  ///
  /// The parser tries to recover from the error by checking if the next token
  /// is a C++ keyword when parsing Objective-C++. Return false if the recovery
  /// was successful.
  bool expectIdentifier();

public:
  //===--------------------------------------------------------------------===//
  // Scope manipulation

  /// ParseScope - Introduces a new scope for parsing. The kind of
  /// scope is determined by ScopeFlags. Objects of this type should
  /// be created on the stack to coincide with the position where the
  /// parser enters the new scope, and this object's constructor will
  /// create that new scope. Similarly, once the object is destroyed
  /// the parser will exit the scope.
  class ParseScope {
    Parser *Self;
    ParseScope(const ParseScope &) = delete;
    void operator=(const ParseScope &) = delete;

  public:
    // ParseScope - Construct a new object to manage a scope in the
    // parser Self where the new Scope is created with the flags
    // ScopeFlags, but only when we aren't about to enter a compound statement.
    ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
               bool BeforeCompoundStmt = false)
        : Self(Self) {
      if (EnteredScope && !BeforeCompoundStmt)
        Self->EnterScope(ScopeFlags);
      else {
        if (BeforeCompoundStmt)
          Self->incrementMSManglingNumber();
        this->Self = nullptr;
      }
    }

    // Exit - Exit the scope associated with this object now, rather
    // than waiting until the object is destroyed.
    void Exit() {
      if (Self) {
        Self->ExitScope();
        Self = nullptr;
      }
    }

    ~ParseScope() { Exit(); }
  };

  /// EnterScope - Start a new scope.
  void EnterScope(unsigned ScopeFlags);

  /// ExitScope - Pop a scope off the scope stack.
  void ExitScope();

private:
  /// RAII object used to modify the scope flags for the current scope.
  class ParseScopeFlags {
    Scope *CurScope;
    unsigned OldFlags;
    ParseScopeFlags(const ParseScopeFlags &) = delete;
    void operator=(const ParseScopeFlags &) = delete;

  public:
    ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
    ~ParseScopeFlags();
  };

  //===--------------------------------------------------------------------===//
  // Diagnostic Emission and Error recovery.

public:
  DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
  DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
  DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); }

private:
  void SuggestParentheses(SourceLocation Loc, unsigned DK,
                          SourceRange ParenRange);
  void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
  /// Control flags for SkipUntil functions.
  enum SkipUntilFlags {
    StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
    /// Stop skipping at specified token, but don't skip the token itself
    StopBeforeMatch = 1 << 1,
    StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
  };

  friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                            SkipUntilFlags R) {
    return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                       static_cast<unsigned>(R));
  }
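  // Illustrative sketch (editorial addition): these flags combine via the
  // operator| above and drive the SkipUntil error-recovery helpers declared
  // just below, e.g. resynchronizing on ')' without eating a ';' that would
  // terminate the enclosing statement.
  //
  //   ExprResult Arg = ParseAssignmentExpression();
  //   if (Arg.isInvalid()) {
  //     SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
  //     return ExprError();
  //   }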
  /// SkipUntil - Read tokens until we get to the specified token, then consume
  /// it (unless StopBeforeMatch is specified).  Because we cannot guarantee
  /// that the token will ever occur, this skips to the next token, or to some
  /// likely good stopping point.  If Flags has StopAtSemi flag, skipping will
  /// stop at a ';' character.
  ///
  /// If SkipUntil finds the specified token, it returns true, otherwise it
  /// returns false.
  bool SkipUntil(tok::TokenKind T,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    return SkipUntil(llvm::makeArrayRef(T), Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2, T3};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

  /// SkipMalformedDecl - Read tokens until we get to some likely good stopping
  /// point for skipping past a simple-declaration.
  void SkipMalformedDecl();

private:
  //===--------------------------------------------------------------------===//
  // Lexing and parsing of C++ inline methods.

  struct ParsingClass;

  /// [class.mem]p1: "... the class is regarded as complete within
  /// - function bodies
  /// - default arguments
  /// - exception-specifications (TODO: C++0x)
  /// - and brace-or-equal-initializers for non-static data members
  /// (including such things in nested classes)."
  /// LateParsedDeclarations build the tree of those elements so they can
  /// be parsed after parsing the top-level class.
  class LateParsedDeclaration {
  public:
    virtual ~LateParsedDeclaration();

    virtual void ParseLexedMethodDeclarations();
    virtual void ParseLexedMemberInitializers();
    virtual void ParseLexedMethodDefs();
    virtual void ParseLexedAttributes();
  };

  /// Inner node of the LateParsedDeclaration tree that parses
  /// all its members recursively.
  class LateParsedClass : public LateParsedDeclaration {
  public:
    LateParsedClass(Parser *P, ParsingClass *C);
    ~LateParsedClass() override;

    void ParseLexedMethodDeclarations() override;
    void ParseLexedMemberInitializers() override;
    void ParseLexedMethodDefs() override;
    void ParseLexedAttributes() override;

  private:
    Parser *Self;
    ParsingClass *Class;
  };

  /// Contains the lexed tokens of an attribute with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  /// FIXME: Perhaps we should change the name of LateParsedDeclaration to
  /// LateParsedTokens.
  struct LateParsedAttribute : public LateParsedDeclaration {
    Parser *Self;
    CachedTokens Toks;
    IdentifierInfo &AttrName;
    IdentifierInfo *MacroII = nullptr;
    SourceLocation AttrNameLoc;
    SmallVector<Decl *, 2> Decls;

    explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                                 SourceLocation Loc)
        : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

    void ParseLexedAttributes() override;

    void addDecl(Decl *D) { Decls.push_back(D); }
  };

  // A list of late-parsed attributes.  Used by ParseGNUAttributes.
  class LateParsedAttrList : public SmallVector<LateParsedAttribute *, 2> {
  public:
    LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) {}

    bool parseSoon() { return ParseSoon; }

  private:
    bool ParseSoon; // Are we planning to parse these shortly after creation?
  };
  /// Contains the lexed tokens of a member function definition
  /// which needs to be parsed at the end of the class declaration
  /// after parsing all other member declarations.
  struct LexedMethod : public LateParsedDeclaration {
    Parser *Self;
    Decl *D;
    CachedTokens Toks;

    /// Whether this member function had an associated template
    /// scope. When true, D is a template declaration;
    /// otherwise, it is a member function declaration.
    bool TemplateScope;

    explicit LexedMethod(Parser *P, Decl *MD)
        : Self(P), D(MD), TemplateScope(false) {}

    void ParseLexedMethodDefs() override;
  };

  /// LateParsedDefaultArgument - Keeps track of a parameter that may
  /// have a default argument that cannot be parsed yet because it
  /// occurs within a member function declaration inside the class
  /// (C++ [class.mem]p2).
  struct LateParsedDefaultArgument {
    explicit LateParsedDefaultArgument(
        Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr)
        : Param(P), Toks(std::move(Toks)) {}

    /// Param - The parameter declaration for this parameter.
    Decl *Param;

    /// Toks - The sequence of tokens that comprises the default
    /// argument expression, not including the '=' or the terminating
    /// ')' or ','.  This will be NULL for parameters that have no
    /// default argument.
    std::unique_ptr<CachedTokens> Toks;
  };

  /// LateParsedMethodDeclaration - A method declaration inside a class that
  /// contains at least one entity whose parsing needs to be delayed
  /// until the class itself is completely-defined, such as a default
  /// argument (C++ [class.mem]p2).
  struct LateParsedMethodDeclaration : public LateParsedDeclaration {
    explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
        : Self(P), Method(M), TemplateScope(false),
          ExceptionSpecTokens(nullptr) {}

    void ParseLexedMethodDeclarations() override;

    Parser *Self;

    /// Method - The method declaration.
    Decl *Method;

    /// Whether this member function had an associated template
    /// scope. When true, D is a template declaration;
    /// otherwise, it is a member function declaration.
    bool TemplateScope;

    /// DefaultArgs - Contains the parameters of the function and
    /// their default arguments.  At least one of the parameters will
    /// have a default argument, but all of the parameters of the
    /// method will be stored so that they can be reintroduced into
    /// scope at the appropriate times.
    SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

    /// The set of tokens that make up an exception-specification that
    /// has not yet been parsed.
    CachedTokens *ExceptionSpecTokens;
  };

  /// LateParsedMemberInitializer - An initializer for a non-static class data
  /// member whose parsing must be delayed until the class is completely
  /// defined (C++11 [class.mem]p2).
  struct LateParsedMemberInitializer : public LateParsedDeclaration {
    LateParsedMemberInitializer(Parser *P, Decl *FD) : Self(P), Field(FD) {}

    void ParseLexedMemberInitializers() override;

    Parser *Self;

    /// Field - The field declaration.
    Decl *Field;

    /// CachedTokens - The sequence of tokens that comprises the initializer,
    /// including any leading '='.
    CachedTokens Toks;
  };

  /// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
  /// C++ class, method declarations that contain parts which won't be parsed
  /// until after the definition is completed (C++ [class.mem]p2), together
  /// with any attached inline definitions, are stored here along with the
  /// tokens that will later be parsed to create those entities.
  typedef SmallVector<LateParsedDeclaration *, 2>
      LateParsedDeclarationsContainer;
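  // Illustrative example (editorial addition) of why the late-parsing
  // containers above exist: inside a class, default arguments and member
  // initializers may reference members declared later, so their tokens are
  // cached and replayed once the class is complete (C++ [class.mem]p2).
  //
  //   struct S {
  //     void f(int x = g());   // default argument is late-parsed: 'g' below
  //     int m = g();           // member initializer is late-parsed too
  //     static int g();
  //   };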
  /// Representation of a class that has been parsed, including
  /// any member function declarations or definitions that need to be
  /// parsed after the corresponding top-level class is complete.
  struct ParsingClass {
    ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
        : TopLevelClass(TopLevelClass), TemplateScope(false),
          IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) {}

    /// Whether this is a "top-level" class, meaning that it is
    /// not nested within another class.
    bool TopLevelClass : 1;

    /// Whether this class had an associated template
    /// scope. When true, TagOrTemplate is a template declaration;
    /// otherwise, it is a tag declaration.
    bool TemplateScope : 1;

    /// Whether this class is an __interface.
    bool IsInterface : 1;

    /// The class or class template whose definition we are parsing.
    Decl *TagOrTemplate;

    /// LateParsedDeclarations - Method declarations, inline definitions and
    /// nested classes that contain pieces whose parsing will be delayed until
    /// the top-level class is fully defined.
    LateParsedDeclarationsContainer LateParsedDeclarations;
  };

  /// The stack of classes that is currently being
  /// parsed. Nested and local classes will be pushed onto this stack
  /// when they are parsed, and removed afterward.
  std::stack<ParsingClass *> ClassStack;

  ParsingClass &getCurrentClass() {
    assert(!ClassStack.empty() && "No lexed method stacks!");
    return *ClassStack.top();
  }

  /// RAII object used to manage the parsing of a class definition.
  class ParsingClassDefinition {
    Parser &P;
    bool Popped;
    Sema::ParsingClassState State;

  public:
    ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                           bool IsInterface)
        : P(P), Popped(false),
          State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
    }

    /// Pop this class off the stack.
    void Pop() {
      assert(!Popped && "Nested class has already been popped");
      Popped = true;
      P.PopParsingClass(State);
    }

    ~ParsingClassDefinition() {
      if (!Popped)
        P.PopParsingClass(State);
    }
  };

  /// Contains any template-specific information that has been parsed prior
  /// to parsing declaration specifiers.
  struct ParsedTemplateInfo {
    ParsedTemplateInfo()
        : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() {}

    ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                       bool isSpecialization,
                       bool lastParameterListWasEmpty = false)
        : Kind(isSpecialization ? ExplicitSpecialization : Template),
          TemplateParams(TemplateParams),
          LastParameterListWasEmpty(lastParameterListWasEmpty) {}

    explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                                SourceLocation TemplateLoc)
        : Kind(ExplicitInstantiation), TemplateParams(nullptr),
          ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
          LastParameterListWasEmpty(false) {}

    /// The kind of template we are parsing.
    enum {
      /// We are not parsing a template at all.
      NonTemplate = 0,
      /// We are parsing a template declaration.
      Template,
      /// We are parsing an explicit specialization.
      ExplicitSpecialization,
      /// We are parsing an explicit instantiation.
      ExplicitInstantiation
    } Kind;

    /// The template parameter lists, for template declarations
    /// and explicit specializations.
    TemplateParameterLists *TemplateParams;

    /// The location of the 'extern' keyword, if any, for an explicit
    /// instantiation.
    SourceLocation ExternLoc;

    /// The location of the 'template' keyword, for an explicit
    /// instantiation.
    SourceLocation TemplateLoc;
    /// Whether the last template parameter list was empty.
    bool LastParameterListWasEmpty;

    SourceRange getSourceRange() const LLVM_READONLY;
  };

  void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
  void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);

  static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
  static void LateTemplateParserCleanupCallback(void *P);

  Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate,
                                           bool TopLevelClass,
                                           bool IsInterface);
  void DeallocateParsedClasses(ParsingClass *Class);
  void PopParsingClass(Sema::ParsingClassState);

  enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer };

  NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
                                     ParsedAttributes &AccessAttrs,
                                     ParsingDeclarator &D,
                                     const ParsedTemplateInfo &TemplateInfo,
                                     const VirtSpecifiers &VS,
                                     SourceLocation PureSpecLoc);
  void ParseCXXNonStaticMemberInitializer(Decl *VarD);
  void ParseLexedAttributes(ParsingClass &Class);
  void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
                               bool EnterScope, bool OnDefinition);
  void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope,
                           bool OnDefinition);
  void ParseLexedMethodDeclarations(ParsingClass &Class);
  void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
  void ParseLexedMethodDefs(ParsingClass &Class);
  void ParseLexedMethodDef(LexedMethod &LM);
  void ParseLexedMemberInitializers(ParsingClass &Class);
  void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
  void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
  bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
  bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
  bool ConsumeAndStoreConditional(CachedTokens &Toks);
  bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks,
                            bool StopAtSemi = true,
                            bool ConsumeFinalToken = true) {
    return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
  }
  bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
                            CachedTokens &Toks, bool StopAtSemi = true,
                            bool ConsumeFinalToken = true);

  //===--------------------------------------------------------------------===//
  // C99 6.9: External Definitions.
  struct ParsedAttributesWithRange : ParsedAttributes {
    ParsedAttributesWithRange(AttributeFactory &factory)
        : ParsedAttributes(factory) {}

    void clear() {
      ParsedAttributes::clear();
      Range = SourceRange();
    }

    SourceRange Range;
  };
  struct ParsedAttributesViewWithRange : ParsedAttributesView {
    ParsedAttributesViewWithRange() : ParsedAttributesView() {}
    void clearListOnly() {
      ParsedAttributesView::clearListOnly();
      Range = SourceRange();
    }

    SourceRange Range;
  };

  DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
                                          ParsingDeclSpec *DS = nullptr);
  bool isDeclarationAfterDeclarator();
  bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
  DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
      ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr,
      AccessSpecifier AS = AS_none);
  DeclGroupPtrTy ParseDeclOrFunctionDefInternal(
      ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS,
      AccessSpecifier AS);

  void SkipFunctionBody();
  Decl *ParseFunctionDefinition(
      ParsingDeclarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      LateParsedAttrList *LateParsedAttrs = nullptr);
  void ParseKNRParamDeclarations(Declarator &D);
  // EndLoc, if non-NULL, is filled with the location of the last token of
  // the simple-asm.
  ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
  ExprResult ParseAsmStringLiteral();

  // Objective-C External Declarations
  void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
  DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
  DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
  Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
                                        ParsedAttributes &prefixAttrs);
  class ObjCTypeParamListScope;
  ObjCTypeParamList *parseObjCTypeParamList();
  ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
      ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
      SmallVectorImpl<IdentifierLocPair> &protocolIdents,
      SourceLocation &rAngleLoc, bool mayBeProtocolList = true);

  void HelperActionsForIvarDeclarations(Decl *interfaceDecl,
                                        SourceLocation atLoc,
                                        BalancedDelimiterTracker &T,
                                        SmallVectorImpl<Decl *> &AllIvarDecls,
                                        bool RBraceMissing);
  void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
                                       tok::ObjCKeywordKind visibility,
                                       SourceLocation atLoc);
  bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
                                   SmallVectorImpl<SourceLocation> &PLocs,
                                   bool WarnOnDeclarations,
                                   bool ForObjCContainer,
                                   SourceLocation &LAngleLoc,
                                   SourceLocation &EndProtoLoc,
                                   bool consumeLastToken);

  /// Parse the first angle-bracket-delimited clause for an
  /// Objective-C object or object pointer type, which may be either
  /// type arguments or protocol qualifiers.
  void parseObjCTypeArgsOrProtocolQualifiers(
      ParsedType baseType, SourceLocation &typeArgsLAngleLoc,
      SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc,
      SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols,
      SmallVectorImpl<SourceLocation> &protocolLocs,
      SourceLocation &protocolRAngleLoc, bool consumeLastToken,
      bool warnOnIncompleteProtocols);

  /// Parse either Objective-C type arguments or protocol qualifiers; if the
  /// former, also parse protocol qualifiers afterward.
  void parseObjCTypeArgsAndProtocolQualifiers(
      ParsedType baseType, SourceLocation &typeArgsLAngleLoc,
      SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc,
      SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols,
      SmallVectorImpl<SourceLocation> &protocolLocs,
      SourceLocation &protocolRAngleLoc, bool consumeLastToken);

  /// Parse a protocol qualifier type such as '<NSCopying>', which is
  /// an anachronistic way of writing 'id<NSCopying>'.
  TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
  /// Parse Objective-C type arguments and protocol qualifiers, extending the
  /// current type with the parsed result.
  TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
                                                    ParsedType type,
                                                    bool consumeLastToken,
                                                    SourceLocation &endLoc);

  void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
                                  Decl *CDecl);
  DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
                                                ParsedAttributes &prefixAttrs);

  struct ObjCImplParsingDataRAII {
    Parser &P;
    Decl *Dcl;
    bool HasCFunction;
    typedef SmallVector<LexedMethod *, 8> LateParsedObjCMethodContainer;
    LateParsedObjCMethodContainer LateParsedObjCMethods;

    ObjCImplParsingDataRAII(Parser &parser, Decl *D)
        : P(parser), Dcl(D), HasCFunction(false) {
      P.CurParsedObjCImpl = this;
      Finished = false;
    }
    ~ObjCImplParsingDataRAII();

    void finish(SourceRange AtEnd);
    bool isFinished() const { return Finished; }

  private:
    bool Finished;
  };
  ObjCImplParsingDataRAII *CurParsedObjCImpl;
  void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);

  DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
                                                      ParsedAttributes &Attrs);
  DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
  Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
  Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
  Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);

  IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);

  // Definitions for Objective-c context sensitive keywords recognition.
  enum ObjCTypeQual {
    objc_in = 0,
    objc_out,
    objc_inout,
    objc_oneway,
    objc_bycopy,
    objc_byref,
    objc_nonnull,
    objc_nullable,
    objc_null_unspecified,
    objc_NumQuals
  };
  IdentifierInfo *ObjCTypeQuals[objc_NumQuals];

  bool isTokIdentifier_in() const;

  ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
                               ParsedAttributes *ParamAttrs);
  void ParseObjCMethodRequirement();
  Decl *ParseObjCMethodPrototype(
      tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
      bool MethodDefinition = true);
  Decl *ParseObjCMethodDecl(
      SourceLocation mLoc, tok::TokenKind mType,
      tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
      bool MethodDefinition = true);
  void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);

  Decl *ParseObjCMethodDefinition();

public:
  //===--------------------------------------------------------------------===//
  // C99 6.5: Expressions.

  /// TypeCastState - State whether an expression is or may be a type cast.
  enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast };

  ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstantExpressionInExprEvalContext(
      TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseCaseExpression(SourceLocation CaseLoc);
  ExprResult ParseConstraintExpression();
  // Expr that doesn't include commas.
  ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);

  ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
                                  unsigned &NumLineToksConsumed,
                                  bool IsUnevaluated);

private:
  ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
  ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);

  ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec);
  ExprResult ParseCastExpression(bool isUnaryExpression,
                                 bool isAddressOfOperand, bool &NotCastExpr,
                                 TypeCastState isTypeCast,
                                 bool isVectorLiteral = false);
  ExprResult ParseCastExpression(bool isUnaryExpression,
                                 bool isAddressOfOperand = false,
                                 TypeCastState isTypeCast = NotTypeCast,
                                 bool isVectorLiteral = false);
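  // Illustrative sketch (editorial addition): ParseRHSOfBinaryExpression above
  // implements operator-precedence ("precedence climbing") parsing.  A minimal
  // rendition of the technique, with Clang details elided and argument lists
  // abbreviated with '...':
  //
  //   ExprResult ParseRHS(ExprResult LHS, prec::Level MinPrec) {
  //     while (true) {
  //       prec::Level OpPrec = getBinOpPrecedence(Tok.getKind(), ...);
  //       if (OpPrec < MinPrec)
  //         return LHS;                     // caller's operator binds tighter
  //       Token Op = Tok;
  //       ConsumeToken();
  //       ExprResult RHS = ParseCastExpression(/*isUnaryExpression=*/false);
  //       if (getBinOpPrecedence(Tok.getKind(), ...) > OpPrec)
  //         RHS = ParseRHS(RHS, static_cast<prec::Level>(OpPrec + 1));
  //       LHS = Actions.ActOnBinOp(getCurScope(), Op.getLocation(),
  //                                Op.getKind(), LHS.get(), RHS.get());
  //     }
  //   }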
  /// Returns true if the next token cannot start an expression.
  bool isNotExpressionStart();

  /// Returns true if the next token would start a postfix-expression
  /// suffix.
  bool isPostfixExpressionSuffixStart() {
    tok::TokenKind K = Tok.getKind();
    return (K == tok::l_square || K == tok::l_paren || K == tok::period ||
            K == tok::arrow || K == tok::plusplus || K == tok::minusminus);
  }

  bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
  void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
  bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
                                           const Token &OpToken);
  bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
    if (auto *Info = AngleBrackets.getCurrent(*this))
      return checkPotentialAngleBracketDelimiter(*Info, OpToken);
    return false;
  }

  ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
  ExprResult ParseUnaryExprOrTypeTraitExpression();
  ExprResult ParseBuiltinPrimaryExpression();

  ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
                                                bool &isCastExpr,
                                                ParsedType &CastTy,
                                                SourceRange &CastRange);

  typedef SmallVector<Expr *, 20> ExprListTy;
  typedef SmallVector<SourceLocation, 20> CommaLocsTy;

  /// ParseExpressionList - Used for C/C++ (argument-)expression-list.
  bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
                           SmallVectorImpl<SourceLocation> &CommaLocs,
                           llvm::function_ref<void()> ExpressionStarts =
                               llvm::function_ref<void()>());

  /// ParseSimpleExpressionList - A simple comma-separated list of expressions,
  /// used for misc language extensions.
  bool ParseSimpleExpressionList(SmallVectorImpl<Expr *> &Exprs,
                                 SmallVectorImpl<SourceLocation> &CommaLocs);

  /// ParenParseOption - Control what ParseParenExpression will parse.
  enum ParenParseOption {
    SimpleExpr,      // Only parse '(' expression ')'
    FoldExpr,        // Also allow fold-expression <anything>
    CompoundStmt,    // Also allow '(' compound-statement ')'
    CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
    CastExpr         // Also allow '(' type-name ')' <anything>
  };
  ExprResult ParseParenExpression(ParenParseOption &ExprType,
                                  bool stopIfCastExpr, bool isTypeCast,
                                  ParsedType &CastTy,
                                  SourceLocation &RParenLoc);

  ExprResult ParseCXXAmbiguousParenExpression(
      ParenParseOption &ExprType, ParsedType &CastTy,
      BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
  ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
                                            SourceLocation LParenLoc,
                                            SourceLocation RParenLoc);

  ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);

  ExprResult ParseGenericSelectionExpression();

  ExprResult ParseObjCBoolLiteral();

  ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);

  //===--------------------------------------------------------------------===//
  // C++ Expressions
  ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
                                     Token &Replacement);
  ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);

  bool areTokensAdjacent(const Token &A, const Token &B);

  void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
                                  bool EnteringContext, IdentifierInfo &II,
                                  CXXScopeSpec &SS);

  bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType,
                                      bool EnteringContext,
                                      bool *MayBePseudoDestructor = nullptr,
                                      bool IsTypename = false,
                                      IdentifierInfo **LastII = nullptr,
                                      bool OnlyNamespace = false);
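  // Illustrative example (editorial addition) of the ambiguity that
  // checkPotentialAngleBracket above and the AngleBracketTracker resolve:
  // '<' after a name-like expression may open a template argument list or be
  // a plain comparison.
  //
  //   int a, b, c;
  //   bool x = a < b > c;    // comparisons: '(a < b) > c'
  //   vector<int> v;         // template-id: '<' opens an argument list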
  //===--------------------------------------------------------------------===//
  // C++11 5.1.2: Lambda expressions

  /// Result of tentatively parsing a lambda-introducer.
  enum class LambdaIntroducerTentativeParse {
    /// This appears to be a lambda-introducer, which has been fully parsed.
    Success,
    /// This is a lambda-introducer, but has not been fully parsed, and this
    /// function needs to be called again to parse it.
    Incomplete,
    /// This is definitely an Objective-C message send expression, rather than
    /// a lambda-introducer, attribute-specifier, or array designator.
    MessageSend,
    /// This is not a lambda-introducer.
    Invalid,
  };

  // [...] () -> type {...}
  ExprResult ParseLambdaExpression();
  ExprResult TryParseLambdaExpression();
  bool
  ParseLambdaIntroducer(LambdaIntroducer &Intro,
                        LambdaIntroducerTentativeParse *Tentative = nullptr);
  ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);

  //===--------------------------------------------------------------------===//
  // C++ 5.2p1: C++ Casts
  ExprResult ParseCXXCasts();

  //===--------------------------------------------------------------------===//
  // C++ 5.2p1: C++ Type Identification
  ExprResult ParseCXXTypeid();

  //===--------------------------------------------------------------------===//
  // C++ : Microsoft __uuidof Expression
  ExprResult ParseCXXUuidof();

  //===--------------------------------------------------------------------===//
  // C++ 5.2.4: C++ Pseudo-Destructor Expressions
  ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
                                      tok::TokenKind OpKind, CXXScopeSpec &SS,
                                      ParsedType ObjectType);

  //===--------------------------------------------------------------------===//
  // C++ 9.3.2: C++ 'this' pointer
  ExprResult ParseCXXThis();

  //===--------------------------------------------------------------------===//
  // C++ 15: C++ Throw Expression
  ExprResult ParseThrowExpression();

  ExceptionSpecificationType tryParseExceptionSpecification(
      bool Delayed, SourceRange &SpecificationRange,
      SmallVectorImpl<ParsedType> &DynamicExceptions,
      SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
      ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens);

  // EndLoc is filled with the location of the last token of the specification.
  ExceptionSpecificationType ParseDynamicExceptionSpecification(
      SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions,
      SmallVectorImpl<SourceRange> &Ranges);

  //===--------------------------------------------------------------------===//
  // C++0x 8: Function declaration trailing-return-type
  TypeResult ParseTrailingReturnType(SourceRange &Range,
                                     bool MayBeFollowedByDirectInit);

  //===--------------------------------------------------------------------===//
  // C++ 2.13.5: C++ Boolean Literals
  ExprResult ParseCXXBoolLiteral();

  //===--------------------------------------------------------------------===//
  // C++ 5.2.3: Explicit type conversion (functional notation)
  ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
  /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
  /// This should only be called when the current token is known to be part of
  /// simple-type-specifier.
  void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);

  bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);

  //===--------------------------------------------------------------------===//
  // C++ 5.3.4 and 5.3.5: C++ new and delete
  bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr *> &Exprs,
                                   Declarator &D);
  void ParseDirectNewDeclarator(Declarator &D);
  ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
  ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start);

  //===--------------------------------------------------------------------===//
  // C++ if/switch/while/for condition expression.
  struct ForRangeInfo;
  Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
                                          SourceLocation Loc,
                                          Sema::ConditionKind CK,
                                          ForRangeInfo *FRI = nullptr);

  //===--------------------------------------------------------------------===//
  // C++ Coroutines

  ExprResult ParseCoyieldExpression();

  //===--------------------------------------------------------------------===//
  // C99 6.7.8: Initialization.

  /// ParseInitializer
  ///       initializer: [C99 6.7.8]
  ///         assignment-expression
  ///         '{' ...
  ExprResult ParseInitializer() {
    if (Tok.isNot(tok::l_brace))
      return ParseAssignmentExpression();
    return ParseBraceInitializer();
  }
  bool MayBeDesignationStart();
  ExprResult ParseBraceInitializer();
  ExprResult ParseInitializerWithPotentialDesignator();

  //===--------------------------------------------------------------------===//
  // clang Expressions

  ExprResult ParseBlockLiteralExpression(); // ^{...}

  //===--------------------------------------------------------------------===//
  // Objective-C Expressions
  ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
  ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
  ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
  ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
  ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
  ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
  bool isSimpleObjCMessageExpression();
  ExprResult ParseObjCMessageExpression();
  ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
                                            SourceLocation SuperLoc,
                                            ParsedType ReceiverType,
                                            Expr *ReceiverExpr);
  ExprResult ParseAssignmentExprWithObjCMessageExprStart(
      SourceLocation LBracloc, SourceLocation SuperLoc,
      ParsedType ReceiverType, Expr *ReceiverExpr);
  bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);

  //===--------------------------------------------------------------------===//
  // C99 6.8: Statements and Blocks.

  /// A SmallVector of statements, with stack size 32 (as that is the only one
  /// used.)
  typedef SmallVector<Stmt *, 32> StmtVector;
  /// A SmallVector of expressions, with stack size 12 (the maximum used.)
  typedef SmallVector<Expr *, 12> ExprVector;
  /// A SmallVector of types.
  typedef SmallVector<ParsedType, 12> TypeVector;

  StmtResult
  ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
                 ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
  StmtResult ParseStatementOrDeclaration(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc = nullptr);
  StmtResult ParseStatementOrDeclarationAfterAttributes(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs);
  StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
                                   ParsedStmtContext StmtCtx);
  StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                                bool MissingCase = false,
                                ExprResult Expr = ExprResult());
  StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseCompoundStatement(bool isStmtExpr = false);
  StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags);
  void ParseCompoundStatementLeadingPragmas();
  bool ConsumeNullStmt(StmtVector &Stmts);
  StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
  bool ParseParenExprOrCondition(StmtResult *InitStmt,
                                 Sema::ConditionResult &CondResult,
                                 SourceLocation Loc, Sema::ConditionKind CK);
  StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseDoStatement();
  StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseGotoStatement();
  StmtResult ParseContinueStatement();
  StmtResult ParseBreakStatement();
  StmtResult ParseReturnStatement();
  StmtResult ParseAsmStatement(bool &msAsm);
  StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
  StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx,
                                 SourceLocation *TrailingElseLoc,
                                 ParsedAttributesWithRange &Attrs);

  /// Describes the behavior that should be taken for an __if_exists
  /// block.
  enum IfExistsBehavior {
    /// Parse the block; this code is always used.
    IEB_Parse,
    /// Skip the block entirely; this code is never used.
    IEB_Skip,
    /// Parse the block as a dependent block, which may be used in
    /// some template instantiations but not others.
    IEB_Dependent
  };
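  // Illustrative example (editorial addition): the Microsoft extension that
  // IfExistsBehavior above models.  Whether the braced block is parsed,
  // skipped, or kept as a dependent block depends on whether the name
  // resolves.
  //
  //   template <typename T> void f(T t) {
  //     __if_exists(T::foo) {
  //       t.foo();           // IEB_Dependent inside a template
  //     }
  //   }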
  /// Describes the condition of a Microsoft __if_exists or
  /// __if_not_exists block.
  struct IfExistsCondition {
    /// The location of the initial keyword.
    SourceLocation KeywordLoc;
    /// Whether this is an __if_exists block (rather than an
    /// __if_not_exists block).
    bool IsIfExists;

    /// Nested-name-specifier preceding the name.
    CXXScopeSpec SS;

    /// The name we're looking for.
    UnqualifiedId Name;

    /// The behavior that this __if_exists or __if_not_exists block
    /// should have.
    IfExistsBehavior Behavior;
  };

  bool ParseMicrosoftIfExistsCondition(IfExistsCondition &Result);
  void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
  void ParseMicrosoftIfExistsExternalDeclaration();
  void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
                                              ParsedAttributes &AccessAttrs,
                                              AccessSpecifier &CurAS);
  bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
                                              bool &InitExprsOk);
  bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
                           SmallVectorImpl<Expr *> &Constraints,
                           SmallVectorImpl<Expr *> &Exprs);

  //===--------------------------------------------------------------------===//
  // C++ 6: Statements and Blocks

  StmtResult ParseCXXTryBlock();
  StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
  StmtResult ParseCXXCatchBlock(bool FnCatch = false);

  //===--------------------------------------------------------------------===//
  // MS: SEH Statements and Blocks

  StmtResult ParseSEHTryBlock();
  StmtResult ParseSEHExceptBlock(SourceLocation Loc);
  StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
  StmtResult ParseSEHLeaveStatement();

  //===--------------------------------------------------------------------===//
  // Objective-C Statements

  StmtResult ParseObjCAtStatement(SourceLocation atLoc,
                                  ParsedStmtContext StmtCtx);
  StmtResult ParseObjCTryStmt(SourceLocation atLoc);
  StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
  StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
  StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);

  //===--------------------------------------------------------------------===//
  // C99 6.7: Declarations.

  /// A context for parsing declaration specifiers.  TODO: flesh this
  /// out, there are other significant restrictions on specifiers that
  /// would best be implemented in the parser.
  enum class DeclSpecContext {
    DSC_normal,         // normal context
    DSC_class,          // class context, enables 'friend'
    DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
    DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
    DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
    DSC_top_level,         // top-level/namespace declaration context
    DSC_template_param,    // template parameter context
    DSC_template_type_arg, // template type argument context
    DSC_objc_method_result, // ObjC method result context, enables
                            // 'instancetype'
    DSC_condition           // condition declaration context
  };

  /// Is this a context in which we are parsing just a type-specifier (or
  /// trailing-type-specifier)?
  static bool isTypeSpecifier(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_template_param:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
    case DeclSpecContext::DSC_objc_method_result:
    case DeclSpecContext::DSC_condition:
      return false;

    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_type_specifier:
    case DeclSpecContext::DSC_trailing:
    case DeclSpecContext::DSC_alias_declaration:
      return true;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }
  /// Is this a context in which we can perform class template argument
  /// deduction?
  static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_template_param:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
    case DeclSpecContext::DSC_condition:
    case DeclSpecContext::DSC_type_specifier:
      return true;

    case DeclSpecContext::DSC_objc_method_result:
    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_trailing:
    case DeclSpecContext::DSC_alias_declaration:
      return false;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }

  /// Information on a C++0x for-range-initializer found while parsing a
  /// declaration which turns out to be a for-range-declaration.
  struct ForRangeInit {
    SourceLocation ColonLoc;
    ExprResult RangeExpr;

    bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
  };
  struct ForRangeInfo : ForRangeInit {
    StmtResult LoopVar;
  };

  DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                  SourceLocation &DeclEnd,
                                  ParsedAttributesWithRange &attrs);
  DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context,
                                        SourceLocation &DeclEnd,
                                        ParsedAttributesWithRange &attrs,
                                        bool RequireSemi,
                                        ForRangeInit *FRI = nullptr);
  bool MightBeDeclarator(DeclaratorContext Context);
  DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                                SourceLocation *DeclEnd = nullptr,
                                ForRangeInit *FRI = nullptr);
  Decl *ParseDeclarationAfterDeclarator(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
  bool ParseAsmAttributesAfterDeclarator(Declarator &D);
  Decl *ParseDeclarationAfterDeclaratorAndAttributes(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      ForRangeInit *FRI = nullptr);
  Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
  Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

  /// When in code-completion, skip parsing of the function/method body
  /// unless the body contains the code-completion point.
  ///
  /// \returns true if the function body was skipped.
  bool trySkippingFunctionBody();

  bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC,
                        ParsedAttributesWithRange &Attrs);
  DeclSpecContext
  getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
  void ParseDeclarationSpecifiers(
      DeclSpec &DS,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal,
      LateParsedAttrList *LateAttrs = nullptr);
  bool DiagnoseMissingSemiAfterTagDefinition(
      DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
      LateParsedAttrList *LateAttrs = nullptr);

  void ParseSpecifierQualifierList(
      DeclSpec &DS, AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal);

  void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context);

  void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                          const ParsedTemplateInfo &TemplateInfo,
                          AccessSpecifier AS, DeclSpecContext DSC);
  void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
  void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
                            Decl *TagDecl);

  void ParseStructDeclaration(
      ParsingDeclSpec &DS,
      llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

  bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
  bool isTypeSpecifierQualifier();
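  // Illustrative example (editorial addition): class template argument
  // deduction, which isClassTemplateDeductionContext above gates by context.
  //
  //   std::pair p(1, 2.5);  // OK in DSC_normal: deduces std::pair<int, double>
  //   // ...but CTAD is not permitted in, e.g., a trailing return type
  //   // (DSC_trailing).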
Return false if it isn't part of a type /// specifier or if we're not sure. bool isKnownToBeTypeSpecifier(const Token &Tok) const; /// Return true if we know that we are definitely looking at a /// decl-specifier, and that it isn't part of an expression such as a /// function-style cast. Return false if it's not a decl-specifier, or we're /// not sure. bool isKnownToBeDeclarationSpecifier() { if (getLangOpts().CPlusPlus) return isCXXDeclarationSpecifier() == TPResult::True; return isDeclarationSpecifier(true); } /// isDeclarationStatement - Disambiguates between a declaration and an /// expression statement, when parsing function bodies. /// Returns true for declaration, false for expression. bool isDeclarationStatement() { if (getLangOpts().CPlusPlus) return isCXXDeclarationStatement(); return isDeclarationSpecifier(true); } /// isForInitDeclaration - Disambiguates between a declaration and an /// expression in the context of the C 'clause-1' or the C++ /// 'for-init-statement' part of a 'for' statement. /// Returns true for declaration, false for expression. bool isForInitDeclaration() { if (getLangOpts().OpenMP) Actions.startOpenMPLoop(); if (getLangOpts().CPlusPlus) return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true); return isDeclarationSpecifier(true); } /// Determine whether this is a C++1z for-range-identifier. bool isForRangeIdentifier(); /// Determine whether we are currently at the start of an Objective-C /// class message that appears to be missing the open bracket '['. bool isStartOfObjCClassMessageMissingOpenBracket(); /// Starting with a scope specifier, identifier, or /// template-id that refers to the current class, determine whether /// this is a constructor declarator. bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false); /// Specifies the context in which type-id/expression /// disambiguation will occur. enum TentativeCXXTypeIdContext { TypeIdInParens, TypeIdUnambiguous, TypeIdAsTemplateArgument }; /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know /// whether the parens contain an expression or a type-id. /// Returns true for a type-id and false for an expression. bool isTypeIdInParens(bool &isAmbiguous) { if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdInParens, isAmbiguous); isAmbiguous = false; return isTypeSpecifierQualifier(); } bool isTypeIdInParens() { bool isAmbiguous; return isTypeIdInParens(isAmbiguous); } /// Checks if the current tokens form a type-id or an expression. /// It is similar to isTypeIdInParens but does not assume that the type-id /// is in parentheses. bool isTypeIdUnambiguously() { bool IsAmbiguous; if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous); return isTypeSpecifierQualifier(); } /// isCXXDeclarationStatement - C++-specialized function that disambiguates /// between a declaration and an expression statement, when parsing function /// bodies. Returns true for declaration, false for expression. bool isCXXDeclarationStatement(); /// isCXXSimpleDeclaration - C++-specialized function that disambiguates /// between a simple-declaration and an expression-statement. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. /// Returns false if the statement is disambiguated as an expression. bool isCXXSimpleDeclaration(bool AllowForRangeDecl); /// isCXXFunctionDeclarator - Disambiguates between a function declarator and /// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration /// might be a constructor-style initializer. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr); struct ConditionDeclarationOrInitStatementState; enum class ConditionOrInitStatement { Expression, ///< Disambiguated as an expression (either kind). ConditionDecl, ///< Disambiguated as the declaration form of condition. InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement. ForRangeDecl, ///< Disambiguated as a for-range declaration. Error ///< Can't be any of the above! }; /// Disambiguates between the different kinds of things that can happen /// after 'if (' or 'switch ('. This could be one of two different kinds of /// declaration (depending on whether there is a ';' later) or an expression. ConditionOrInitStatement isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt, bool CanBeForRangeDecl); bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous); bool isCXXTypeId(TentativeCXXTypeIdContext Context) { bool isAmbiguous; return isCXXTypeId(Context, isAmbiguous); } /// TPResult - Used as the result value for functions whose purpose is to /// disambiguate C++ constructs by "tentatively parsing" them. enum class TPResult { True, False, Ambiguous, Error }; /// Based only on the given token kind, determine whether we know that /// we're at the start of an expression or a type-specifier-seq (which may /// be an expression, in C++). /// /// This routine does not attempt to resolve any of the trick cases, e.g., /// those involving lookup of identifiers. /// /// \returns \c TPR_true if this token starts an expression, \c TPR_false if /// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot /// tell. TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind); /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a /// declaration specifier, TPResult::False if it is not, /// TPResult::Ambiguous if it could be either a decl-specifier or a /// function-style cast, and TPResult::Error if a parsing error was /// encountered. If it could be a braced C++11 function-style cast, returns /// BracedCastResult. /// Doesn't consume tokens. TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *InvalidAsDeclSpec = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. bool isCXXDeclarationSpecifierAType(); /// Determine whether the current token sequence might be /// '<' template-argument-list '>' /// rather than a less-than expression. TPResult isTemplateArgumentList(unsigned TokensToSkip); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. 
TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true, bool mayHaveDirectInit = false); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); public: TypeResult ParseTypeName(SourceRange *Range = nullptr, DeclaratorContext Context = DeclaratorContext::TypeNameContext, AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr, ParsedAttributes *Attrs = nullptr); private: void ParseBlockId(SourceLocation CaretLoc); /// Are [[]] attributes enabled? bool standardAttributesAllowed() const { const LangOptions &LO = getLangOpts(); return LO.DoubleSquareBracketAttributes; } // Check for the start of an attribute-specifier-seq in a context where an // attribute is not allowed. bool CheckProhibitedCXX11Attribute() { assert(Tok.is(tok::l_square)); if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square)) return false; return DiagnoseProhibitedCXX11Attribute(); } bool DiagnoseProhibitedCXX11Attribute(); void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation) { if (!standardAttributesAllowed()) return; if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) && Tok.isNot(tok::kw_alignas)) return; DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation); } void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation); void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs, DeclSpec &DS, Sema::TagUseKind TUK); // FixItLoc = possible correct location for the attributes void ProhibitAttributes(ParsedAttributesWithRange &Attrs, SourceLocation FixItLoc = SourceLocation()) { if (Attrs.Range.isInvalid()) return; DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc); Attrs.clear(); } void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs, SourceLocation FixItLoc = SourceLocation()) { if (Attrs.Range.isInvalid()) return; DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc); Attrs.clearListOnly(); } void DiagnoseProhibitedAttributes(const SourceRange &Range, SourceLocation FixItLoc); // Forbid C++11 and C2x attributes that appear on certain syntactic locations // which standard permits but we don't supported yet, for example, attributes // appertain to decl specifiers. void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs, unsigned DiagID); /// Skip C++11 and C2x attributes and return the end location of the /// last one. /// \returns SourceLocation() if there are no attributes. SourceLocation SkipCXX11Attributes(); /// Diagnose and skip C++11 and C2x attributes that appear in syntactic /// locations where attributes are not allowed. void DiagnoseAndSkipCXX11Attributes(); /// Parses syntax-generic attribute arguments for attributes which are /// known to the implementation, and adds them to the given ParsedAttributes /// list with the given attribute syntax. Returns the number of arguments /// parsed for the attribute. 
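// The TryParse* helpers above consume tokens, so they are meant to run under
// a backtracking transaction (see TentativeParsingAction later in this
// header). A self-contained sketch of that protocol, with an invented stream
// type standing in for the preprocessor:
#include <cstddef>
#include <vector>
struct TokStreamSketch {
  std::vector<int> Toks;
  std::size_t Pos = 0;
  int peek() const { return Pos < Toks.size() ? Toks[Pos] : -1; }
  void consume() { ++Pos; }
};
// Tentatively parse; on failure, rewind so the caller can try another rule.
static bool tryParseParenPairSketch(TokStreamSketch &S) {
  std::size_t Saved = S.Pos;          // mark the position (enable backtrack)
  if (S.peek() == '(') {
    S.consume();
    if (S.peek() == ')') { S.consume(); return true; } // commit
  }
  S.Pos = Saved;                      // revert: stream left untouched
  return false;
}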
unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } void MaybeParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) ParseGNUAttributes(attrs, endLoc, LateAttrs); } void ParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } void MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); } } void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) ParseCXX11Attributes(attrs, endLoc); } void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs, SourceLocation *EndLoc = nullptr); void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// Parses a C++11 (or C2x)-style attribute argument list. Returns true /// if this results in adding an attribute to the ParsedAttributes list. 
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) ParseMicrosoftDeclSpecs(Attrs, End); } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// Parses opencl_unroll_hint attribute if language is OpenCL v2.0 /// or higher. /// \return false if error happens. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// Parses opencl_unroll_hint attribute. /// \return false if error happens. 
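// MaybeParseGNUAttributes(), MaybeParseCXX11Attributes(), and the Microsoft
// variants above all follow one "maybe-parse" shape: peek at the current
// token (plus a language-option gate) and call the real parser only when the
// introducer is present, so optional-attribute points stay cheap. A hedged,
// self-contained sketch with invented names:
enum class TokKindSketch { KwAttribute, LSquare, Other };
struct MaybeParserSketch {
  TokKindSketch Cur = TokKindSketch::Other;
  bool MicrosoftExt = false;               // stands in for a LangOptions flag
  void parseGNUAttributesSketch() { /* real parsing would happen here */ }
  void maybeParseGNUAttributesSketch() {
    if (Cur == TokKindSketch::KwAttribute) // introducer present?
      parseGNUAttributesSketch();          // otherwise: no tokens touched
  }
};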
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseSwiftNewtypeAttribute(IdentifierInfo &SwiftNewtype, SourceLocation SwiftNewtypeLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation &EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. 
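// DeclaratorScopeObj above is an RAII guard with conditional teardown: it
// records which of its two setup steps actually ran and undoes exactly
// those, in reverse order, even if the enclosing parse bails out early. The
// same shape in isolation (scope push/pop bodies elided):
class ScopeGuardSketch {
  bool EnteredScope = false, CreatedScope = false;
public:
  void enter() {
    CreatedScope = true;   // step 1: push a scope (always undone)
    /* ... */
    EnteredScope = true;   // step 2: only when the semantic hook succeeded
  }
  ~ScopeGuardSketch() {
    if (EnteredScope) { /* notify semantic analysis we left the scope */ }
    if (CreatedScope) { /* pop the scope */ }
  }
};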
typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( Declarator &D, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. 
CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. 
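// UsingDeclarator above bundles the pieces of one using-declarator and has a
// clear() so a single instance can be reused while parsing a comma-separated
// list (parse, act on it, clear, repeat). The reuse loop, sketched with
// invented stand-ins:
#include <vector>
struct UsingDeclSketch {
  int Name = 0;                        // stands in for the parsed pieces
  void clear() { Name = 0; }           // reset to the "nothing parsed" state
};
static void parseUsingListSketch(const std::vector<int> &Input) {
  UsingDeclSketch D;
  for (int Tok : Input) {
    D.Name = Tok;                      // "parse" one declarator into D
    /* act on the completed declarator here */
    D.clear();                         // ready for the next list element
  }
}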
bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse clauses for '#pragma omp declare target'. DeclGroupPtrTy ParseOMPDeclareTargetClauses(); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. 
/// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *TailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val; SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers> MapTypeModifiers; SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers> MapTypeModifiersLoc; OpenMPMapClauseKind MapType = OMPC_MAP_unknown; bool IsMapTypeImplicit = false; SourceLocation DepLinMapLoc; }; /// Parses clauses with list. 
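// Several ParseOpenMP*Clause() overloads above take a ParseOnly flag: the
// clause's tokens are always consumed so the token stream stays in sync, but
// semantic actions are skipped and a null result is returned. A minimal
// sketch of that control flow (types invented):
#include <memory>
struct ClauseSketch { /* clause payload */ };
static std::unique_ptr<ClauseSketch> parseClauseSketch(bool ParseOnly) {
  // ... consume the clause's tokens unconditionally here ...
  if (ParseOnly)
    return nullptr;                        // syntax checked, no node built
  return std::make_unique<ClauseSketch>(); // normal path builds the node
}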
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, ParsedType ObjectType, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); bool isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true); void AnnotateTemplateIdTokenAsType(bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == 
tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); /// Parse the given string as a type. /// /// This is a dangerous utility function currently employed only by API notes. /// It is not a general entry-point for safely parsing types from strings. /// /// \param typeStr The string to be parsed as a type. /// \param context The name of the context in which this string is being /// parsed, which will be used in diagnostics. /// \param includeLoc The location at which this parse was triggered. TypeResult parseTypeFromString(StringRef typeStr, StringRef context, SourceLocation includeLoc); //===--------------------------------------------------------------------===// // Embarcadero: Array and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; }; } // end namespace clang #endif
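// tryParseMisplacedModuleImport() above tests the current token kind against
// a small fixed set of annotation kinds. Token::isOneOf() (used elsewhere in
// this header) exists for exactly this shape; a generic, self-contained
// sketch of the idiom:
#include <initializer_list>
static bool isOneOfSketch(int Kind, std::initializer_list<int> Kinds) {
  for (int K : Kinds)
    if (Kind == K)
      return true;
  return false;
}
// Usage sketch: if (isOneOfSketch(Kind, {ModuleBegin, ModuleEnd, ModuleInclude})) ...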
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/OpenMPClause.h" #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. /// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. 
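// The Parser's token state above is deliberately tiny: Tok is always the
// next unconsumed token, and PrevTokLocation remembers where the previous
// token was, so diagnostics like "expected ';' after expression" can point
// just past what was already accepted. A sketch of that invariant with
// invented types:
#include <cstddef>
#include <vector>
struct CursorSketch {
  std::vector<int> Toks;
  std::size_t Pos = 0;      // Toks[Pos] plays the role of Tok
  std::size_t PrevLoc = 0;  // plays the role of PrevTokLocation
  int consume() {           // mirrors ConsumeToken(): advance, remember where
    PrevLoc = Pos;          // precondition: not already at the end
    return Toks[Pos++];
  }
};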
IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFENVHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. 
This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; /// Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. 
/// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc, Priority Prio) { if (!Locs.empty() && Locs.back().isActive(P)) { if (Locs.back().Priority <= Prio) { Locs.back().TemplateName = TemplateName; Locs.back().LessLoc = LessLoc; Locs.back().Priority = Prio; } } else { Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount, P.BracketCount, P.BraceCount}); } } /// Mark the current potential missing template location as having been /// handled (this happens if we pass a "corresponding" '>' or '>>' token /// or leave a bracket scope). void clear(Parser &P) { while (!Locs.empty() && Locs.back().isActiveOrNested(P)) Locs.pop_back(); } /// Get the current enclosing expression that might have been intended to be /// a template name. Loc *getCurrent(Parser &P) { if (!Locs.empty() && Locs.back().isActive(P)) return &Locs.back(); return nullptr; } }; AngleBracketTracker AngleBrackets; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; /// Whether to skip parsing of function bodies. /// /// This option can be used, for example, to speed up searches for /// declarations/definitions when indexing. bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; /// Flags describing a context in which we're parsing a statement. enum class ParsedStmtContext { /// This context permits declarations in language modes where declarations /// are not statements. AllowDeclarationsInC = 0x1, /// This context permits standalone OpenMP directives. AllowStandaloneOpenMPDirectives = 0x2, /// This context is at the top level of a GNU statement expression. InStmtExpr = 0x4, /// The context of a regular substatement. SubStmt = 0, /// The context of a compound-statement. Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives, LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr) }; /// Act on an expression statement that might be the last statement in a /// GNU statement expression. Checks whether we are actually at the end of /// a statement expression and builds a suitable expression statement. StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx); public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding.
All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. 
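// TryConsumeToken() above captures the common "optional punctuation" move:
// consume the token only when it is next, otherwise leave the stream
// untouched and report false. Self-contained sketch (int stands in for a
// token kind):
#include <deque>
struct OptionalConsumeSketch {
  std::deque<int> Toks;
  bool tryConsume(int Expected) {
    if (Toks.empty() || Toks.front() != Expected)
      return false;             // not present: stream unchanged
    Toks.pop_front();           // present: consume exactly one token
    return true;
  }
};
// Typical call site: while (S.tryConsume(',')) { /* parse next element */ }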
bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// Returns true if the current token is '=' or a token kind that is a /// common typo for '='. For such typos, a FixIt to '=' is given. bool isTokenEqualOrEqualTypo(); /// Return the current token to the token stream and make the given /// token the current token. void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed, /*IsReinject*/true); PP.Lex(Tok); PP.EnterToken(Next, /*IsReinject*/true); } SourceLocation ConsumeAnnotationToken() { assert(Tok.isAnnotation() && "wrong consume method"); SourceLocation Loc = Tok.getLocation(); PrevTokLocation = Tok.getAnnotationEndLoc(); PP.Lex(Tok); return Loc; } /// ConsumeParen - This consume method keeps the paren count up-to-date. /// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) { AngleBrackets.clear(*this); --ParenCount; // Don't let unbalanced )'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. /// SourceLocation ConsumeBracket() { assert(isTokenBracket() && "wrong consume method"); if (Tok.getKind() == tok::l_square) ++BracketCount; else if (BracketCount) { AngleBrackets.clear(*this); --BracketCount; // Don't let unbalanced ]'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBrace - This consume method keeps the brace count up-to-date. /// SourceLocation ConsumeBrace() { assert(isTokenBrace() && "wrong consume method"); if (Tok.getKind() == tok::l_brace) ++BraceCount; else if (BraceCount) { AngleBrackets.clear(*this); --BraceCount; // Don't let unbalanced }'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeStringToken - Consume the current 'peek token', lexing a new one /// and returning the location of the consumed string token. This method is /// specific to strings, as it handles string literal concatenation, as per /// C99 5.1.1.2, translation phase #6. SourceLocation ConsumeStringToken() { assert(isTokenStringLiteral() && "Should only consume string literals with this method"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// Consume the current code-completion token. /// /// This routine can be called to consume the code-completion token and /// continue processing in special cases where \c cutOffParsing() isn't /// desired, such as token caching or completion with lookahead. SourceLocation ConsumeCodeCompletionToken() { assert(Tok.is(tok::code_completion)); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// \brief When we are consuming a code-completion token without having /// matched a specific position in the grammar, provide code-completion /// results based on context. /// /// \returns the source location of the code-completion token. SourceLocation handleUnexpectedCodeCompletionToken(); /// Abruptly cut off parsing; mainly used when we have reached the /// code-completion point. void cutOffParsing() { if (PP.isCodeCompletionEnabled()) PP.setCodeCompletionReached(); // Cut off parsing by acting as if we reached the end-of-file. Tok.setKind(tok::eof); } /// Determine if we're at the end of the file or at a transition /// between modules.
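// ConsumeParen/ConsumeBracket/ConsumeBrace above all clamp their counters at
// zero so an unmatched closer in broken input cannot drive a count negative
// and confuse later "is this nesting level still active?" checks (compare
// AngleBracketTracker::Loc::isActive). The guard in isolation:
static void noteDelimiterSketch(bool IsOpener, unsigned short &Count) {
  if (IsOpener)
    ++Count;
  else if (Count)
    --Count; // ignore an unmatched closer instead of going negative
}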
bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// Checks if the \p Level is valid for use in a fold expression. bool isFoldOperator(prec::Level Level) const; /// Checks if the \p Kind is a valid operator for fold expressions. bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); /// Handle the annotation token produced for /// #pragma comment... void HandlePragmaMSComment(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. 
static ParsedType getTypeAnnotation(const Token &Tok) { return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, ParsedType T) { Tok.setAnnotationValue(T.getAsOpaquePtr()); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. bool isObjCInstancetype() { assert(getLangOpts().ObjC); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token. 
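/// A sketch (assuming Tok is a tok::annot_template_id left behind by
/// template-name annotation):
///
///   TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
///   // TemplateId describes the template name and its explicit arguments.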
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... /// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevPreferredType = P.PreferredType; PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. /// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// The kind of extra semi diagnostic to emit. 
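/// For instance, InsideStruct covers stray semicolons between member
/// declarations:
///
///   struct S { int x;; };   // the second ';' is the "extra" semi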
enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. bool expectIdentifier(); public: //===--------------------------------------------------------------------===// // Scope manipulation /// ParseScope - Introduces a new scope for parsing. The kind of /// scope is determined by ScopeFlags. Objects of this type should /// be created on the stack to coincide with the position where the /// parser enters the new scope, and this object's constructor will /// create that new scope. Similarly, once the object is destroyed /// the parser will exit the scope. class ParseScope { Parser *Self; ParseScope(const ParseScope &) = delete; void operator=(const ParseScope &) = delete; public: // ParseScope - Construct a new object to manage a scope in the // parser Self where the new Scope is created with the flags // ScopeFlags, but only when we aren't about to enter a compound statement. ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true, bool BeforeCompoundStmt = false) : Self(Self) { if (EnteredScope && !BeforeCompoundStmt) Self->EnterScope(ScopeFlags); else { if (BeforeCompoundStmt) Self->incrementMSManglingNumber(); this->Self = nullptr; } } // Exit - Exit the scope associated with this object now, rather // than waiting until the object is destroyed. void Exit() { if (Self) { Self->ExitScope(); Self = nullptr; } } ~ParseScope() { Exit(); } }; /// EnterScope - Start a new scope. void EnterScope(unsigned ScopeFlags); /// ExitScope - Pop a scope off the scope stack. void ExitScope(); private: /// RAII object used to modify the scope flags for the current scope. class ParseScopeFlags { Scope *CurScope; unsigned OldFlags; ParseScopeFlags(const ParseScopeFlags &) = delete; void operator=(const ParseScopeFlags &) = delete; public: ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true); ~ParseScopeFlags(); }; //===--------------------------------------------------------------------===// // Diagnostic Emission and Error recovery. public: DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID); DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID); DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); } private: void SuggestParentheses(SourceLocation Loc, unsigned DK, SourceRange ParenRange); void CheckNestedObjCContexts(SourceLocation AtLoc); public: /// Control flags for SkipUntil functions. enum SkipUntilFlags { StopAtSemi = 1 << 0, ///< Stop skipping at semicolon /// Stop skipping at specified token, but don't skip the token itself StopBeforeMatch = 1 << 1, StopAtCodeCompletion = 1 << 2 ///< Stop at code completion }; friend constexpr SkipUntilFlags operator|(SkipUntilFlags L, SkipUntilFlags R) { return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) | static_cast<unsigned>(R)); } /// SkipUntil - Read tokens until we get to the specified token, then consume /// it (unless StopBeforeMatch is specified). Because we cannot guarantee /// that the token will ever occur, this skips to the next token, or to some /// likely good stopping point. 
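/// For example, a typical recovery call (a sketch):
///
///   // Skip to the ')' but give up at ';', and don't consume the ')'.
///   SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
///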
If Flags contains the StopAtSemi flag, skipping will /// stop at a ';' character. /// /// If SkipUntil finds the specified token, it returns true, otherwise it /// returns false. bool SkipUntil(tok::TokenKind T, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { return SkipUntil(llvm::makeArrayRef(T), Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2}; return SkipUntil(TokArray, Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2, T3}; return SkipUntil(TokArray, Flags); } bool SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)); /// SkipMalformedDecl - Read tokens until we get to some likely good stopping /// point for skipping past a simple-declaration. void SkipMalformedDecl(); private: //===--------------------------------------------------------------------===// // Lexing and parsing of C++ inline methods. struct ParsingClass; /// [class.mem]p1: "... the class is regarded as complete within /// - function bodies /// - default arguments /// - exception-specifications (TODO: C++0x) /// - and brace-or-equal-initializers for non-static data members /// (including such things in nested classes)." /// LateParsedDeclarations build the tree of those elements so they can /// be parsed after parsing the top-level class. class LateParsedDeclaration { public: virtual ~LateParsedDeclaration(); virtual void ParseLexedMethodDeclarations(); virtual void ParseLexedMemberInitializers(); virtual void ParseLexedMethodDefs(); virtual void ParseLexedAttributes(); }; /// Inner node of the LateParsedDeclaration tree that parses /// all its members recursively. class LateParsedClass : public LateParsedDeclaration { public: LateParsedClass(Parser *P, ParsingClass *C); ~LateParsedClass() override; void ParseLexedMethodDeclarations() override; void ParseLexedMemberInitializers() override; void ParseLexedMethodDefs() override; void ParseLexedAttributes() override; private: Parser *Self; ParsingClass *Class; }; /// Contains the lexed tokens of an attribute with arguments that /// may reference member variables and so need to be parsed at the /// end of the class declaration after parsing all other member /// declarations. /// FIXME: Perhaps we should change the name of LateParsedDeclaration to /// LateParsedTokens. struct LateParsedAttribute : public LateParsedDeclaration { Parser *Self; CachedTokens Toks; IdentifierInfo &AttrName; IdentifierInfo *MacroII = nullptr; SourceLocation AttrNameLoc; SmallVector<Decl*, 2> Decls; explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name, SourceLocation Loc) : Self(P), AttrName(Name), AttrNameLoc(Loc) {} void ParseLexedAttributes() override; void addDecl(Decl *D) { Decls.push_back(D); } }; // A list of late-parsed attributes. Used by ParseGNUAttributes. class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> { public: LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { } bool parseSoon() { return ParseSoon; } private: bool ParseSoon; // Are we planning to parse these shortly after creation? }; /// Contains the lexed tokens of a member function definition /// which needs to be parsed at the end of the class declaration /// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration { Parser *Self; Decl *D; CachedTokens Toks; /// Whether this member function had an associated template /// scope. When true, D is a template declaration; /// otherwise, it is a member function declaration. bool TemplateScope; explicit LexedMethod(Parser* P, Decl *MD) : Self(P), D(MD), TemplateScope(false) {} void ParseLexedMethodDefs() override; }; /// LateParsedDefaultArgument - Keeps track of a parameter that may /// have a default argument that cannot be parsed yet because it /// occurs within a member function declaration inside the class /// (C++ [class.mem]p2). struct LateParsedDefaultArgument { explicit LateParsedDefaultArgument(Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr) : Param(P), Toks(std::move(Toks)) { } /// Param - The parameter declaration for this parameter. Decl *Param; /// Toks - The sequence of tokens that comprises the default /// argument expression, not including the '=' or the terminating /// ')' or ','. This will be NULL for parameters that have no /// default argument. std::unique_ptr<CachedTokens> Toks; }; /// LateParsedMethodDeclaration - A method declaration inside a class that /// contains at least one entity whose parsing needs to be delayed /// until the class itself is completely-defined, such as a default /// argument (C++ [class.mem]p2). struct LateParsedMethodDeclaration : public LateParsedDeclaration { explicit LateParsedMethodDeclaration(Parser *P, Decl *M) : Self(P), Method(M), TemplateScope(false), ExceptionSpecTokens(nullptr) {} void ParseLexedMethodDeclarations() override; Parser* Self; /// Method - The method declaration. Decl *Method; /// Whether this member function had an associated template /// scope. When true, Method is a template declaration; /// otherwise, it is a member function declaration. bool TemplateScope; /// DefaultArgs - Contains the parameters of the function and /// their default arguments. At least one of the parameters will /// have a default argument, but all of the parameters of the /// method will be stored so that they can be reintroduced into /// scope at the appropriate times. SmallVector<LateParsedDefaultArgument, 8> DefaultArgs; /// The set of tokens that make up an exception-specification that /// has not yet been parsed. CachedTokens *ExceptionSpecTokens; }; /// LateParsedMemberInitializer - An initializer for a non-static class data /// member whose parsing must be delayed until the class is completely /// defined (C++11 [class.mem]p2). struct LateParsedMemberInitializer : public LateParsedDeclaration { LateParsedMemberInitializer(Parser *P, Decl *FD) : Self(P), Field(FD) { } void ParseLexedMemberInitializers() override; Parser *Self; /// Field - The field declaration. Decl *Field; /// CachedTokens - The sequence of tokens that comprises the initializer, /// including any leading '='. CachedTokens Toks; }; /// LateParsedDeclarationsContainer - During parsing of a top-level /// (non-nested) C++ class, the method declarations (and possibly attached /// inline definitions) that contain parts which won't be parsed until after /// the definition is completed (C++ [class.mem]p2) are stored here, together /// with the tokens that will later be parsed to create those entities. typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer; /// Representation of a class that has been parsed, including /// any member function declarations or definitions that need to be /// parsed after the corresponding top-level class is complete.
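/// For example, in
///
///   class C { void f() { g(); }  void g(); };
///
/// the body of 'f' is stashed as cached tokens while 'C' is being parsed and
/// is only parsed once 'C' is complete, so the call to 'g' resolves.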
struct ParsingClass { ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : TopLevelClass(TopLevelClass), TemplateScope(false), IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { } /// Whether this is a "top-level" class, meaning that it is /// not nested within another class. bool TopLevelClass : 1; /// Whether this class had an associated template /// scope. When true, TagOrTemplate is a template declaration; /// otherwise, it is a tag declaration. bool TemplateScope : 1; /// Whether this class is an __interface. bool IsInterface : 1; /// The class or class template whose definition we are parsing. Decl *TagOrTemplate; /// LateParsedDeclarations - Method declarations, inline definitions and /// nested classes that contain pieces whose parsing will be delayed until /// the top-level class is fully defined. LateParsedDeclarationsContainer LateParsedDeclarations; }; /// The stack of classes that are currently being /// parsed. Nested and local classes will be pushed onto this stack /// when they are parsed, and removed afterward. std::stack<ParsingClass *> ClassStack; ParsingClass &getCurrentClass() { assert(!ClassStack.empty() && "No lexed method stacks!"); return *ClassStack.top(); } /// RAII object used to manage the parsing of a class definition. class ParsingClassDefinition { Parser &P; bool Popped; Sema::ParsingClassState State; public: ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : P(P), Popped(false), State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) { } /// Pop this class off the stack. void Pop() { assert(!Popped && "Nested class has already been popped"); Popped = true; P.PopParsingClass(State); } ~ParsingClassDefinition() { if (!Popped) P.PopParsingClass(State); } }; /// Contains any template-specific information that has been parsed /// prior to parsing declaration specifiers. struct ParsedTemplateInfo { ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { } ParsedTemplateInfo(TemplateParameterLists *TemplateParams, bool isSpecialization, bool lastParameterListWasEmpty = false) : Kind(isSpecialization? ExplicitSpecialization : Template), TemplateParams(TemplateParams), LastParameterListWasEmpty(lastParameterListWasEmpty) { } explicit ParsedTemplateInfo(SourceLocation ExternLoc, SourceLocation TemplateLoc) : Kind(ExplicitInstantiation), TemplateParams(nullptr), ExternLoc(ExternLoc), TemplateLoc(TemplateLoc), LastParameterListWasEmpty(false) { } /// The kind of template we are parsing. enum { /// We are not parsing a template at all. NonTemplate = 0, /// We are parsing a template declaration. Template, /// We are parsing an explicit specialization. ExplicitSpecialization, /// We are parsing an explicit instantiation. ExplicitInstantiation } Kind; /// The template parameter lists, for template declarations /// and explicit specializations. TemplateParameterLists *TemplateParams; /// The location of the 'extern' keyword, if any, for an explicit /// instantiation. SourceLocation ExternLoc; /// The location of the 'template' keyword, for an explicit /// instantiation. SourceLocation TemplateLoc; /// Whether the last template parameter list was empty.
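/// (For example, in 'template<> struct X<int>;' the last parameter list,
/// 'template<>', is empty.)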
bool LastParameterListWasEmpty; SourceRange getSourceRange() const LLVM_READONLY; }; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); static void LateTemplateParserCleanupCallback(void *P); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. struct ParsedAttributesWithRange : ParsedAttributes { ParsedAttributesWithRange(AttributeFactory &factory) : ParsedAttributes(factory) {} void clear() { ParsedAttributes::clear(); Range = SourceRange(); } SourceRange Range; }; struct ParsedAttributesViewWithRange : ParsedAttributesView { ParsedAttributesViewWithRange() : ParsedAttributesView() {} void clearListOnly() { ParsedAttributesView::clearListOnly(); Range = SourceRange(); } SourceRange Range; }; DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc, if non-NULL, is filled with the location of the last token of // the simple-asm. 
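// For example, in 'int x asm("x_alias");' the simple-asm is the
// 'asm("x_alias")' clause, and EndLoc would point at its closing ')'.
// (Illustrative only.)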
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr); ExprResult ParseAsmStringLiteral(); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. 
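/// For example, given the base type 'NSArray' in
///
///   NSArray<NSString *> *a;
///
/// this parses '<NSString *>' and extends the type with that type argument.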
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc, ParsedAttributes &Attrs); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for Objective-C context-sensitive keyword recognition. enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx, ParsedAttributes *ParamAttrs); void ParseObjCMethodRequirement(); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpressionInExprEvalContext( TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseCaseExpression(SourceLocation CaseLoc); ExprResult ParseConstraintExpression(); // Expr that doesn't include commas. ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, bool IsUnevaluated); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false); /// Returns true if the next token cannot start an expression.
bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less); void checkPotentialAngleBracket(ExprResult &PotentialTemplateName); bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &, const Token &OpToken); bool checkPotentialAngleBracketDelimiter(const Token &OpToken) { if (auto *Info = AngleBrackets.getCurrent(*this)) return checkPotentialAngleBracketDelimiter(*Info, OpToken); return false; } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<Expr*, 20> ExprListTy; typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, llvm::function_ref<void()> ExpressionStarts = llvm::function_ref<void()>()); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' FoldExpr, // Also allow fold-expression <anything> CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr, bool OnlyNamespace = false); //===--------------------------------------------------------------------===// // C++11 5.1.2: Lambda expressions /// Result of tentatively parsing a lambda-introducer. 
enum class LambdaIntroducerTentativeParse { /// This appears to be a lambda-introducer, which has been fully parsed. Success, /// This is a lambda-introducer, but has not been fully parsed, and this /// function needs to be called again to parse it. Incomplete, /// This is definitely an Objective-C message send expression, rather than /// a lambda-introducer, attribute-specifier, or array designator. MessageSend, /// This is not a lambda-introducer. Invalid, }; // [...] () -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); bool ParseLambdaIntroducer(LambdaIntroducer &Intro, LambdaIntroducerTentativeParse *Tentative = nullptr); ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range, bool MayBeFollowedByDirectInit); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. 
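/// For example 'int', 'bool', 'wchar_t', or 'double', as in the
/// functional-style cast 'int(x)'.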
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while/for condition expression. struct ForRangeInfo; Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK, ForRangeInfo *FRI = nullptr); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. /// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); ExprResult ParseInitializerWithPotentialDesignator(); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used.) typedef SmallVector<Stmt*, 32> StmtVector; /// A SmallVector of expressions, with stack size 12 (the maximum used.) typedef SmallVector<Expr*, 12> ExprVector; /// A SmallVector of types. 
typedef SmallVector<ParsedType, 12> TypeVector; StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr, ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt); StmtResult ParseStatementOrDeclaration( StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc = nullptr); StmtResult ParseStatementOrDeclarationAfterAttributes( StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); StmtResult ParseExprStatement(ParsedStmtContext StmtCtx); StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs, ParsedStmtContext StmtCtx); StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx, bool MissingCase = false, ExprResult Expr = ExprResult()); StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx); StmtResult ParseCompoundStatement(bool isStmtExpr = false); StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags); void ParseCompoundStatementLeadingPragmas(); bool ConsumeNullStmt(StmtVector &Stmts); StmtResult ParseCompoundStatementBody(bool isStmtExpr = false); bool ParseParenExprOrCondition(StmtResult *InitStmt, Sema::ConditionResult &CondResult, SourceLocation Loc, Sema::ConditionKind CK); StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc); StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc); StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc); StmtResult ParseDoStatement(); StmtResult ParseForStatement(SourceLocation *TrailingElseLoc); StmtResult ParseGotoStatement(); StmtResult ParseContinueStatement(); StmtResult ParseBreakStatement(); StmtResult ParseReturnStatement(); StmtResult ParseAsmStatement(bool &msAsm); StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc); StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); /// Describes the behavior that should be taken for an __if_exists /// block. enum IfExistsBehavior { /// Parse the block; this code is always used. IEB_Parse, /// Skip the block entirely; this code is never used. IEB_Skip, /// Parse the block as a dependent block, which may be used in /// some template instantiations but not others. IEB_Dependent }; /// Describes the condition of a Microsoft __if_exists or /// __if_not_exists block. struct IfExistsCondition { /// The location of the initial keyword. SourceLocation KeywordLoc; /// Whether this is an __if_exists block (rather than an /// __if_not_exists block). bool IsIfExists; /// Nested-name-specifier preceding the name. CXXScopeSpec SS; /// The name we're looking for. UnqualifiedId Name; /// The behavior that this __if_exists or __if_not_exists block /// should have.
IfExistsBehavior Behavior; }; bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result); void ParseMicrosoftIfExistsStatement(StmtVector &Stmts); void ParseMicrosoftIfExistsExternalDeclaration(); void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType, ParsedAttributes &AccessAttrs, AccessSpecifier &CurAS); bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs, bool &InitExprsOk); bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names, SmallVectorImpl<Expr *> &Constraints, SmallVectorImpl<Expr *> &Exprs); //===--------------------------------------------------------------------===// // C++ 6: Statements and Blocks StmtResult ParseCXXTryBlock(); StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false); StmtResult ParseCXXCatchBlock(bool FnCatch = false); //===--------------------------------------------------------------------===// // MS: SEH Statements and Blocks StmtResult ParseSEHTryBlock(); StmtResult ParseSEHExceptBlock(SourceLocation Loc); StmtResult ParseSEHFinallyBlock(SourceLocation Loc); StmtResult ParseSEHLeaveStatement(); //===--------------------------------------------------------------------===// // Objective-C Statements StmtResult ParseObjCAtStatement(SourceLocation atLoc, ParsedStmtContext StmtCtx); StmtResult ParseObjCTryStmt(SourceLocation atLoc); StmtResult ParseObjCThrowStmt(SourceLocation atLoc); StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc); StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc); //===--------------------------------------------------------------------===// // C99 6.7: Declarations. /// A context for parsing declaration specifiers. TODO: flesh this /// out; there are other significant restrictions on specifiers that /// would be best implemented in the parser. enum class DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: return false; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which we can perform class template argument /// deduction?
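/// For example, deduction is permitted for an ordinary variable declaration
/// ('std::pair p(1, 2.0);' in DSC_normal) but not in a trailing return type
/// (DSC_trailing).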
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_type_specifier: return true; case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Information on a C++0x for-range-initializer found while parsing a /// declaration which turns out to be a for-range-declaration. struct ForRangeInit { SourceLocation ColonLoc; ExprResult RangeExpr; bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); } }; struct ForRangeInfo : ForRangeInit { StmtResult LoopVar; }; DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs, bool RequireSemi, ForRangeInit *FRI = nullptr); bool MightBeDeclarator(DeclaratorContext Context); DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context, SourceLocation *DeclEnd = nullptr, ForRangeInit *FRI = nullptr); Decl *ParseDeclarationAfterDeclarator(Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo()); bool ParseAsmAttributesAfterDeclarator(Declarator &D); Decl *ParseDeclarationAfterDeclaratorAndAttributes( Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ForRangeInit *FRI = nullptr); Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope); Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope); /// When in code-completion, skip parsing of the function/method body /// unless the body contains the code-completion point. /// /// \returns true if the function body was skipped. bool trySkippingFunctionBody(); bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC, ParsedAttributesWithRange &Attrs); DeclSpecContext getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context); void ParseDeclarationSpecifiers( DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal, LateParsedAttrList *LateAttrs = nullptr); bool DiagnoseMissingSemiAfterTagDefinition( DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext, LateParsedAttrList *LateAttrs = nullptr); void ParseSpecifierQualifierList( DeclSpec &DS, AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal); void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context); void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC); void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl); void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType, Decl *TagDecl); void ParseStructDeclaration( ParsingDeclSpec &DS, llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback); bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false); bool isTypeSpecifierQualifier(); /// isKnownToBeTypeSpecifier - Return true if we know that the specified token /// is definitely a type-specifier. 
Return false if it isn't part of a type /// specifier or if we're not sure. bool isKnownToBeTypeSpecifier(const Token &Tok) const; /// Return true if we know that we are definitely looking at a /// decl-specifier, and that it isn't part of an expression such as a /// function-style cast. Return false if it's not a decl-specifier, or we're /// not sure. bool isKnownToBeDeclarationSpecifier() { if (getLangOpts().CPlusPlus) return isCXXDeclarationSpecifier() == TPResult::True; return isDeclarationSpecifier(true); } /// isDeclarationStatement - Disambiguates between a declaration or an /// expression statement, when parsing function bodies. /// Returns true for declaration, false for expression. bool isDeclarationStatement() { if (getLangOpts().CPlusPlus) return isCXXDeclarationStatement(); return isDeclarationSpecifier(true); } /// isForInitDeclaration - Disambiguates between a declaration or an /// expression in the context of the C 'clause-1' or the C++ /// 'for-init-statement' part of a 'for' statement. /// Returns true for declaration, false for expression. bool isForInitDeclaration() { if (getLangOpts().OpenMP) Actions.startOpenMPLoop(); if (getLangOpts().CPlusPlus) return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true); return isDeclarationSpecifier(true); } /// Determine whether this is a C++1z for-range-identifier. bool isForRangeIdentifier(); /// Determine whether we are currently at the start of an Objective-C /// class message that appears to be missing the open bracket '['. bool isStartOfObjCClassMessageMissingOpenBracket(); /// Starting with a scope specifier, identifier, or /// template-id that refers to the current class, determine whether /// this is a constructor declarator. bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false); /// Specifies the context in which type-id/expression /// disambiguation will occur. enum TentativeCXXTypeIdContext { TypeIdInParens, TypeIdUnambiguous, TypeIdAsTemplateArgument }; /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know /// whether the parens contain an expression or a type-id. /// Returns true for a type-id and false for an expression. bool isTypeIdInParens(bool &isAmbiguous) { if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdInParens, isAmbiguous); isAmbiguous = false; return isTypeSpecifierQualifier(); } bool isTypeIdInParens() { bool isAmbiguous; return isTypeIdInParens(isAmbiguous); } /// Checks if the current tokens form a type-id or an expression. /// It is similar to isTypeIdInParens but does not assume that the type-id /// is in parentheses. bool isTypeIdUnambiguously() { bool IsAmbiguous; if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous); return isTypeSpecifierQualifier(); } /// isCXXDeclarationStatement - C++-specialized function that disambiguates /// between a declaration or an expression statement, when parsing function /// bodies. Returns true for declaration, false for expression. bool isCXXDeclarationStatement(); /// isCXXSimpleDeclaration - C++-specialized function that disambiguates /// between a simple-declaration or an expression-statement. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. /// Returns false if the statement is disambiguated as expression. bool isCXXSimpleDeclaration(bool AllowForRangeDecl); /// isCXXFunctionDeclarator - Disambiguates between a function declarator or /// a constructor-style initializer, when parsing declaration statements.
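/// The classic instance of the ambiguity (a sketch):
///
///   T x(T());   // a function declarator taking an unnamed parameter of
///               // function type (the "most vexing parse"), not a variable
///               // initialized with 'T()'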
/// Returns true for function declarator and false for constructor-style /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration /// might be a constructor-style initializer. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr); struct ConditionDeclarationOrInitStatementState; enum class ConditionOrInitStatement { Expression, ///< Disambiguated as an expression (either kind). ConditionDecl, ///< Disambiguated as the declaration form of condition. InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement. ForRangeDecl, ///< Disambiguated as a for-range declaration. Error ///< Can't be any of the above! }; /// Disambiguates between the different kinds of things that can happen /// after 'if (' or 'switch ('. This could be one of two different kinds of /// declaration (depending on whether there is a ';' later) or an expression. ConditionOrInitStatement isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt, bool CanBeForRangeDecl); bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous); bool isCXXTypeId(TentativeCXXTypeIdContext Context) { bool isAmbiguous; return isCXXTypeId(Context, isAmbiguous); } /// TPResult - Used as the result value for functions whose purpose is to /// disambiguate C++ constructs by "tentatively parsing" them. enum class TPResult { True, False, Ambiguous, Error }; /// Based only on the given token kind, determine whether we know that /// we're at the start of an expression or a type-specifier-seq (which may /// be an expression, in C++). /// /// This routine does not attempt to resolve any of the trick cases, e.g., /// those involving lookup of identifiers. /// /// \returns \c TPR_true if this token starts an expression, \c TPR_false if /// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot /// tell. TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind); /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a /// declaration specifier, TPResult::False if it is not, /// TPResult::Ambiguous if it could be either a decl-specifier or a /// function-style cast, and TPResult::Error if a parsing error was /// encountered. If it could be a braced C++11 function-style cast, returns /// BracedCastResult. /// Doesn't consume tokens. TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *InvalidAsDeclSpec = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. bool isCXXDeclarationSpecifierAType(); /// Determine whether the current token sequence might be /// '<' template-argument-list '>' /// rather than a less-than expression. TPResult isTemplateArgumentList(unsigned TokensToSkip); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. 
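// A sketch of the intended pattern (hypothetical caller):
//
//   RevertingTentativeParsingAction PA(*this);
//   TPResult TPR = TryParseSimpleDeclaration(/*AllowForRangeDecl=*/false);
//   // PA backtracks on destruction; the caller then acts on TPR.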
TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true, bool mayHaveDirectInit = false); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); public: TypeResult ParseTypeName(SourceRange *Range = nullptr, DeclaratorContext Context = DeclaratorContext::TypeNameContext, AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr, ParsedAttributes *Attrs = nullptr); private: void ParseBlockId(SourceLocation CaretLoc); /// Are [[]] attributes enabled? bool standardAttributesAllowed() const { const LangOptions &LO = getLangOpts(); return LO.DoubleSquareBracketAttributes; } // Check for the start of an attribute-specifier-seq in a context where an // attribute is not allowed. bool CheckProhibitedCXX11Attribute() { assert(Tok.is(tok::l_square)); if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square)) return false; return DiagnoseProhibitedCXX11Attribute(); } bool DiagnoseProhibitedCXX11Attribute(); void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation) { if (!standardAttributesAllowed()) return; if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) && Tok.isNot(tok::kw_alignas)) return; DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation); } void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs, SourceLocation CorrectLocation); void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs, DeclSpec &DS, Sema::TagUseKind TUK); // FixItLoc = possible correct location for the attributes void ProhibitAttributes(ParsedAttributesWithRange &Attrs, SourceLocation FixItLoc = SourceLocation()) { if (Attrs.Range.isInvalid()) return; DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc); Attrs.clear(); } void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs, SourceLocation FixItLoc = SourceLocation()) { if (Attrs.Range.isInvalid()) return; DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc); Attrs.clearListOnly(); } void DiagnoseProhibitedAttributes(const SourceRange &Range, SourceLocation FixItLoc); // Forbid C++11 and C2x attributes that appear in certain syntactic locations // which the standard permits but we don't support yet; for example, // attributes that appertain to decl specifiers. void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs, unsigned DiagID); /// Skip C++11 and C2x attributes and return the end location of the /// last one. /// \returns SourceLocation() if there are no attributes. SourceLocation SkipCXX11Attributes(); /// Diagnose and skip C++11 and C2x attributes that appear in syntactic /// locations where attributes are not allowed. void DiagnoseAndSkipCXX11Attributes(); /// Parses syntax-generic attribute arguments for attributes which are /// known to the implementation, and adds them to the given ParsedAttributes /// list with the given attribute syntax. Returns the number of arguments /// parsed for the attribute.
unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } void MaybeParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) ParseGNUAttributes(attrs, endLoc, LateAttrs); } void ParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } void MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); } } void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) ParseCXX11Attributes(attrs, endLoc); } void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs, SourceLocation *EndLoc = nullptr); void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// Parses a C++11 (or C2x)-style attribute argument list. Returns true /// if this results in adding an attribute to the ParsedAttributes list. 
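/// For example (illustrative):
/// \code
///   [[deprecated("use Replacement()")]]  // argument list: ("use Replacement()")
///   [[nodiscard]]                        // no argument list; nothing to parse here
/// \endcode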
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) ParseMicrosoftDeclSpecs(Attrs, End); } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// Parses opencl_unroll_hint attribute if the language is OpenCL v2.0 /// or higher. /// \return false if an error occurs. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// Parses opencl_unroll_hint attribute. /// \return false if an error occurs.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseSwiftNewtypeAttribute(IdentifierInfo &SwiftNewtype, SourceLocation SwiftNewtypeLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation &EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. 
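/// For example (illustrative of how this typedef is used):
/// \code
///   // ParseDeclarator forwards &Parser::ParseDirectDeclarator to
///   // ParseDeclaratorInternal, which eventually invokes it as:
///   (this->*DirectDeclParser)(D);
/// \endcode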
typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( Declarator &D, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. 
CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. 
bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse clauses for '#pragma omp declare target'. DeclGroupPtrTy ParseOMPDeclareTargetClauses(); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *TailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val; SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers> MapTypeModifiers; SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers> MapTypeModifiersLoc; OpenMPMapClauseKind MapType = OMPC_MAP_unknown; bool IsMapTypeImplicit = false; SourceLocation DepLinMapLoc; }; /// Parses clauses with list. bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, ParsedType ObjectType, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. 
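/// For example (illustrative): given
/// \code
///   map(mapper(MyMapper), tofrom: Buf[0:N])
/// \endcode
/// this consumes the 'mapper(MyMapper)' modifier that precedes the map type.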
bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); bool isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true); void AnnotateTemplateIdTokenAsType(bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); /// Parse 
the given string as a type. /// /// This is a dangerous utility function currently employed only by API notes. /// It is not a general entry-point for safely parsing types from strings. /// /// \param typeStr The string to be parsed as a type. /// \param context The name of the context in which this string is being /// parsed, which will be used in diagnostics. /// \param includeLoc The location at which this parse was triggered. TypeResult parseTypeFromString(StringRef typeStr, StringRef context, SourceLocation includeLoc); //===--------------------------------------------------------------------===// // Embarcadero: Array and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; }; } // end namespace clang #endif
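//
// Usage sketch (illustrative, modeled on clang::ParseAST and not part of this
// header): PP, S, and Consumer below stand for the surrounding Preprocessor,
// Sema, and ASTConsumer.
//
//   Parser P(PP, S, /*SkipFunctionBodies=*/false);
//   P.Initialize();
//   Parser::DeclGroupPtrTy ADecl;
//   for (bool AtEOF = P.ParseFirstTopLevelDecl(ADecl); !AtEOF;
//        AtEOF = P.ParseTopLevelDecl(ADecl)) {
//     if (ADecl && !Consumer.HandleTopLevelDecl(ADecl.get()))
//       break;
//   }
//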
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/OpenMPClause.h" #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. /// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. 
IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFENVHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. 
This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp; it is a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; /// Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token. NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name.
/// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc, Priority Prio) { if (!Locs.empty() && Locs.back().isActive(P)) { if (Locs.back().Priority <= Prio) { Locs.back().TemplateName = TemplateName; Locs.back().LessLoc = LessLoc; Locs.back().Priority = Prio; } } else { Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount, P.BracketCount, P.BraceCount}); } } /// Mark the current potential missing template location as having been /// handled (this happens if we pass a "corresponding" '>' or '>>' token /// or leave a bracket scope). void clear(Parser &P) { while (!Locs.empty() && Locs.back().isActiveOrNested(P)) Locs.pop_back(); } /// Get the current enclosing expression that might have been intended to be /// a template name. Loc *getCurrent(Parser &P) { if (!Locs.empty() && Locs.back().isActive(P)) return &Locs.back(); return nullptr; } }; AngleBracketTracker AngleBrackets; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; /// Whether to skip parsing of function bodies. /// /// This option can be used, for example, to speed up searches for /// declarations/definitions when indexing. bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; /// Flags describing a context in which we're parsing a statement. enum class ParsedStmtContext { /// This context permits declarations in language modes where declarations /// are not statements. AllowDeclarationsInC = 0x1, /// This context permits standalone OpenMP directives. AllowStandaloneOpenMPDirectives = 0x2, /// This context is at the top level of a GNU statement expression. InStmtExpr = 0x4, /// The context of a regular substatement. SubStmt = 0, /// The context of a compound-statement. Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives, LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr) }; /// Act on an expression statement that might be the last statement in a /// GNU statement expression. Checks whether we are actually at the end of /// a statement expression and builds a suitable expression statement. StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx); public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding.
All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. 
bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '='. bool isTokenEqualOrEqualTypo(); /// Return the current token to the token stream and make the given /// token the current token. void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed, /*IsReinject*/true); PP.Lex(Tok); PP.EnterToken(Next, /*IsReinject*/true); } SourceLocation ConsumeAnnotationToken() { assert(Tok.isAnnotation() && "wrong consume method"); SourceLocation Loc = Tok.getLocation(); PrevTokLocation = Tok.getAnnotationEndLoc(); PP.Lex(Tok); return Loc; } /// ConsumeParen - This consume method keeps the paren count up-to-date. /// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) { AngleBrackets.clear(*this); --ParenCount; // Don't let unbalanced )'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. /// SourceLocation ConsumeBracket() { assert(isTokenBracket() && "wrong consume method"); if (Tok.getKind() == tok::l_square) ++BracketCount; else if (BracketCount) { AngleBrackets.clear(*this); --BracketCount; // Don't let unbalanced ]'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBrace - This consume method keeps the brace count up-to-date. /// SourceLocation ConsumeBrace() { assert(isTokenBrace() && "wrong consume method"); if (Tok.getKind() == tok::l_brace) ++BraceCount; else if (BraceCount) { AngleBrackets.clear(*this); --BraceCount; // Don't let unbalanced }'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeStringToken - Consume the current 'peek token', lexing a new one /// and returning the token kind. This method is specific to strings, as it /// handles string literal concatenation, as per C99 5.1.1.2, translation /// phase #6. SourceLocation ConsumeStringToken() { assert(isTokenStringLiteral() && "Should only consume string literals with this method"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// Consume the current code-completion token. /// /// This routine can be called to consume the code-completion token and /// continue processing in special cases where \c cutOffParsing() isn't /// desired, such as token caching or completion with lookahead. SourceLocation ConsumeCodeCompletionToken() { assert(Tok.is(tok::code_completion)); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// \brief When we are consuming a code-completion token without having /// matched a specific position in the grammar, provide code-completion results /// based on context. /// /// \returns the source location of the code-completion token. SourceLocation handleUnexpectedCodeCompletionToken(); /// Abruptly cut off parsing; mainly used when we have reached the /// code-completion point. void cutOffParsing() { if (PP.isCodeCompletionEnabled()) PP.setCodeCompletionReached(); // Cut off parsing by acting as if we reached the end-of-file. Tok.setKind(tok::eof); } /// Determine if we're at the end of the file or at a transition /// between modules.
bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// Checks if the \p Level is valid for use in a fold expression. bool isFoldOperator(prec::Level Level) const; /// Checks if the \p Kind is a valid operator for fold expressions. bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); /// Handle the annotation token produced for /// #pragma comment... void HandlePragmaMSComment(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. 
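/// For example (illustrative): after a successful TryAnnotateTypeOrScopeToken(),
/// \code
///   if (Tok.is(tok::annot_typename)) {
///     ParsedType T = getTypeAnnotation(Tok);
///     ConsumeAnnotationToken();
///   }
/// \endcode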
static ParsedType getTypeAnnotation(const Token &Tok) { return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, ParsedType T) { Tok.setAnnotationValue(T.getAsOpaquePtr()); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. bool isObjCInstancetype() { assert(getLangOpts().ObjC); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token. 
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... /// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevPreferredType = P.PreferredType; PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. /// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// The kind of extra semi diagnostic to emit. 
enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. bool expectIdentifier(); public: //===--------------------------------------------------------------------===// // Scope manipulation /// ParseScope - Introduces a new scope for parsing. The kind of /// scope is determined by ScopeFlags. Objects of this type should /// be created on the stack to coincide with the position where the /// parser enters the new scope, and this object's constructor will /// create that new scope. Similarly, once the object is destroyed /// the parser will exit the scope. class ParseScope { Parser *Self; ParseScope(const ParseScope &) = delete; void operator=(const ParseScope &) = delete; public: // ParseScope - Construct a new object to manage a scope in the // parser Self where the new Scope is created with the flags // ScopeFlags, but only when we aren't about to enter a compound statement. ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true, bool BeforeCompoundStmt = false) : Self(Self) { if (EnteredScope && !BeforeCompoundStmt) Self->EnterScope(ScopeFlags); else { if (BeforeCompoundStmt) Self->incrementMSManglingNumber(); this->Self = nullptr; } } // Exit - Exit the scope associated with this object now, rather // than waiting until the object is destroyed. void Exit() { if (Self) { Self->ExitScope(); Self = nullptr; } } ~ParseScope() { Exit(); } }; /// EnterScope - Start a new scope. void EnterScope(unsigned ScopeFlags); /// ExitScope - Pop a scope off the scope stack. void ExitScope(); private: /// RAII object used to modify the scope flags for the current scope. class ParseScopeFlags { Scope *CurScope; unsigned OldFlags; ParseScopeFlags(const ParseScopeFlags &) = delete; void operator=(const ParseScopeFlags &) = delete; public: ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true); ~ParseScopeFlags(); }; //===--------------------------------------------------------------------===// // Diagnostic Emission and Error recovery. public: DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID); DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID); DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); } private: void SuggestParentheses(SourceLocation Loc, unsigned DK, SourceRange ParenRange); void CheckNestedObjCContexts(SourceLocation AtLoc); public: /// Control flags for SkipUntil functions. enum SkipUntilFlags { StopAtSemi = 1 << 0, ///< Stop skipping at semicolon /// Stop skipping at specified token, but don't skip the token itself StopBeforeMatch = 1 << 1, StopAtCodeCompletion = 1 << 2 ///< Stop at code completion }; friend constexpr SkipUntilFlags operator|(SkipUntilFlags L, SkipUntilFlags R) { return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) | static_cast<unsigned>(R)); } /// SkipUntil - Read tokens until we get to the specified token, then consume /// it (unless StopBeforeMatch is specified). Because we cannot guarantee /// that the token will ever occur, this skips to the next token, or to some /// likely good stopping point. 
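/// A typical recovery sequence looks like this (illustrative):
/// \code
///   if (ExpectAndConsume(tok::l_paren))      // emits a diagnostic on failure
///     SkipUntil(tok::semi, StopBeforeMatch); // resync but leave the ';' alone
/// \endcode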
If Flags has the StopAtSemi flag, skipping will
  /// stop at a ';' character.
  ///
  /// If SkipUntil finds the specified token, it returns true, otherwise it
  /// returns false.
  bool SkipUntil(tok::TokenKind T,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    return SkipUntil(llvm::makeArrayRef(T), Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2, T3};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

  /// SkipMalformedDecl - Read tokens until we get to some likely good stopping
  /// point for skipping past a simple-declaration.
  void SkipMalformedDecl();

private:
  //===--------------------------------------------------------------------===//
  // Lexing and parsing of C++ inline methods.

  struct ParsingClass;

  /// [class.mem]p1: "... the class is regarded as complete within
  /// - function bodies
  /// - default arguments
  /// - exception-specifications (TODO: C++0x)
  /// - and brace-or-equal-initializers for non-static data members
  /// (including such things in nested classes)."
  /// LateParsedDeclarations build the tree of those elements so they can
  /// be parsed after parsing the top-level class.
  class LateParsedDeclaration {
  public:
    virtual ~LateParsedDeclaration();

    virtual void ParseLexedMethodDeclarations();
    virtual void ParseLexedMemberInitializers();
    virtual void ParseLexedMethodDefs();
    virtual void ParseLexedAttributes();
  };

  /// Inner node of the LateParsedDeclaration tree that parses
  /// all its members recursively.
  class LateParsedClass : public LateParsedDeclaration {
  public:
    LateParsedClass(Parser *P, ParsingClass *C);
    ~LateParsedClass() override;

    void ParseLexedMethodDeclarations() override;
    void ParseLexedMemberInitializers() override;
    void ParseLexedMethodDefs() override;
    void ParseLexedAttributes() override;

  private:
    Parser *Self;
    ParsingClass *Class;
  };

  /// Contains the lexed tokens of an attribute with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  /// FIXME: Perhaps we should change the name of LateParsedDeclaration to
  /// LateParsedTokens.
  struct LateParsedAttribute : public LateParsedDeclaration {
    Parser *Self;
    CachedTokens Toks;
    IdentifierInfo &AttrName;
    IdentifierInfo *MacroII = nullptr;
    SourceLocation AttrNameLoc;
    SmallVector<Decl*, 2> Decls;

    explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                                 SourceLocation Loc)
        : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

    void ParseLexedAttributes() override;

    void addDecl(Decl *D) { Decls.push_back(D); }
  };

  // A list of late-parsed attributes.  Used by ParseGNUAttributes.
  class LateParsedAttrList : public SmallVector<LateParsedAttribute *, 2> {
  public:
    LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

    bool parseSoon() { return ParseSoon; }

  private:
    bool ParseSoon;  // Are we planning to parse these shortly after creation?
  };

  /// Contains the lexed tokens of a member function definition
  /// which needs to be parsed at the end of the class declaration
  /// after parsing all other member declarations.
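  /// For example (an illustrative sketch): in
  ///   struct S { int get() { return x; } int x; };
  /// the tokens of get()'s body are cached here and parsed only once the
  /// definition of S (and thus the member 'x') is complete.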
  struct LexedMethod : public LateParsedDeclaration {
    Parser *Self;
    Decl *D;
    CachedTokens Toks;

    /// Whether this member function had an associated template
    /// scope. When true, D is a template declaration; otherwise,
    /// it is a member function declaration.
    bool TemplateScope;

    explicit LexedMethod(Parser *P, Decl *MD)
      : Self(P), D(MD), TemplateScope(false) {}

    void ParseLexedMethodDefs() override;
  };

  /// LateParsedDefaultArgument - Keeps track of a parameter that may
  /// have a default argument that cannot be parsed yet because it
  /// occurs within a member function declaration inside the class
  /// (C++ [class.mem]p2).
  struct LateParsedDefaultArgument {
    explicit LateParsedDefaultArgument(
        Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

    /// Param - The parameter declaration for this parameter.
    Decl *Param;

    /// Toks - The sequence of tokens that comprises the default
    /// argument expression, not including the '=' or the terminating
    /// ')' or ','. This will be NULL for parameters that have no
    /// default argument.
    std::unique_ptr<CachedTokens> Toks;
  };

  /// LateParsedMethodDeclaration - A method declaration inside a class that
  /// contains at least one entity whose parsing needs to be delayed
  /// until the class itself is completely defined, such as a default
  /// argument (C++ [class.mem]p2).
  struct LateParsedMethodDeclaration : public LateParsedDeclaration {
    explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), TemplateScope(false),
        ExceptionSpecTokens(nullptr) {}

    void ParseLexedMethodDeclarations() override;

    Parser *Self;

    /// Method - The method declaration.
    Decl *Method;

    /// Whether this member function had an associated template
    /// scope. When true, Method is a template declaration;
    /// otherwise, it is a member function declaration.
    bool TemplateScope;

    /// DefaultArgs - Contains the parameters of the function and
    /// their default arguments. At least one of the parameters will
    /// have a default argument, but all of the parameters of the
    /// method will be stored so that they can be reintroduced into
    /// scope at the appropriate times.
    SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

    /// The set of tokens that make up an exception-specification that
    /// has not yet been parsed.
    CachedTokens *ExceptionSpecTokens;
  };

  /// LateParsedMemberInitializer - An initializer for a non-static class data
  /// member whose parsing must be delayed until the class is completely
  /// defined (C++11 [class.mem]p2).
  struct LateParsedMemberInitializer : public LateParsedDeclaration {
    LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

    void ParseLexedMemberInitializers() override;

    Parser *Self;

    /// Field - The field declaration.
    Decl *Field;

    /// CachedTokens - The sequence of tokens that comprises the initializer,
    /// including any leading '='.
    CachedTokens Toks;
  };

  /// LateParsedDeclarationsContainer - During parsing of a top-level
  /// (non-nested) C++ class, its method declarations that contain parts that
  /// won't be parsed until after the definition is completed
  /// (C++ [class.mem]p2) are stored here, together with the tokens that will
  /// later be parsed to create those entities.
  typedef SmallVector<LateParsedDeclaration*, 2> LateParsedDeclarationsContainer;

  /// Representation of a class that has been parsed, including
  /// any member function declarations or definitions that need to be
  /// parsed after the corresponding top-level class is complete.
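  /// For example (an illustrative sketch): while parsing
  ///   struct Outer { struct Inner { int f() { return v; } int v = 1; }; };
  /// the definition of Inner::f and the initializer of 'v' are recorded as
  /// late-parsed declarations and processed once Outer is complete.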
  struct ParsingClass {
    ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), TemplateScope(false),
        IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }

    /// Whether this is a "top-level" class, meaning that it is
    /// not nested within another class.
    bool TopLevelClass : 1;

    /// Whether this class had an associated template
    /// scope. When true, TagOrTemplate is a template declaration;
    /// otherwise, it is a tag declaration.
    bool TemplateScope : 1;

    /// Whether this class is an __interface.
    bool IsInterface : 1;

    /// The class or class template whose definition we are parsing.
    Decl *TagOrTemplate;

    /// LateParsedDeclarations - Method declarations, inline definitions and
    /// nested classes that contain pieces whose parsing will be delayed until
    /// the top-level class is fully defined.
    LateParsedDeclarationsContainer LateParsedDeclarations;
  };

  /// The stack of classes that is currently being
  /// parsed. Nested and local classes will be pushed onto this stack
  /// when they are parsed, and removed afterward.
  std::stack<ParsingClass *> ClassStack;

  ParsingClass &getCurrentClass() {
    assert(!ClassStack.empty() && "No lexed method stacks!");
    return *ClassStack.top();
  }

  /// RAII object used to manage the parsing of a class definition.
  class ParsingClassDefinition {
    Parser &P;
    bool Popped;
    Sema::ParsingClassState State;

  public:
    ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                           bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
    }

    /// Pop this class off the stack.
    void Pop() {
      assert(!Popped && "Nested class has already been popped");
      Popped = true;
      P.PopParsingClass(State);
    }

    ~ParsingClassDefinition() {
      if (!Popped)
        P.PopParsingClass(State);
    }
  };

  /// Contains any template-specific information that has been parsed
  /// prior to parsing declaration specifiers.
  struct ParsedTemplateInfo {
    ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }

    ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                       bool isSpecialization,
                       bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization ? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

    explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                                SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false) { }

    /// The kind of template we are parsing.
    enum {
      /// We are not parsing a template at all.
      NonTemplate = 0,
      /// We are parsing a template declaration.
      Template,
      /// We are parsing an explicit specialization.
      ExplicitSpecialization,
      /// We are parsing an explicit instantiation.
      ExplicitInstantiation
    } Kind;

    /// The template parameter lists, for template declarations
    /// and explicit specializations.
    TemplateParameterLists *TemplateParams;

    /// The location of the 'extern' keyword, if any, for an explicit
    /// instantiation.
    SourceLocation ExternLoc;

    /// The location of the 'template' keyword, for an explicit
    /// instantiation.
    SourceLocation TemplateLoc;

    /// Whether the last template parameter list was empty.
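    /// For example (an illustrative sketch): in the explicit specialization
    ///   template<> void f<int>();
    /// the last (and only) template parameter list 'template<>' is empty,
    /// so this flag would be true.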
bool LastParameterListWasEmpty; SourceRange getSourceRange() const LLVM_READONLY; }; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); static void LateTemplateParserCleanupCallback(void *P); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. struct ParsedAttributesWithRange : ParsedAttributes { ParsedAttributesWithRange(AttributeFactory &factory) : ParsedAttributes(factory) {} void clear() { ParsedAttributes::clear(); Range = SourceRange(); } SourceRange Range; }; struct ParsedAttributesViewWithRange : ParsedAttributesView { ParsedAttributesViewWithRange() : ParsedAttributesView() {} void clearListOnly() { ParsedAttributesView::clearListOnly(); Range = SourceRange(); } SourceRange Range; }; DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc, if non-NULL, is filled with the location of the last token of // the simple-asm. 
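  // For example (an illustrative sketch): for the declaration
  //   int g asm("real_g");
  // ParseSimpleAsm consumes 'asm("real_g")' and returns the expression for
  // the string literal "real_g".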
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr); ExprResult ParseAsmStringLiteral(); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. 
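  /// For example (an illustrative sketch): given the base type 'NSArray'
  /// followed by the tokens '<NSString *>', this parses the type argument
  /// list and produces the specialized type 'NSArray<NSString *>'.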
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc, ParsedAttributes &Attrs); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for Objective-c context sensitive keywords recognition. enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx, ParsedAttributes *ParamAttrs); void ParseObjCMethodRequirement(); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition=true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpressionInExprEvalContext( TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseCaseExpression(SourceLocation CaseLoc); ExprResult ParseConstraintExpression(); // Expr that doesn't include commas. ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, bool IsUnevaluated); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false); /// Returns true if the next token cannot start an expression. 
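  /// For example (an illustrative sketch): a '}' or an 'else' keyword can
  /// never begin an expression, so callers can abandon expression parsing
  /// early and produce better recovery diagnostics.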
bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less); void checkPotentialAngleBracket(ExprResult &PotentialTemplateName); bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &, const Token &OpToken); bool checkPotentialAngleBracketDelimiter(const Token &OpToken) { if (auto *Info = AngleBrackets.getCurrent(*this)) return checkPotentialAngleBracketDelimiter(*Info, OpToken); return false; } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<Expr*, 20> ExprListTy; typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, llvm::function_ref<void()> ExpressionStarts = llvm::function_ref<void()>()); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' FoldExpr, // Also allow fold-expression <anything> CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr, bool OnlyNamespace = false); //===--------------------------------------------------------------------===// // C++11 5.1.2: Lambda expressions /// Result of tentatively parsing a lambda-introducer. 
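  /// For example (an illustrative sketch): after tentatively parsing
  ///   [x, &y] (
  /// the introducer disambiguates as a lambda, whereas in Objective-C++ a
  /// sequence like
  ///   [receiver message]
  /// disambiguates as a message send.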
enum class LambdaIntroducerTentativeParse { /// This appears to be a lambda-introducer, which has been fully parsed. Success, /// This is a lambda-introducer, but has not been fully parsed, and this /// function needs to be called again to parse it. Incomplete, /// This is definitely an Objective-C message send expression, rather than /// a lambda-introducer, attribute-specifier, or array designator. MessageSend, /// This is not a lambda-introducer. Invalid, }; // [...] () -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); bool ParseLambdaIntroducer(LambdaIntroducer &Intro, LambdaIntroducerTentativeParse *Tentative = nullptr); ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range, bool MayBeFollowedByDirectInit); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. 
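  /// For example (an illustrative sketch): the 'int' in the functional-style
  /// cast 'int(x)' is parsed by this routine.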
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while/for condition expression. struct ForRangeInfo; Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK, ForRangeInfo *FRI = nullptr); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. /// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); ExprResult ParseInitializerWithPotentialDesignator(); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used.) typedef SmallVector<Stmt*, 32> StmtVector; /// A SmallVector of expressions, with stack size 12 (the maximum used.) typedef SmallVector<Expr*, 12> ExprVector; /// A SmallVector of types. 
  typedef SmallVector<ParsedType, 12> TypeVector;

  StmtResult
  ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
                 ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
  StmtResult ParseStatementOrDeclaration(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc = nullptr);
  StmtResult ParseStatementOrDeclarationAfterAttributes(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs);
  StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
                                   ParsedStmtContext StmtCtx);
  StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                                bool MissingCase = false,
                                ExprResult Expr = ExprResult());
  StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseCompoundStatement(bool isStmtExpr = false);
  StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags);
  void ParseCompoundStatementLeadingPragmas();
  bool ConsumeNullStmt(StmtVector &Stmts);
  StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
  bool ParseParenExprOrCondition(StmtResult *InitStmt,
                                 Sema::ConditionResult &CondResult,
                                 SourceLocation Loc, Sema::ConditionKind CK);
  StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseDoStatement();
  StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseGotoStatement();
  StmtResult ParseContinueStatement();
  StmtResult ParseBreakStatement();
  StmtResult ParseReturnStatement();
  StmtResult ParseAsmStatement(bool &msAsm);
  StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
  StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx,
                                 SourceLocation *TrailingElseLoc,
                                 ParsedAttributesWithRange &Attrs);

  /// Describes the behavior that should be taken for an __if_exists
  /// block.
  enum IfExistsBehavior {
    /// Parse the block; this code is always used.
    IEB_Parse,
    /// Skip the block entirely; this code is never used.
    IEB_Skip,
    /// Parse the block as a dependent block, which may be used in
    /// some template instantiations but not others.
    IEB_Dependent
  };

  /// Describes the condition of a Microsoft __if_exists or
  /// __if_not_exists block.
  struct IfExistsCondition {
    /// The location of the initial keyword.
    SourceLocation KeywordLoc;
    /// Whether this is an __if_exists block (rather than an
    /// __if_not_exists block).
    bool IsIfExists;

    /// Nested-name-specifier preceding the name.
    CXXScopeSpec SS;

    /// The name we're looking for.
    UnqualifiedId Name;

    /// The behavior that this __if_exists or __if_not_exists block
    /// should have.
IfExistsBehavior Behavior; }; bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result); void ParseMicrosoftIfExistsStatement(StmtVector &Stmts); void ParseMicrosoftIfExistsExternalDeclaration(); void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType, ParsedAttributes &AccessAttrs, AccessSpecifier &CurAS); bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs, bool &InitExprsOk); bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names, SmallVectorImpl<Expr *> &Constraints, SmallVectorImpl<Expr *> &Exprs); //===--------------------------------------------------------------------===// // C++ 6: Statements and Blocks StmtResult ParseCXXTryBlock(); StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false); StmtResult ParseCXXCatchBlock(bool FnCatch = false); //===--------------------------------------------------------------------===// // MS: SEH Statements and Blocks StmtResult ParseSEHTryBlock(); StmtResult ParseSEHExceptBlock(SourceLocation Loc); StmtResult ParseSEHFinallyBlock(SourceLocation Loc); StmtResult ParseSEHLeaveStatement(); //===--------------------------------------------------------------------===// // Objective-C Statements StmtResult ParseObjCAtStatement(SourceLocation atLoc, ParsedStmtContext StmtCtx); StmtResult ParseObjCTryStmt(SourceLocation atLoc); StmtResult ParseObjCThrowStmt(SourceLocation atLoc); StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc); StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc); //===--------------------------------------------------------------------===// // C99 6.7: Declarations. /// A context for parsing declaration specifiers. TODO: flesh this /// out, there are other significant restrictions on specifiers than /// would be best implemented in the parser. enum class DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: return false; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which we can perform class template argument /// deduction? 
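  /// For example (an illustrative sketch): in a function body (DSC_normal)
  ///   std::pair p(1, 2);
  /// may deduce 'std::pair<int, int>', whereas a trailing return type
  /// (DSC_trailing) is not a deduction context.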
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_type_specifier: return true; case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Information on a C++0x for-range-initializer found while parsing a /// declaration which turns out to be a for-range-declaration. struct ForRangeInit { SourceLocation ColonLoc; ExprResult RangeExpr; bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); } }; struct ForRangeInfo : ForRangeInit { StmtResult LoopVar; }; DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs, bool RequireSemi, ForRangeInit *FRI = nullptr); bool MightBeDeclarator(DeclaratorContext Context); DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context, SourceLocation *DeclEnd = nullptr, ForRangeInit *FRI = nullptr); Decl *ParseDeclarationAfterDeclarator(Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo()); bool ParseAsmAttributesAfterDeclarator(Declarator &D); Decl *ParseDeclarationAfterDeclaratorAndAttributes( Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ForRangeInit *FRI = nullptr); Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope); Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope); /// When in code-completion, skip parsing of the function/method body /// unless the body contains the code-completion point. /// /// \returns true if the function body was skipped. bool trySkippingFunctionBody(); bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC, ParsedAttributesWithRange &Attrs); DeclSpecContext getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context); void ParseDeclarationSpecifiers( DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal, LateParsedAttrList *LateAttrs = nullptr); bool DiagnoseMissingSemiAfterTagDefinition( DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext, LateParsedAttrList *LateAttrs = nullptr); void ParseSpecifierQualifierList( DeclSpec &DS, AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal); void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context); void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC); void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl); void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType, Decl *TagDecl); void ParseStructDeclaration( ParsingDeclSpec &DS, llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback); bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false); bool isTypeSpecifierQualifier(); /// isKnownToBeTypeSpecifier - Return true if we know that the specified token /// is definitely a type-specifier. 
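  /// For example (an illustrative sketch): a builtin type keyword such as
  /// 'int', or a token already annotated as a typename, is definitely a
  /// type-specifier.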
Return false if it isn't part of a type
  /// specifier or if we're not sure.
  bool isKnownToBeTypeSpecifier(const Token &Tok) const;

  /// Return true if we know that we are definitely looking at a
  /// decl-specifier that is not part of an expression such as a
  /// function-style cast. Return false if it's not a decl-specifier, or
  /// we're not sure.
  bool isKnownToBeDeclarationSpecifier() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationSpecifier() == TPResult::True;
    return isDeclarationSpecifier(true);
  }

  /// isDeclarationStatement - Disambiguates between a declaration or an
  /// expression statement, when parsing function bodies.
  /// Returns true for declaration, false for expression.
  bool isDeclarationStatement() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationStatement();
    return isDeclarationSpecifier(true);
  }

  /// isForInitDeclaration - Disambiguates between a declaration or an
  /// expression in the context of the C 'clause-1' or the C++
  /// 'for-init-statement' part of a 'for' statement.
  /// Returns true for declaration, false for expression.
  bool isForInitDeclaration() {
    if (getLangOpts().OpenMP)
      Actions.startOpenMPLoop();
    if (getLangOpts().CPlusPlus)
      return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
    return isDeclarationSpecifier(true);
  }

  /// Determine whether this is a C++1z for-range-identifier.
  bool isForRangeIdentifier();

  /// Determine whether we are currently at the start of an Objective-C
  /// class message that appears to be missing the open bracket '['.
  bool isStartOfObjCClassMessageMissingOpenBracket();

  /// Starting with a scope specifier, identifier, or
  /// template-id that refers to the current class, determine whether
  /// this is a constructor declarator.
  bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

  /// Specifies the context in which type-id/expression
  /// disambiguation will occur.
  enum TentativeCXXTypeIdContext {
    TypeIdInParens,
    TypeIdUnambiguous,
    TypeIdAsTemplateArgument
  };

  /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
  /// whether the parens contain an expression or a type-id.
  /// Returns true for a type-id and false for an expression.
  bool isTypeIdInParens(bool &isAmbiguous) {
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdInParens, isAmbiguous);
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  bool isTypeIdInParens() {
    bool isAmbiguous;
    return isTypeIdInParens(isAmbiguous);
  }

  /// Checks whether the current tokens form a type-id or an expression.
  /// It is similar to isTypeIdInParens but does not assume that the type-id
  /// is in parentheses.
  bool isTypeIdUnambiguously() {
    bool IsAmbiguous;
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
    return isTypeSpecifierQualifier();
  }

  /// isCXXDeclarationStatement - C++-specialized function that disambiguates
  /// between a declaration or an expression statement, when parsing function
  /// bodies. Returns true for declaration, false for expression.
  bool isCXXDeclarationStatement();

  /// isCXXSimpleDeclaration - C++-specialized function that disambiguates
  /// between a simple-declaration or an expression-statement.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  /// Returns false if the statement is disambiguated as an expression.
  bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

  /// isCXXFunctionDeclarator - Disambiguates between a function declarator or
  /// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration /// might be a constructor-style initializer. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr); struct ConditionDeclarationOrInitStatementState; enum class ConditionOrInitStatement { Expression, ///< Disambiguated as an expression (either kind). ConditionDecl, ///< Disambiguated as the declaration form of condition. InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement. ForRangeDecl, ///< Disambiguated as a for-range declaration. Error ///< Can't be any of the above! }; /// Disambiguates between the different kinds of things that can happen /// after 'if (' or 'switch ('. This could be one of two different kinds of /// declaration (depending on whether there is a ';' later) or an expression. ConditionOrInitStatement isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt, bool CanBeForRangeDecl); bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous); bool isCXXTypeId(TentativeCXXTypeIdContext Context) { bool isAmbiguous; return isCXXTypeId(Context, isAmbiguous); } /// TPResult - Used as the result value for functions whose purpose is to /// disambiguate C++ constructs by "tentatively parsing" them. enum class TPResult { True, False, Ambiguous, Error }; /// Based only on the given token kind, determine whether we know that /// we're at the start of an expression or a type-specifier-seq (which may /// be an expression, in C++). /// /// This routine does not attempt to resolve any of the trick cases, e.g., /// those involving lookup of identifiers. /// /// \returns \c TPR_true if this token starts an expression, \c TPR_false if /// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot /// tell. TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind); /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a /// declaration specifier, TPResult::False if it is not, /// TPResult::Ambiguous if it could be either a decl-specifier or a /// function-style cast, and TPResult::Error if a parsing error was /// encountered. If it could be a braced C++11 function-style cast, returns /// BracedCastResult. /// Doesn't consume tokens. TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *InvalidAsDeclSpec = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. bool isCXXDeclarationSpecifierAType(); /// Determine whether the current token sequence might be /// '<' template-argument-list '>' /// rather than a less-than expression. TPResult isTemplateArgumentList(unsigned TokensToSkip); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. 
  // TPResult::Ambiguous indicates that more tentative parsing is necessary
  // for disambiguation.
  // They all consume tokens, so backtracking should be used after calling
  // them.
  TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
  TPResult TryParseTypeofSpecifier();
  TPResult TryParseProtocolQualifiers();
  TPResult TryParsePtrOperatorSeq();
  TPResult TryParseOperatorId();
  TPResult TryParseInitDeclaratorList();
  TPResult TryParseDeclarator(bool mayBeAbstract,
                              bool mayHaveIdentifier = true,
                              bool mayHaveDirectInit = false);
  TPResult TryParseParameterDeclarationClause(
      bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false);
  TPResult TryParseFunctionDeclarator();
  TPResult TryParseBracketDeclarator();
  TPResult TryConsumeDeclarationSpecifier();

public:
  TypeResult ParseTypeName(SourceRange *Range = nullptr,
                           DeclaratorContext Context
                             = DeclaratorContext::TypeNameContext,
                           AccessSpecifier AS = AS_none,
                           Decl **OwnedType = nullptr,
                           ParsedAttributes *Attrs = nullptr);

private:
  void ParseBlockId(SourceLocation CaretLoc);

  /// Are [[]] attributes enabled?
  bool standardAttributesAllowed() const {
    const LangOptions &LO = getLangOpts();
    return LO.DoubleSquareBracketAttributes;
  }

  // Check for the start of an attribute-specifier-seq in a context where an
  // attribute is not allowed.
  bool CheckProhibitedCXX11Attribute() {
    assert(Tok.is(tok::l_square));
    if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
      return false;
    return DiagnoseProhibitedCXX11Attribute();
  }

  bool DiagnoseProhibitedCXX11Attribute();
  void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                    SourceLocation CorrectLocation) {
    if (!standardAttributesAllowed())
      return;
    if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
        Tok.isNot(tok::kw_alignas))
      return;
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
  }
  void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                       SourceLocation CorrectLocation);

  void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                      DeclSpec &DS, Sema::TagUseKind TUK);

  // FixItLoc = possible correct location for the attributes
  void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }

  void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
  void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                    SourceLocation FixItLoc);

  // Forbid C++11 and C2x attributes that appear in certain syntactic
  // locations which the standard permits but we don't yet support; for
  // example, attributes that appertain to decl-specifiers.
  void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                               unsigned DiagID);

  /// Skip C++11 and C2x attributes and return the end location of the
  /// last one.
  /// \returns SourceLocation() if there are no attributes.
  SourceLocation SkipCXX11Attributes();

  /// Diagnose and skip C++11 and C2x attributes that appear in syntactic
  /// locations where attributes are not allowed.
  void DiagnoseAndSkipCXX11Attributes();

  /// Parses syntax-generic attribute arguments for attributes which are
  /// known to the implementation, and adds them to the given ParsedAttributes
  /// list with the given attribute syntax. Returns the number of arguments
  /// parsed for the attribute.
unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } void MaybeParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) ParseGNUAttributes(attrs, endLoc, LateAttrs); } void ParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } void MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); } } void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) ParseCXX11Attributes(attrs, endLoc); } void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs, SourceLocation *EndLoc = nullptr); void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// Parses a C++11 (or C2x)-style attribute argument list. Returns true /// if this results in adding an attribute to the ParsedAttributes list. 
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) ParseMicrosoftDeclSpecs(Attrs, End); } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// Parses opencl_unroll_hint attribute if language is OpenCL v2.0 /// or higher. /// \return false if error happens. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// Parses opencl_unroll_hint attribute. /// \return false if error happens. 
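  /// For example (an illustrative sketch), in OpenCL source:
  ///   __attribute__((opencl_unroll_hint(4)))
  ///   for (int i = 0; i != 128; ++i) { ... }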
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseSwiftNewtypeAttribute(IdentifierInfo &SwiftNewtype, SourceLocation SwiftNewtypeLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation &EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. 
typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( Declarator &D, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. 
CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. 
bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse clauses for '#pragma omp declare target'. DeclGroupPtrTy ParseOMPDeclareTargetClauses(); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. 
/// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *TailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val; SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers> MapTypeModifiers; SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers> MapTypeModifiersLoc; OpenMPMapClauseKind MapType = OMPC_MAP_unknown; bool IsMapTypeImplicit = false; SourceLocation DepLinMapLoc; }; /// Parses clauses with list. 
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, ParsedType ObjectType, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); bool isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true); void AnnotateTemplateIdTokenAsType(bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == 
tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); /// Parse the given string as a type. /// /// This is a dangerous utility function currently employed only by API notes. /// It is not a general entry-point for safely parsing types from strings. /// /// \param typeStr The string to be parsed as a type. /// \param context The name of the context in which this string is being /// parsed, which will be used in diagnostics. /// \param includeLoc The location at which this parse was triggered. TypeResult parseTypeFromString(StringRef typeStr, StringRef context, SourceLocation includeLoc); //===--------------------------------------------------------------------===// // Embarcadero: Array and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; }; } // end namespace clang #endif
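Editor's note: the AttrRequirements enum in the parser header above packs each attribute syntax into its own bit so callers of ParseTypeQualifierListOpt can combine them with bitwise OR. A minimal, self-contained C sketch of that mask-and-test pattern follows (illustrative only; the names below are invented for the example and are not Clang API):

#include <stdio.h>

/* one bit per attribute syntax, mirroring the AR_* flags above */
enum attr_reqs {
    AR_NONE     = 0,
    AR_GNU      = 1 << 0,  /* __attribute__((...)) allowed */
    AR_CXX11    = 1 << 1,  /* [[...]] allowed */
    AR_DECLSPEC = 1 << 2,  /* __declspec(...) allowed */
    AR_ALL      = AR_GNU | AR_CXX11 | AR_DECLSPEC
};

static void parse_qualifiers(unsigned reqs)
{
    /* each syntax is handled only when its bit is present in the mask */
    if (reqs & AR_GNU)
        printf("would parse GNU attributes\n");
    if (reqs & AR_CXX11)
        printf("would parse C++11 attributes\n");
    if (reqs & AR_DECLSPEC)
        printf("would parse __declspec attributes\n");
}

int main(void)
{
    parse_qualifiers(AR_GNU | AR_DECLSPEC); /* vendor attributes only */
    return 0;
}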
convolution_3x3_fp16.c
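Editor's note: the first routine in the file below repacks a zero-padded [n c h w] tensor into the [n c/8 h w 8] layout using RISC-V vector strided loads. As a reading aid for the assembly, here is a plain-C reference of the same transform, a sketch under the file's stated constraint that the input channel count is a multiple of 8 (pad_input_pack1to8_ref and the _Float16 typedef are illustrative stand-ins, not part of the library API):

#include <string.h>

typedef _Float16 fp16; /* assumption: stand-in for the toolchain's __fp16 */

static void pad_input_pack1to8_ref(const fp16 *input, fp16 *padded,
                                   int inc, int inh, int inw,
                                   int padded_h, int padded_w,
                                   int pad_top, int pad_left)
{
    /* zero the whole padded buffer first, then copy the interior */
    memset(padded, 0, (size_t)inc * padded_h * padded_w * sizeof(fp16));
    for (int c = 0; c < inc; c++) {
        for (int h = 0; h < inh; h++) {
            for (int w = 0; w < inw; w++) {
                /* destination layout [c/8, h, w, 8]: channel group c/8, lane c%8 */
                size_t dst = ((size_t)(c / 8) * padded_h * padded_w
                              + (size_t)(h + pad_top) * padded_w
                              + (size_t)(w + pad_left)) * 8 + (c % 8);
                padded[dst] = input[((size_t)c * inh + h) * inw + w];
            }
        }
    }
}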
/* * Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the License); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an AS IS BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* CSI-NN2 version 1.12.x */ /* the conditions for using winograd convolution in_channel >= 16 out_channel >= 16 input_height <= 120 input_width <= 120 */ #include "csi_c906.h" /* padding input for winograd input transform , and change memory layout to [n c/8 h w 8] input layout: [n c h w] input_padded layout: [n c/8 h w 8] constrain: input channel % 8 = 0 */ void csi_c906_pad_input_pack1to8_fp16(const __fp16 *input, __fp16 *input_padded, int inc, int inh, int inw, int padded_h, int padded_w, int pad_top, int pad_left) { int inc8 = inc / 8; int padded_hw = padded_h * padded_w; __fp16 *pad_ptr = input_padded; __fp16 *inp_ptr = (__fp16 *)input; int resi_h = padded_h - pad_top - inh; // remain to pad on h (pad_down) int resi_w = padded_w - pad_left - inw; // remain to pad on w (pad_right) asm volatile( "vsetvli zero, zero, e16, m1\n\t" "vmv.v.x v2, zero\n\t" // clear v2, for memset value 0 "mulw t1, %6, %7\n\t" // pad_top * padded_w "mulw t2, %6, %9\n\t" // pad_down * padded_w "mulw t0, %3, %4\n\t" // input_size per_channel "slli t0, t0, 1\n\t" // load stride = input_size * 2 "slli t6, t0, 3\n\t" // t6 = input_size * 8 * 2 "1:\n\t" // channel loop [inc/8] "mv a0, %0\n\t" // update input_addr "mv t5, %3\n\t" // t5 = in_h "beqz %7, 3f\n\t" // if pad_top = 0 "mv t3, t1\n\t" // t3 = num to memset "2:\n\t" // pad h_top "vse.v v2, (%1)\n\t" "addi %1, %1, 16\n\t" "addi t3, t3, -1\n\t" "bnez t3, 2b\n\t" "3:\n\t" // pad h_mid "mv t4, %4\n\t" // t4 = in_w "beqz %8, 5f\n\t" // if pad_left = 0 "mv t3, %8\n\t" // t3 = pad_left "4:\n\t" // pad w_left "vse.v v2, (%1)\n\t" "addi %1, %1, 16\n\t" "addi t3, t3, -1\n\t" "bnez t3, 4b\n\t" "5:\n\t" // pad w_mid "vlse.v v4, (a0), t0\n\t" "addi a0, a0, 2\n\t" "vse.v v4, (%1)\n\t" "addi %1, %1, 16\n\t" "addi t4, t4, -1\n\t" "bnez t4, 5b\n\t" "beqz %10, 7f\n\t" // if pad_right = 0 "mv t3, %10\n\t" // t3 = pad_right "6:\n\t" // pad w_right "vse.v v2, (%1)\n\t" "addi %1, %1, 16\n\t" "addi t3, t3, -1\n\t" "bnez t3, 6b\n\t" "7:\n\t" "addi t5, t5, -1\n\t" "bnez t5, 3b\n\t" "beqz %9, 9f\n\t" // if pad_down = 0 "mv t3, t2\n\t" // t3 = num to memset 0 "8:\n\t" // pad h_down "vse.v v2, (%1)\n\t" "addi %1, %1, 16\n\t" "addi t3, t3, -1\n\t" "bnez t3, 8b\n\t" "9:\n\t" "add %0, %0, t6\n\t" // input_data jump to next 8 channel "addi %2, %2, -1\n\t" "bnez %2, 1b\n\t" :"=r"(inp_ptr), // %0 "=r"(pad_ptr), // %1 "=r"(inc8), // %2 "=r"(inh), // %3 "=r"(inw), // %4 "=r"(padded_hw), // %5 "=r"(padded_w), // %6 "=r"(pad_top), // %7 "=r"(pad_left), // %8 "=r"(resi_h), // %9 "=r"(resi_w) // %10 :"0"(inp_ptr), "1"(pad_ptr), "2"(inc8), "3"(inh), "4"(inw), "5"(padded_hw), "6"(padded_w), "7"(pad_top), "8"(pad_left), "9"(resi_h), "10"(resi_w) :"cc", "memory", "v2", "v4", "a0", "t0", "t1", "t2", "t3", "t4", "t5", "t6" ); } void csi_c906_crop_output_pack8to1_fp16(const __fp16 *output_trans, __fp16 *output, int out_c, int out_h, int out_w, int wino_h, int 
wino_w) { int out_c8 = out_c / 8; __fp16 *out_tm_ptr = (__fp16 *)output_trans; __fp16 *out_ptr = output; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mulw t0, %3, %4\n\t" // output_size per_channel "slli t0, t0, 1\n\t" // store_stride = output_size * 2 "slli t3, t0, 3\n\t" // t3 = output_size * 8 * 2 "slli t4, %6, 4\n\t" // t4 = wino_w * 8 * 2 "mulw t5, %5, %6\n\t" // crop_size per_channel "slli t5, t5, 4\n\t" // t5 = crop_size * 8 * 2 "1:\n\t" // channel loop [out_ch / 8] "mv a1, %1\n\t" // update output_addr "mv a0, %0\n\t" // update crop_addr per-channel "mv t1, %3\n\t" // t1 = out_h "2:\n\t" // crop h "mv t2, %4\n\t" // t2 = out_w "mv s1, a0\n\t" // update crop_addr per-row "3:\n\t" // crop w "vle.v v2, (s1)\n\t" "addi s1, s1, 16\n\t" "vsse.v v2, (a1), t0\n\t" "addi a1, a1, 2\n\t" "addi t2, t2, -1\n\t" "bnez t2, 3b\n\t" "add a0, a0, t4\n\t" // crop-data jump to next row "addi t1, t1, -1\n\t" "bnez t1, 2b\n\t" "4:\n\t" "add %1, %1, t3\n\t" // output_data jump to next 8 channel "add %0, %0, t5\n\t" // crop-data jump to next 8 channel "addi %2, %2, -1\n\t" "bnez %2, 1b\n\t" :"=r"(out_tm_ptr), // %0 "=r"(out_ptr), // %1 "=r"(out_c8), // %2 "=r"(out_h), // %3 "=r"(out_w), // %4 "=r"(wino_h), // %5 "=r"(wino_w) // %6 :"0"(out_tm_ptr), "1"(out_ptr), "2"(out_c8), "3"(out_h), "4"(out_w), "5"(wino_h), "6"(wino_w) :"cc", "memory", "v2", "v3", "a0", "a1", "s1", "t0", "t1", "t2", "t3", "t4", "t5" ); } /* constrain: output channel % 8 = 0 input channel % 8 = 0 kernel before: [O I 3*3] kernel after : [O/8 8*8 I 8] */ void csi_c906_conv3x3s1_winograd64_transform_kernel_pack8_fp16(struct csi_tensor *o_kernel, struct csi_tensor *t_kernel) { int32_t outch = o_kernel->dim[0]; int32_t inch = o_kernel->dim[1]; __fp16 *kernel_data = (__fp16 *)o_kernel->data; // for kernel transform buf, 3x3 --> 8x8 __fp16 *kernel_tm = (__fp16 *)csi_mem_alloc(outch * inch * 8 * 8 * sizeof(__fp16)); // kernel transform matrix: G const __fp16 ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; // const __fp16 ktm[8][3] = { // {1.0f, 0.0f, 0.0f}, // {-2.0f / 9, -2.0f / 9, -2.0f / 9}, // {-2.0f / 9, 2.0f / 9, -2.0f / 9}, // {1.0f / 90, 1.0f / 45, 2.0f / 45}, // {1.0f / 90, -1.0f / 45, 2.0f / 45}, // {32.0f / 45, 16.0f / 45, 8.0f / 45}, // {32.0f / 45, -16.0f / 45, 8.0f / 45}, // {0.0f, 0.0f, 1.0f} // }; csi_tensor_copy(t_kernel, o_kernel); for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const __fp16* kernel0 = kernel_data + p * inch * 9 + q * 9; __fp16* kernel_tmp = kernel_tm + p * inch * 64 + q * 64; // transform kernel const __fp16 *k0 = kernel0; const __fp16 *k1 = kernel0 + 3; const __fp16 *k2 = kernel0 + 6; // h : first compute the transport matrix tmp = (g * GT)T __fp16 tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 8; j++) { __fp16* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tmp[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // optimized layout for winograd64 __fp16 *kernel_tm_pack8 = (__fp16 *)csi_mem_alloc(outch * inch * 8 * 8 * sizeof(__fp16)); t_kernel->data = kernel_tm_pack8; for (int oc = 0; oc < outch 
/ 8; oc++) { __fp16 *g0 = kernel_tm_pack8 + oc * 64 * inch * 8; const __fp16 *k0 = kernel_tm + oc * 64 * inch * 8; const __fp16 *k1 = k0 + 64 * inch; const __fp16 *k2 = k1 + 64 * inch; const __fp16 *k3 = k2 + 64 * inch; const __fp16 *k4 = k3 + 64 * inch; const __fp16 *k5 = k4 + 64 * inch; const __fp16 *k6 = k5 + 64 * inch; const __fp16 *k7 = k6 + 64 * inch; for (int k = 0; k < 64; k++) { __fp16 *g00 = g0 + k * inch * 8; for (int ic = 0; ic < inch / 8; ic++) { for (int i = 0; i < 8; i++) { const __fp16 *k00 = k0 + (ic * 8 + i) * 64; const __fp16 *k10 = k1 + (ic * 8 + i) * 64; const __fp16 *k20 = k2 + (ic * 8 + i) * 64; const __fp16 *k30 = k3 + (ic * 8 + i) * 64; const __fp16 *k40 = k4 + (ic * 8 + i) * 64; const __fp16 *k50 = k5 + (ic * 8 + i) * 64; const __fp16 *k60 = k6 + (ic * 8 + i) * 64; const __fp16 *k70 = k7 + (ic * 8 + i) * 64; g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k40[k]; g00[5] = k50[k]; g00[6] = k60[k]; g00[7] = k70[k]; g00 += 8; } } } } csi_mem_free(kernel_tm); } /* constraint: output channel % 8 = 0 input channel % 8 = 0 */ int csi_c906_conv3x3s1_winograd64_pack8_fp16(struct csi_tensor *input, struct csi_tensor *output, struct csi_tensor *kernel, struct csi_tensor *bias, struct conv2d_params *params) { // uint64_t start_time, end_time; // start_time = csi_get_timespec(); __fp16 *input_data = (__fp16 *)input->data; __fp16 *output_data = (__fp16 *)output->data; __fp16 *kernel_data = (__fp16 *)params->conv_extra.kernel_tm->data; __fp16 *bias_data = (__fp16 *)bias->data; // param int kernel_h = kernel->dim[2]; int kernel_w = kernel->dim[3]; int stride_h = params->stride_height; int stride_w = params->stride_width; int dilation_h = params->dilation_height; int dilation_w = params->dilation_width; int pad_left = params->pad_left; int pad_top = params->pad_top; int batch = input->dim[0]; int in_c = input->dim[1]; int in_h = input->dim[2]; int in_w = input->dim[3]; int input_size = in_c * in_h * in_w; int kernel_size = in_c * kernel_h * kernel_w; int out_c = kernel->dim[0]; int out_h = output->dim[2]; int out_w = output->dim[3]; int output_size = out_c * out_h * out_w; // winograd param int block_h = (out_h + 5) / 6; int block_w = (out_w + 5) / 6; int padded_in_h = block_h * 6 + 2; // each output block is 6x6; kernel = 3 * 3, stride = 1, thus the padded input needs + 2 int padded_in_w = block_w * 6 + 2; int padded_in_hw = padded_in_h * padded_in_w; // element size after padding per channel /****************************** bias *****************************/ bool flag_bias = 1; // default: conv2d layer includes bias if (bias_data == NULL) { flag_bias = 0; bias_data = (__fp16 *)csi_mem_alloc(out_c * sizeof(__fp16)); } for(int n = 0; n < batch; n++) { // pad buffer: [in_c/8 h w 8] __fp16 *input_padd_buf = (__fp16 *)csi_mem_alloc(in_c * padded_in_hw * sizeof(__fp16)); // pad input csi_c906_pad_input_pack1to8_fp16(input_data, input_padd_buf, in_c, in_h, in_w, padded_in_h, padded_in_w, pad_top, pad_left); input_data += input_size; // input transform buffer1: [in_ch/8, 64, blocks, 8] __fp16 *input_tm1_buf = (__fp16 *)csi_mem_alloc(in_c * block_h * block_w * 8 * 8 * sizeof(__fp16)); /****************************** transform input *****************************/ /* BT = { { 1 0 -5.25 0 5.25 0 -1 0 }; { 0 1 1 -4.25 -4.25 1 1 0 }; { 0 -1 1 4.25 -4.25 -1 1 0 }; { 0 0.5 0.25 -2.5 -1.25 2 1 0 }; { 0 -0.5 0.25 2.5 -1.25 -2 1 0 }; { 0 2 4 -2.5 -5 0.5 1 0 }; { 0 -2 4 2.5 -5 -0.5 1 0 }; { 0 -1 0 5.25 0 -5.25 0 1 } }; */ // int in_h_tm = block_h * 8; // input height after 
transform // int in_w_tm = block_w * 8; int tiles = block_h * block_w; #pragma omp parallel for num_threads(1) for(int q = 0; q < in_c / 8; q++) { __fp16 *img0 = input_padd_buf + q * padded_in_h * padded_in_w * 8; // feature map after padding - q channel __fp16 *img0_tm = input_tm1_buf + q * 64 * tiles * 8; // transform and interleave - q channel __fp16 *tmp = (__fp16 *)csi_mem_alloc(8 * 8 * 8 * sizeof(__fp16)); // __fp16 tmp[512] = {0.0}; // ?????? for(int i = 0; i < block_h; i++) { for(int j = 0; j < block_w; j++) { __fp16 *r0 = img0 + (i * padded_in_w * 6 + j * 6) * 8; // feature map after padding 8*8 start addr __fp16 *r0_tm = img0_tm + (i * block_w + j) * 8; // input_tm1 8*8 block start addr __fp16 ratio[] = {5.25, -4.25, 0.25, -1.25, 4.0, 0.5, -2.5, 2.0}; // note: in fact cannot be output constrain __fp16 *ratio_ptr = ratio; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "li t0, 8\n\t" // m = 8 "mv t5, %2\n\t" // t5 = tmp start addr "slli t1, %4, 4\n\t" // t1 = padded_in_w * 8 * 2bytes "flh fa0, 0(%3)\n\t" // fa0 = 5.25 "flh fa1, 2(%3)\n\t" // fa1 = -4.25 "flh fa2, 4(%3)\n\t" // fa2 = 0.25 "flh fa3, 6(%3)\n\t" // fa3 = -1.25 "flh fa4, 8(%3)\n\t" // fa4 = 4.0 "flh fa5, 10(%3)\n\t" // fa5 = 0.5 "flh fa6, 12(%3)\n\t" // fa6 = -2.5 "flh fa7, 14(%3)\n\t" // fa7 = 2.0 "1:\n\t" "mv s1, %0\n\t" // s1 = r00 addr "mv a0, t5\n\t" // tmp[0][m] "addi a1, a0, 128\n\t" // tmp[1][m] "addi a2, a1, 128\n\t" // tmp[2][m] "addi a3, a2, 128\n\t" // tmp[3][m] "addi a4, a3, 128\n\t" // tmp[4][m] "addi a5, a4, 128\n\t" // tmp[5][m] "addi a6, a5, 128\n\t" // tmp[6][m] "addi a7, a6, 128\n\t" // tmp[7][m] "vle.v v0, (s1)\n\t" // r00 "addi s1, s1, 16\n\t" "vle.v v1, (s1)\n\t" // r01 "addi s1, s1, 16\n\t" "vle.v v2, (s1)\n\t" // r02 "addi s1, s1, 16\n\t" "vle.v v3, (s1)\n\t" // r03 "addi s1, s1, 16\n\t" "vle.v v4, (s1)\n\t" // r04 "addi s1, s1, 16\n\t" "vle.v v5, (s1)\n\t" // r05 "addi s1, s1, 16\n\t" "vle.v v6, (s1)\n\t" // r06 "addi s1, s1, 16\n\t" "vle.v v7, (s1)\n\t" // r07 "addi s1, s1, 16\n\t" "vmv.v.v v10, v6\n\t" //--------------------------------------------- "vfsub.vv v8, v4, v2\n\t" // r04 - r02 "vfsub.vv v9, v3, v5\n\t" // r03 - r05 "vfsub.vv v24, v0, v6\n\t" // r00 - r06 "vfsub.vv v31, v7, v1\n\t" // r07 - r01 "vfmacc.vf v10, fa2, v2\n\t" // r06 + r02 * 0.25f "vfmul.vf v11, v1, fa5\n\t" // r01 * 0.5f "vfmul.vf v12, v1, fa7\n\t" // r01 * 2.0f "vfmacc.vf v24, fa0, v8\n\t" // r00 - r06 + 5.25 * (r04 - r02) = tmp[0][m] "vfmacc.vf v31, fa0, v9\n\t" // r07 - r01 + 5.25 * (r03 - r05) = tmp[7][m] //--------------------------------------------- "vfadd.vv v8, v2, v6\n\t" // r02 + r06 "vfadd.vv v9, v1, v5\n\t" // r01 + r05 "vfmacc.vf v11, fa6, v3\n\t" // r01 * 0.5f - r03 * 2.5f "vfmacc.vf v12, fa6, v3\n\t" // r01 * 2.f - r03 * 2.5f "vfmacc.vf v2, fa3, v4\n\t" // r02 - r04 * 1.25f 注意 "vfmacc.vf v10, fa3, v4\n\t" // r06 + r02 * 0.25f - r04 * 1.25f = tmp34a "vfmacc.vf v8, fa1, v4\n\t" // r02 + r06 - r04 * 4.25f = tmp12a "vfmacc.vf v9, fa1, v3\n\t" // r01 + r05 - r03 * 4.25f = tmp12b "vfmacc.vf v11, fa7, v5\n\t" // r01 * 0.5f - r03 * 2.5f + r05 * 2.0 = tmp34b "vfmacc.vf v12, fa5, v5\n\t" // r01 * 2.f - r03 * 2.5f + r05 * 0.5 = tmp56b "vse.v v24, (a0)\n\t" "vse.v v31, (a7)\n\t" "vfadd.vv v25, v8, v9\n\t" // tmp12a + tmp12b = tmp[1][m] "vfsub.vv v26, v8, v9\n\t" // tmp12a - tmp12b = tmp[2][m] //--------------------------------------------- "vfmacc.vf v6, fa4, v2\n\t" // r06 + (r02 - r04 * 1.25f) * 4 = tmp56a "vfadd.vv v27, v10, v11\n\t" // tmp34a + tmp34b = tmp[3][m] "vfsub.vv v28, v10, v11\n\t" // tmp34a - tmp34b 
= tmp[4][m] "vfadd.vv v29, v6, v12\n\t" // tmp56a + tmp56b = tmp[5][m] "vfsub.vv v30, v6, v12\n\t" // tmp56a - tmp56b = tmp[6][m] "vse.v v25, (a1)\n\t" "vse.v v26, (a2)\n\t" "vse.v v27, (a3)\n\t" "vse.v v28, (a4)\n\t" "vse.v v29, (a5)\n\t" "vse.v v30, (a6)\n\t" //--------------------------------------------- "add %0, %0, t1\n\t" // padding feature map 8*8 next line addr "addi t5, t5, 16\n\t" // tmp[0][0] --> tmp[0][1] "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "2:\n\t" "mv t5, %2\n\t" // tmp start addr "li t0, 8\n\t" // m = 8 "slli t1, %5, 4\n\t" // t1 = tiles * 8 * 2 bytes "slli t2, %5, 7\n\t" // t2 = tiles * 8 * 8 * 2 bytes "3:\n\t" "mv a0, %1\n\t" // r0_tm_0 "add a1, a0, t1\n\t" // r0_tm_1 "add a2, a1, t1\n\t" // r0_tm_2 "add a3, a2, t1\n\t" // r0_tm_3 "add a4, a3, t1\n\t" // r0_tm_4 "add a5, a4, t1\n\t" // r0_tm_5 "add a6, a5, t1\n\t" // r0_tm_6 "add a7, a6, t1\n\t" // r0_tm_7 "vle.v v0, (t5)\n\t" // tmp[m][0] "addi t5, t5, 16\n\t" "vle.v v1, (t5)\n\t" // tmp[m][1] "addi t5, t5, 16\n\t" "vle.v v2, (t5)\n\t" // tmp[m][2] "addi t5, t5, 16\n\t" "vle.v v3, (t5)\n\t" // tmp[m][3] "addi t5, t5, 16\n\t" "vle.v v4, (t5)\n\t" // tmp[m][4] "addi t5, t5, 16\n\t" "vle.v v5, (t5)\n\t" // tmp[m][5] "addi t5, t5, 16\n\t" "vle.v v6, (t5)\n\t" // tmp[m][6] "addi t5, t5, 16\n\t" "vle.v v7, (t5)\n\t" // tmp[m][7] "addi t5, t5, 16\n\t" "vmv.v.v v10, v6\n\t" //--------------------------------------------- "vfsub.vv v8, v4, v2\n\t" // tmp04 - tmp02 (tmp[m][4] - tmp[m][2]) "vfsub.vv v9, v3, v5\n\t" // tmp03 - tmp05 "vfsub.vv v24, v0, v6\n\t" // tmp00 - tmp06 "vfsub.vv v31, v7, v1\n\t" // tmp07 - tmp01 "vfmacc.vf v10, fa2, v2\n\t" // tmp06 + tmp02 * 0.25f "vfmul.vf v11, v1, fa5\n\t" // tmp01 * 0.5f "vfmul.vf v12, v1, fa7\n\t" // tmp01 * 2.0f "vfmacc.vf v24, fa0, v8\n\t" // tmp00 - tmp06 + 5.25 * (tmp04 - tmp02) = r0_tm_0[m] "vfmacc.vf v31, fa0, v9\n\t" // tmp07 - tmp01 + 5.25 * (tmp03 - tmp05) = r0_tm_7[m] //--------------------------------------------- "vfadd.vv v8, v2, v6\n\t" // tmp02 + tmp06 "vfadd.vv v9, v1, v5\n\t" // tmp01 + tmp05 "vfmacc.vf v11, fa6, v3\n\t" // tmp01 * 0.5f - tmp03 * 2.5f "vfmacc.vf v12, fa6, v3\n\t" // tmp01 * 2.f - tmp03 * 2.5f "vfmacc.vf v2, fa3, v4\n\t" // tmp02 - tmp04 * 1.25f "vfmacc.vf v10, fa3, v4\n\t" // tmp06 + tmp02 * 0.25f - tmp04 * 1.25f = tmp34a "vfmacc.vf v8, fa1, v4\n\t" // tmp02 + tmp06 - tmp04 * 4.25f = tmp12a "vfmacc.vf v9, fa1, v3\n\t" // tmp01 + tmp05 - tmp03 * 4.25f = tmp12b "vfmacc.vf v11, fa7, v5\n\t" // tmp01 * 0.5f - tmp03 * 2.5f + tmp05 * 2.0 = tmp34b "vfmacc.vf v12, fa5, v5\n\t" // tmp01 * 2.f - tmp03 * 2.5f + tmp05 * 0.5 = tmp56b "vse.v v24, (a0)\n\t" "vse.v v31, (a7)\n\t" "vfadd.vv v25, v8, v9\n\t" // tmp12a + tmp12b = r0_tm_1[m] "vfsub.vv v26, v8, v9\n\t" // tmp12a - tmp12b = r0_tm_2[m] //--------------------------------------------- "vfmacc.vf v6, fa4, v2\n\t" // tmp06 + (tmp02 - tmp04 * 1.25f) * 4 = tmp56a "vfadd.vv v27, v10, v11\n\t" // tmp34a + tmp34b = r0_tm_3[m] "vfsub.vv v28, v10, v11\n\t" // tmp34a - tmp34b = r0_tm_4[m] "vfadd.vv v29, v6, v12\n\t" // tmp56a + tmp56b = r0_tm_5[m] "vfsub.vv v30, v6, v12\n\t" // tmp56a - tmp56b = r0_tm_6[m] "vse.v v25, (a1)\n\t" "vse.v v26, (a2)\n\t" "vse.v v27, (a3)\n\t" "vse.v v28, (a4)\n\t" "vse.v v29, (a5)\n\t" "vse.v v30, (a6)\n\t" "add %1, %1, t2\n\t" "addi t0, t0, -1\n\t" "bnez t0, 3b" :"=r"(r0), // %0 "=r"(r0_tm), // %1 "=r"(tmp), // %2 "=r"(ratio_ptr), // %3 "=r"(padded_in_w), // %4 "=r"(tiles) // %5 :"0"(r0), "1"(r0_tm), "2"(tmp), "3"(ratio_ptr), "4"(padded_in_w), "5"(tiles) :"cc", "memory", "v0", "v1", "v2", 
"v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "t0", "t1", "t2", "t5", "s1", "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", "fa6", "fa7" ); } } csi_mem_free(tmp); } csi_mem_free(input_padd_buf); /*********************************** dot ***************************************/ // reorder input_tm1_buf __fp16 *input_tm2_buf = (__fp16 *)csi_mem_alloc(64 * tiles * in_c * sizeof(__fp16)); #pragma omp parallel for num_threads(1) for (int r = 0; r < 64; r++) { __fp16 *img_tm2 = input_tm2_buf + r * tiles * in_c; // input_tm2 r channel data int t = 0; for (; t + 7 < tiles; t += 8) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; //---------------------------- // for (int q = 0; q < in_c / 8; q++) { // for (int l = 0; l < 8; l++) { // tm2[0] = tm1[l]; // tm2[1] = tm1[l + 8 * 1]; // tm2[2] = tm1[l + 8 * 2]; // tm2[3] = tm1[l + 8 * 3]; // tm2[4] = tm1[l + 8 * 4]; // tm2[5] = tm1[l + 8 * 5]; // tm2[6] = tm1[l + 8 * 6]; // tm2[7] = tm1[l + 8 * 7]; // tm2 += 8; // } // tm1 += 64 * tiles * 8; // } //----------------------------- asm volatile( "vsetvli zero, zero, e16, m1\n\t" "slli t1, %2, 10\n\t" // 64 * tiles * 8 * 2 bytes "srai t2, %3, 3\n\t" // in_ch8 "1:\n\t" // in_ch loop8 "mv a0, %1\n\t" // updata tm1 addr "vle.v v0, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v1, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v2, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v3, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v4, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v5, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v6, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v7, (a0)\n\t" "vsseg8e.v v0, (%0)\n\t" "add %1, %1, t1\n\t" "addi %0, %0, 128\n\t" "addi t2, t2, -1\n\t" "bnez t2, 1b\n\t" :"=r"(tm2), // %0 "=r"(tm1), // %1 "=r"(tiles), // %2 "=r"(in_c) // %3 :"0"(tm2), "1"(tm1), "2"(tiles), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "a0", "t1", "t2" ); } for (; t + 3 < tiles; t += 4) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; // for (int q = 0; q < in_c / 8; q++) { // for (int l = 0; l < 8; l++) { // tm2[0] = tm1[l]; // tm2[1] = tm1[l + 8 * 1]; // tm2[2] = tm1[l + 8 * 2]; // tm2[3] = tm1[l + 8 * 3]; // tm2 += 4; // } // tm1 += 64 * tiles * 8; // } asm volatile( "vsetvli zero, zero, e16, m1\n\t" "slli t1, %2, 10\n\t" // 64 * tiles * 8 * 2 bytes "srai t2, %3, 3\n\t" // in_ch8 "1:\n\t" // in_ch loop8 "mv a0, %1\n\t" // updata tm1 addr "vle.v v0, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v1, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v2, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v3, (a0)\n\t" "vsseg4e.v v0, (%0)\n\t" "add %1, %1, t1\n\t" "addi %0, %0, 64\n\t" "addi t2, t2, -1\n\t" "bnez t2, 1b\n\t" :"=r"(tm2), // %0 "=r"(tm1), // %1 "=r"(tiles), // %2 "=r"(in_c) // %3 :"0"(tm2), "1"(tm1), "2"(tiles), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "v3", "a0", "t1", "t2" ); } for (; t + 1 < tiles; t += 2) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; // for (int q = 0; q < in_c / 8; q++) { // for (int l = 0; l < 8; l++) { // tm2[0] = tm1[l]; // tm2[1] = tm1[l + 8]; // tm2 += 2; // } // tm1 += 64 * tiles * 8; // } asm volatile( "vsetvli zero, zero, e16, m1\n\t" "slli t1, %2, 10\n\t" // 64 * tiles * 8 * 2 bytes "srai t2, %3, 3\n\t" // in_ch8 "1:\n\t" // in_ch loop8 "mv a0, %1\n\t" // updata tm1 addr "vle.v v0, (a0)\n\t" "addi a0, a0, 
16\n\t" "vle.v v1, (a0)\n\t" "vsseg2e.v v0, (%0)\n\t" "add %1, %1, t1\n\t" "addi %0, %0, 32\n\t" "addi t2, t2, -1\n\t" "bnez t2, 1b\n\t" :"=r"(tm2), // %0 "=r"(tm1), // %1 "=r"(tiles), // %2 "=r"(in_c) // %3 :"0"(tm2), "1"(tm1), "2"(tiles), "3"(in_c) :"cc", "memory", "v0", "v1", "a0", "t1", "t2" ); } for (; t < tiles; t++) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; // for (int q = 0; q < in_c / 8; q++) { // for (int l = 0; l < 8; l++) { // tm2[0] = tm1[l]; // tm2++; // } // tm1 += 64 * tiles * 8; // } asm volatile( "vsetvli zero, zero, e16, m1\n\t" "slli t1, %2, 10\n\t" // 64 * tiles * 8 * 2 bytes "srai t2, %3, 3\n\t" // in_ch8 "1:\n\t" // in_ch loop8 "mv a0, %1\n\t" // updata tm1 addr "vle.v v0, (a0)\n\t" "addi a0, a0, 16\n\t" "vse.v v0, (%0)\n\t" "add %1, %1, t1\n\t" "addi %0, %0, 16\n\t" "addi t2, t2, -1\n\t" "bnez t2, 1b\n\t" :"=r"(tm2), // %0 "=r"(tm1), // %1 "=r"(tiles), // %2 "=r"(in_c) // %3 :"0"(tm2), "1"(tm1), "2"(tiles), "3"(in_c) :"cc", "memory", "v0", "a0", "t1", "t2" ); } } csi_mem_free(input_tm1_buf); // output_dot_buf: [out_c/8, 64, blocks, 8] __fp16 *output_dot_buf = (__fp16 *)csi_mem_alloc(out_c * block_h * block_w * 8 * 8 * sizeof(__fp16)); #pragma omp parallel for num_threads(1) for (int p = 0; p < out_c / 8; p++) { __fp16 *output0_tm = output_dot_buf + p * 64 * tiles * 8; __fp16 *kernel0_tm = kernel_data + p * 64 * in_c * 8; for (int r = 0; r < 64; r++) { __fp16 *img_tm2 = input_tm2_buf + r * tiles * in_c; // img_tm2 第r个channel int t = 0; for (; t + 7 < tiles; t += 8) { __fp16 *r0 = img_tm2 + t * in_c; __fp16 *k0 = kernel0_tm + r * in_c * 8; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mv t0, %3\n\t" // t0 = in_c "vmv.v.x v0, zero\n\t" "vmv.v.x v1, zero\n\t" "vmv.v.x v2, zero\n\t" "vmv.v.x v3, zero\n\t" "vmv.v.x v4, zero\n\t" "vmv.v.x v5, zero\n\t" "vmv.v.x v6, zero\n\t" "vmv.v.x v7, zero\n\t" // clear "1:\n\t" "vle.v v8, (%1)\n\t" "addi %1, %1, 16\n\t" "flh fa0, (%0)\n\t" "flh fa1, 2(%0)\n\t" "flh fa2, 4(%0)\n\t" "flh fa3, 6(%0)\n\t" "flh fa4, 8(%0)\n\t" "flh fa5, 10(%0)\n\t" "flh fa6, 12(%0)\n\t" "flh fa7, 14(%0)\n\t" "addi %0, %0, 16\n\t" "vfmacc.vf v0, fa0, v8\n\t" "vfmacc.vf v1, fa1, v8\n\t" "vfmacc.vf v2, fa2, v8\n\t" "vfmacc.vf v3, fa3, v8\n\t" "vfmacc.vf v4, fa4, v8\n\t" "vfmacc.vf v5, fa5, v8\n\t" "vfmacc.vf v6, fa6, v8\n\t" "vfmacc.vf v7, fa7, v8\n\t" "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v1, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v2, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v3, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v4, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v5, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v6, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v7, (%2)\n\t" "addi %2, %2, 16\n\t" :"=r"(r0), // %0 "=r"(k0), // %1 "=r"(output0_tm), // %2 "=r"(in_c) // %3 :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", "fa6", "fa7", "t0" ); } for (; t + 3 < tiles; t += 4) { __fp16 *r0 = img_tm2 + t * in_c; __fp16 *k0 = kernel0_tm + r * in_c * 8; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mv t0, %3\n\t" // t0 = in_c "vmv.v.x v0, zero\n\t" "vmv.v.x v1, zero\n\t" "vmv.v.x v2, zero\n\t" "vmv.v.x v3, zero\n\t" // clear "1:\n\t" "vle.v v4, (%1)\n\t" "addi %1, %1, 16\n\t" "flh fa0, (%0)\n\t" "flh fa1, 2(%0)\n\t" "flh fa2, 4(%0)\n\t" "flh fa3, 6(%0)\n\t" "addi %0, %0, 8\n\t" "vfmacc.vf v0, fa0, v4\n\t" "vfmacc.vf v1, fa1, v4\n\t" "vfmacc.vf v2, fa2, 
v4\n\t" "vfmacc.vf v3, fa3, v4\n\t" "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v1, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v2, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v3, (%2)\n\t" "addi %2, %2, 16\n\t" :"=r"(r0), // %0 "=r"(k0), // %1 "=r"(output0_tm), // %2 "=r"(in_c) // %3 :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "fa0", "fa1", "fa2", "fa3", "t0" ); } for (; t + 1 < tiles; t += 2) { __fp16 *r0 = img_tm2 + t * in_c; __fp16 *k0 = kernel0_tm + r * in_c * 8; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mv t0, %3\n\t" // t0 = in_c "vmv.v.x v0, zero\n\t" "vmv.v.x v1, zero\n\t" // clear "1:\n\t" "vle.v v2, (%1)\n\t" "addi %1, %1, 16\n\t" "flh fa0, (%0)\n\t" "flh fa1, 2(%0)\n\t" "addi %0, %0, 4\n\t" "vfmacc.vf v0, fa0, v2\n\t" "vfmacc.vf v1, fa1, v2\n\t" "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v1, (%2)\n\t" "addi %2, %2, 16\n\t" :"=r"(r0), // %0 "=r"(k0), // %1 "=r"(output0_tm), // %2 "=r"(in_c) // %3 :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "fa0", "fa1", "t0" ); } for (; t < tiles; t++) { __fp16 *r0 = img_tm2 + t * in_c; __fp16 *k0 = kernel0_tm + r * in_c * 8; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mv t0, %3\n\t" // t0 = in_c "vmv.v.x v0, zero\n\t" // clear "1:\n\t" "vle.v v1, (%1)\n\t" "addi %1, %1, 16\n\t" "flh fa0, (%0)\n\t" "addi %0, %0, 2\n\t" "vfmacc.vf v0, fa0, v1\n\t" "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t" :"=r"(r0), // %0 "=r"(k0), // %1 "=r"(output0_tm), // %2 "=r"(in_c) // %3 :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c) :"cc", "memory", "v0", "v1", "fa0", "t0" ); } } } csi_mem_free(input_tm2_buf); /*************************** transform output ****************************/ // output_tm1_buf: [out_c/8, out_h6, out_w6, 8] __fp16 *output_tm1_buf = (__fp16 *)csi_mem_alloc(out_c * block_h * block_w * 6 * 6 * sizeof(__fp16)); /* AT = { { 1 1 1 1 1 1 1 0 }; { 0 1 -1 2 -2 1/2 -1/2 0 }; { 0 1 1 4 4 1/4 1/4 0 }; { 0 1 -1 8 -8 1/8 -1/8 0 }; { 0 1 1 16 16 1/16 1/16 0 }; { 0 1 -1 32 -32 1/32 -1/32 1 } }; AT = { { 1 1 1 1 1 32 32 0 }; { 0 1 -1 2 -2 16 -16 0 }; { 0 1 1 4 4 8 8 0 }; { 0 1 -1 8 -8 4 -4 0 }; { 0 1 1 16 16 2 2 0 }; { 0 1 -1 32 -32 1 -1 1 } }; */ #pragma omp parallel for num_threads(1) for (int p = 0; p < out_c / 8; p++) { __fp16 *bias_tmp = bias_data + p * 8; __fp16 *out0_tm = output_dot_buf + p * 64 * block_h * block_w * 8; // p-th channel group before output transform (after dot) __fp16 *out0 = output_tm1_buf + p * 6*block_h * 6*block_w * 8; // p-th channel group of the transformed output __fp16 *tmp1 = (__fp16 *)csi_mem_alloc(6 * 8 * 8 * sizeof(__fp16)); // __fp16 tmp[6][8][8]; int out_w6 = block_w * 6; for (int i = 0; i < block_h; i++) { for (int j = 0; j < block_w; j++) { __fp16 *output0_tm_0 = out0_tm + (i * block_w + j) * 8; // 8*8 block start addr __fp16 *output0 = out0 + (i * block_w * 6 * 6 + j * 6) * 8; // output 6*6 block start addr __fp16 ratio[] = {2.0, 4.0, 8.0, 16.0, 32.0}; __fp16 *ratio_ptr = ratio; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "li t0, 8\n\t" // m = 8 "mv t5, %2\n\t" // t5 = tmp start addr "slli t1, %4, 4\n\t" // t1 = tiles * 8 * 2 "slli t2, %4, 7\n\t" // t2 = tiles * 8 * 8 * 2 bytes "flh fa0, 0(%3)\n\t" // fa0 = 2 "flh fa1, 2(%3)\n\t" // fa1 = 4 "flh fa2, 4(%3)\n\t" // fa2 = 8 "flh fa3, 6(%3)\n\t" // fa3 = 16 "flh fa4, 8(%3)\n\t" // fa4 = 32 "mv s1, %0\n\t" "1:\n\t" // shape : [6 * 8] * [8 * 8] = [6 * 8] "mv a0, t5\n\t" // tmp[0][m] "addi a1, a0, 128\n\t" // tmp[1][m] "addi a2, a1, 128\n\t" // 
tmp[2][m] "addi a3, a2, 128\n\t" // tmp[3][m] "addi a4, a3, 128\n\t" // tmp[4][m] "addi a5, a4, 128\n\t" // tmp[5][m] "vle.v v0, (s1)\n\t" // r00 "add s1, s1, t1\n\t" "vle.v v1, (s1)\n\t" // r01 "add s1, s1, t1\n\t" "vle.v v2, (s1)\n\t" // r02 "add s1, s1, t1\n\t" "vle.v v3, (s1)\n\t" // r03 "add s1, s1, t1\n\t" "vle.v v4, (s1)\n\t" // r04 "add s1, s1, t1\n\t" "vle.v v5, (s1)\n\t" // r05 "add s1, s1, t1\n\t" "vle.v v6, (s1)\n\t" // r06 "add s1, s1, t1\n\t" "vle.v v7, (s1)\n\t" // r07 "add s1, s1, t1\n\t" //--------------------------------------------- "vfadd.vv v8, v1, v2\n\t" // r01 + r02 = tmp024a "vfsub.vv v9, v1, v2\n\t" // r01 - r02 = tmp135a "vfadd.vv v10, v3, v4\n\t" // r03 + r04 = tmp024b "vfsub.vv v11, v3, v4\n\t" // r03 - r04 = tmp135b "vfadd.vv v12, v5, v6\n\t" // r05 + r06 = tmp024c "vfsub.vv v13, v5, v6\n\t" // r05 - r06 = tmp135c "vfadd.vv v0, v0, v8\n\t" // r00 + tmp024a "vfadd.vv v7, v7, v9\n\t" // r07 + tmp135a "vmv.v.v v14, v10\n\t" // v14 = tmp024b "vmv.v.v v26, v8\n\t" // v26 = tmp024a "vmv.v.v v28, v8\n\t" // v28 = tmp024a "vfmacc.vf v26, fa1, v10\n\t" // tmp024a + tmp024b * 4 "vfmacc.vf v14, fa4, v12\n\t" // tmp024b + tmp024c * 32 "vfmacc.vf v28, fa3, v10\n\t" // tmp024a + tmp024b * 16 "vmv.v.v v15, v13\n\t" // v15 = tmp135c "vmv.v.v v25, v9\n\t" // v25 = tmp135a "vmv.v.v v27, v9\n\t" // v27 = tmp135a "vfadd.vv v24, v0, v14\n\t" // r00 + tmp024a + tmp024b + tmp024c * 32 = tmp[0][m] "vfmacc.vf v25, fa0, v11\n\t" // tmp135a + tmp135b * 2 "vfmacc.vf v27, fa2, v11\n\t" // tmp135a + tmp135b * 8 //--------------------------------------------- "vse.v v24, (a0)\n\t" "vfmacc.vf v26, fa2, v12\n\t" // tmp024a + tmp024b * 4 + tmp024c * 8 = tmp[2][m] "vfmacc.vf v28, fa0, v12\n\t" // tmp024a + tmp024b * 16 + tmp024c + tmp024c = tmp[4][m] "vfmacc.vf v15, fa4, v11\n\t" // tmp135b * 32 + tmp135c "vse.v v26, (a2)\n\t" "vse.v v28, (a4)\n\t" //--------------------------------------------- "vfmacc.vf v25, fa3, v13\n\t" // tmp135a + tmp135b * 2 + tmp135c * 16 = tmp[1][m] "vfmacc.vf v27, fa1, v13\n\t" // tmp135a + tmp135b * 8 + tmp135c * 4 = tmp[3][m] "vfadd.vv v29, v7, v15\n\t" // r07 + tmp135a + tmp135b * 32 + tmp135c "vse.v v25, (a1)\n\t" "vse.v v27, (a3)\n\t" "vse.v v29, (a5)\n\t" "addi t5, t5, 16\n\t" // tmp[0][0] --> tmp[0][1] "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "2:\n\t" "mv t5, %2\n\t" // tmp start addr "li t0, 6\n\t" // m = 6 "slli t1, %5, 4\n\t" // t1 = out_w6 * 8 * 2bytes "vle.v v16, (%6)\n\t" // load 8 channel bias data "3:\n\t" // shape : [6 * 8] * [6 * 8] = [6 * 6] "mv a0, %1\n\t" "addi a1, a0, 16\n\t" "addi a2, a1, 16\n\t" "addi a3, a2, 16\n\t" "addi a4, a3, 16\n\t" "addi a5, a4, 16\n\t" "vle.v v0, (t5)\n\t" // tmp[m][0] "addi t5, t5, 16\n\t" "vle.v v1, (t5)\n\t" // tmp[m][1] "addi t5, t5, 16\n\t" "vle.v v2, (t5)\n\t" // tmp[m][2] "addi t5, t5, 16\n\t" "vle.v v3, (t5)\n\t" // tmp[m][3] "addi t5, t5, 16\n\t" "vle.v v4, (t5)\n\t" // tmp[m][4] "addi t5, t5, 16\n\t" "vle.v v5, (t5)\n\t" // tmp[m][5] "addi t5, t5, 16\n\t" "vle.v v6, (t5)\n\t" // tmp[m][6] "addi t5, t5, 16\n\t" "vle.v v7, (t5)\n\t" // tmp[m][7] "addi t5, t5, 16\n\t" //--------------------------------------------- "vfadd.vv v8, v1, v2\n\t" // tmp[m][1] + tmp[m][2] = tmp024a "vfsub.vv v9, v1, v2\n\t" // tmp[m][1] - tmp[m][2] = tmp135a "vfadd.vv v10, v3, v4\n\t" // tmp[m][3] + tmp[m][4] = tmp024b "vfsub.vv v11, v3, v4\n\t" // tmp[m][3] - tmp[m][4] = tmp135b "vfadd.vv v12, v5, v6\n\t" // tmp[m][5] + tmp[m][6] = tmp024c "vfsub.vv v13, v5, v6\n\t" // tmp[m][5] - tmp[m][6] = tmp135c "vfadd.vv v0, v0, v8\n\t" // 
tmp[m][0] + tmp024a "vfadd.vv v7, v7, v9\n\t" // tmp[m][7] + tmp135a "vmv.v.v v14, v10\n\t" // v14 = tmp024b "vmv.v.v v26, v8\n\t" // v26 = tmp024a "vmv.v.v v28, v8\n\t" // v28 = tmp024a "vfmacc.vf v26, fa1, v10\n\t" // tmp024a + tmp024b * 4 "vfmacc.vf v14, fa4, v12\n\t" // tmp024b + tmp024c * 32 "vfmacc.vf v28, fa3, v10\n\t" // tmp024a + tmp024b * 16 "vmv.v.v v15, v13\n\t" // v15 = tmp135c "vmv.v.v v25, v9\n\t" // v25 = tmp135a "vmv.v.v v27, v9\n\t" // v27 = tmp135a "vfadd.vv v24, v0, v14\n\t" // tmp[m][0] + tmp024a + tmp024b + tmp024c * 32 = tmp[0][m] "vfmacc.vf v25, fa0, v11\n\t" // tmp135a + tmp135b * 2 "vfmacc.vf v27, fa2, v11\n\t" // tmp135a + tmp135b * 8 //--------------------------------------------- "vfadd.vv v24, v24, v16\n\t" // + bias "vfmacc.vf v26, fa2, v12\n\t" // tmp024a + tmp024b * 4 + tmp024c * 8 = tmp[2][m] "vfmacc.vf v28, fa0, v12\n\t" // tmp024a + tmp024b * 16 + tmp024c + tmp024c = tmp[4][m] "vfmacc.vf v15, fa4, v11\n\t" // tmp135b * 32 + tmp135c "vse.v v24, (a0)\n\t" "vfmacc.vf v25, fa3, v13\n\t" // tmp135a + tmp135b * 2 + tmp135c * 16 = tmp[1][m] "vfmacc.vf v27, fa1, v13\n\t" // tmp135a + tmp135b * 8 + tmp135c * 4 = tmp[3][m] "vfadd.vv v26, v26, v16\n\t" // + bias "vfadd.vv v28, v28, v16\n\t" // + bias "vfadd.vv v29, v7, v15\n\t" // tmp[m][7] + tmp135a + tmp135b * 32 + tmp135c "vse.v v26, (a2)\n\t" "vse.v v28, (a4)\n\t" //--------------------------------------------- "vfadd.vv v25, v25, v16\n\t" // + bias "vfadd.vv v27, v27, v16\n\t" // + bias "vfadd.vv v29, v29, v16\n\t" // + bias "vse.v v25, (a1)\n\t" "vse.v v27, (a3)\n\t" "vse.v v29, (a5)\n\t" "add %1, %1, t1\n\t" "addi t0, t0, -1\n\t" "bnez t0, 3b" :"=r"(output0_tm_0), // %0 "=r"(output0), // %1 "=r"(tmp1), // %2 "=r"(ratio_ptr), // %3 "=r"(tiles), // %4 "=r"(out_w6), // %5 "=r"(bias_tmp) // %6 :"0"(output0_tm_0), "1"(output0), "2"(tmp1), "3"(ratio_ptr), "4"(tiles), "5"(out_w6), "6"(bias_tmp) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v24", "v25", "v26", "v27", "v28", "v29", "t0", "t1", "t2", "t5", "s1", "a0", "a1", "a2", "a3", "a4", "a5", "fa0", "fa1", "fa2", "fa3", "fa4" ); } } csi_mem_free(tmp1); } csi_mem_free(output_dot_buf); // crop the output after transform: cut extra part (right , bottom) csi_c906_crop_output_pack8to1_fp16(output_tm1_buf, output_data, out_c, out_h, out_w, block_h * 6, block_w * 6); output_data += output_size; csi_mem_free(output_tm1_buf); } if (!flag_bias) { csi_mem_free(bias_data); bias_data = NULL; } return CSINN_TRUE; } void csi_c906_conv3x3s1_winograd43_transform_kernel_pack8_fp16(struct csi_tensor *o_kernel, struct csi_tensor *t_kernel) { int32_t outch = o_kernel->dim[0]; int32_t inch = o_kernel->dim[1]; __fp16 *kernel_data = (__fp16 *)o_kernel->data; // for kernel transform buf, 3x3 --> 6x6 __fp16 *kernel_tm = (__fp16 *)csi_mem_alloc(outch * inch * 6 * 6 * sizeof(__fp16)); // kernel transform matrix: G const __fp16 ktm[6][3] = { { 1.0f/4, 0.0f, 0.0f}, { -1.0f/6, -1.0f/6, -1.0f/6}, { -1.0f/6, 1.0f/6, -1.0f/6}, { 1.0f/24, 1.0f/12, 1.0f/6}, { 1.0f/24, -1.0f/12, 1.0f/6}, { 0.0f, 0.0f, 1.0f} }; csi_tensor_copy(t_kernel, o_kernel); for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const __fp16* kernel0 = kernel_data + p * inch * 9 + q * 9; __fp16* kernel_tm0 = kernel_tm + p * inch * 36 + q * 36; // transform kernel const __fp16 *k0 = kernel0; const __fp16 *k1 = kernel0 + 3; const __fp16 *k2 = kernel0 + 6; // h : first compute the transport matrix tmp = (g * GT)T __fp16 tmp[6][3]; for (int 
i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { __fp16* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // [O, I, 6, 6] --> [O/8, 6*6, I, 8] __fp16 *kernel_tm_pack8 = (__fp16 *)csi_mem_alloc(outch * inch * 6 * 6 * sizeof(__fp16)); t_kernel->data = kernel_tm_pack8; for (int oc = 0; oc < outch / 8; oc++) { __fp16 *g0 = kernel_tm_pack8 + oc * 36 * inch * 8; const __fp16 *k0 = kernel_tm + oc * 36 * inch * 8; const __fp16 *k1 = k0 + 36 * inch; const __fp16 *k2 = k1 + 36 * inch; const __fp16 *k3 = k2 + 36 * inch; const __fp16 *k4 = k3 + 36 * inch; const __fp16 *k5 = k4 + 36 * inch; const __fp16 *k6 = k5 + 36 * inch; const __fp16 *k7 = k6 + 36 * inch; for (int k = 0; k < 36; k++) { __fp16 *g00 = g0 + k * inch * 8; for (int ic = 0; ic < inch / 8; ic++) { for (int i = 0; i < 8; i++) { const __fp16 *k00 = k0 + (ic * 8 + i) * 36; const __fp16 *k10 = k1 + (ic * 8 + i) * 36; const __fp16 *k20 = k2 + (ic * 8 + i) * 36; const __fp16 *k30 = k3 + (ic * 8 + i) * 36; const __fp16 *k40 = k4 + (ic * 8 + i) * 36; const __fp16 *k50 = k5 + (ic * 8 + i) * 36; const __fp16 *k60 = k6 + (ic * 8 + i) * 36; const __fp16 *k70 = k7 + (ic * 8 + i) * 36; g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k40[k]; g00[5] = k50[k]; g00[6] = k60[k]; g00[7] = k70[k]; g00 += 8; } } } } csi_mem_free(kernel_tm); } int csi_c906_conv3x3s1_winograd43_pack8_fp16(struct csi_tensor *input, struct csi_tensor *output, struct csi_tensor *kernel, struct csi_tensor *bias, struct conv2d_params *params) { __fp16 *input_data = (__fp16 *)input->data; __fp16 *output_data = (__fp16 *)output->data; __fp16 *kernel_data = (__fp16 *)params->conv_extra.kernel_tm->data; __fp16 *bias_data = (__fp16 *)bias->data; // param int kernel_h = kernel->dim[2]; int kernel_w = kernel->dim[3]; int stride_h = params->stride_height; int stride_w = params->stride_width; int dilation_h = params->dilation_height; int dilation_w = params->dilation_width; int pad_left = params->pad_left; int pad_top = params->pad_top; int batch = input->dim[0]; int in_c = input->dim[1]; int in_h = input->dim[2]; int in_w = input->dim[3]; int input_size = in_c * in_h * in_w; int kernel_size = in_c * kernel_h * kernel_w; int out_c = kernel->dim[0]; int out_h = output->dim[2]; int out_w = output->dim[3]; int output_size = out_c * out_h * out_w; // winograd param int block_h = (out_h + 3) / 4; int block_w = (out_w + 3) / 4; int padded_in_h = block_h * 4 + 2; // each output block is 4x4; kernel = 3 * 3, stride = 1, thus the padded input needs + 2 int padded_in_w = block_w * 4 + 2; int padded_in_hw = padded_in_h * padded_in_w; // element size after padding per channel /****************************** bias *****************************/ bool flag_bias = 1; // default: conv2d layer includes bias if (bias_data == NULL) { flag_bias = 0; bias_data = (__fp16 *)csi_mem_alloc(out_c * sizeof(__fp16)); } for(int n = 0; n < batch; n++) { // pad buffer: [in_c/8 h w 8] __fp16 *input_padd_buf = (__fp16 *)csi_mem_alloc(in_c * padded_in_hw * sizeof(__fp16)); // pad input csi_c906_pad_input_pack1to8_fp16(input_data, input_padd_buf, in_c, in_h, in_w, padded_in_h, padded_in_w, pad_top, pad_left); input_data += input_size; // input transform buffer1: [in_ch/8, 36, blocks, 8] __fp16 
*input_tm1_buf = (__fp16 *)csi_mem_alloc(in_c * block_h * block_w * 6 * 6 * sizeof(__fp16)); /****************************** transform input *****************************/ /* BT = { { 4 0 -5 0 1 0 }; { 0 -4 -4 1 1 0 }; { 0 4 -4 -1 1 0 }; { 0 -2 -1 2 1 0 }; { 0 2 -1 -2 1 0 }; { 0 4 0 -5 0 1 } }; */ int tiles = block_h * block_w; #pragma omp parallel for num_threads(1) for(int q = 0; q < in_c / 8; q++) { __fp16 *img0 = input_padd_buf + q * padded_in_h * padded_in_w * 8; // feature map after padding - q channel __fp16 *img0_tm = input_tm1_buf + q * 36 * tiles * 8; // transform and interleave - q channel __fp16 *tmp = (__fp16 *)csi_mem_alloc(6 * 6 * 8 * sizeof(__fp16)); for(int i = 0; i < block_h; i++) { for(int j = 0; j < block_w; j++) { __fp16 *r0 = img0 + (i * padded_in_w * 4 + j * 4) * 8; // feature map after padding 6*6 start addr __fp16 *r0_tm = img0_tm + (i * block_w + j) * 8; // input_tm1 6*6 block start addr __fp16 ratio[] = {4, -4, 2, -2, -5}; // note: in fact cannot be an output constraint __fp16 *ratio_ptr = ratio; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "li t0, 6\n\t" // m = 6 "mv t5, %2\n\t" // t5 = tmp start addr "slli t1, %4, 4\n\t" // t1 = padded_in_w * 8 * 2 bytes "flh fa0, 0(%3)\n\t" // fa0 = 4 "flh fa1, 2(%3)\n\t" // fa1 = -4 "flh fa2, 4(%3)\n\t" // fa2 = 2 "flh fa3, 6(%3)\n\t" // fa3 = -2 "flh fa4, 8(%3)\n\t" // fa4 = -5 "1:\n\t" "mv s1, %0\n\t" // s1 = r00 addr "mv a0, t5\n\t" // tmp[0][m] "addi a1, a0, 96\n\t" // tmp[1][m] "addi a2, a1, 96\n\t" // tmp[2][m] "addi a3, a2, 96\n\t" // tmp[3][m] "addi a4, a3, 96\n\t" // tmp[4][m] "addi a5, a4, 96\n\t" // tmp[5][m] "vle.v v0, (s1)\n\t" // r00 "addi s1, s1, 16\n\t" "vle.v v1, (s1)\n\t" // r01 "addi s1, s1, 16\n\t" "vle.v v2, (s1)\n\t" // r02 "addi s1, s1, 16\n\t" "vle.v v3, (s1)\n\t" // r03 "addi s1, s1, 16\n\t" "vle.v v4, (s1)\n\t" // r04 "addi s1, s1, 16\n\t" "vle.v v5, (s1)\n\t" // r05 "addi s1, s1, 16\n\t" "vmv.v.v v24, v4\n\t" "vmv.v.v v29, v5\n\t" //--------------------------------------------- "vfmacc.vf v24, fa0, v0\n\t" // r04 + 4 * r00 "vfmacc.vf v24, fa4, v2\n\t" // r04 + 4 * r00 - 5 * r02 "vse.v v24, (a0)\n\t" //--------------------------------------------- "vfadd.vv v25, v3, v4\n\t" // r03 + r04 "vfadd.vv v6, v1, v2\n\t" // r01 + r02 "vfmacc.vf v25, fa1, v6\n\t" // r03 + r04 - 4 * (r01 + r02) "vse.v v25, (a1)\n\t" //--------------------------------------------- "vfsub.vv v26, v4, v3\n\t" // r04 - r03 "vfsub.vv v7, v1, v2\n\t" // r01 - r02 "vfmacc.vf v26, fa0, v7\n\t" // r04 - r03 + 4 * (r01 - r02) "vse.v v26, (a2)\n\t" //--------------------------------------------- "vfsub.vv v8, v1, v3\n\t" // r01 - r03 "vfsub.vv v27, v4, v2\n\t" // r04 - r02 "vfsub.vv v28, v4, v2\n\t" // r04 - r02 "vfmacc.vf v27, fa3, v8\n\t" // r04 - r02 - 2 * (r01 - r03) "vse.v v27, (a3)\n\t" "vfmacc.vf v28, fa2, v8\n\t" // r04 - r02 + 2 * (r01 - r03) "vse.v v28, (a4)\n\t" //--------------------------------------------- "vfmacc.vf v29, fa0, v1\n\t" // r05 + 4 * r01 "vfmacc.vf v29, fa4, v3\n\t" // r05 + 4 * r01 - 5 * r03 "vse.v v29, (a5)\n\t" //--------------------------------------------- "add %0, %0, t1\n\t" // padding feature map 6*6 next line addr "addi t5, t5, 16\n\t" // tmp[0][0] --> tmp[0][1] "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "2:\n\t" "mv t5, %2\n\t" // tmp start addr "li t0, 6\n\t" // m = 6 "slli t1, %5, 4\n\t" // t1 = tiles * 8 * 2 bytes "mulw t2, t0, t1\n\t" // t2 = tiles * 6 blocks * 8 channels * 2 bytes "3:\n\t" "mv a0, %1\n\t" // r0_tm_0 "add a1, a0, t1\n\t" // r0_tm_1 "add a2, a1, t1\n\t" // r0_tm_2 "add a3, a2, t1\n\t" 
// r0_tm_3 "add a4, a3, t1\n\t" // r0_tm_4 "add a5, a4, t1\n\t" // r0_tm_5 "vle.v v0, (t5)\n\t" // tmp[m][0] "addi t5, t5, 16\n\t" "vle.v v1, (t5)\n\t" // tmp[m][1] "addi t5, t5, 16\n\t" "vle.v v2, (t5)\n\t" // tmp[m][2] "addi t5, t5, 16\n\t" "vle.v v3, (t5)\n\t" // tmp[m][3] "addi t5, t5, 16\n\t" "vle.v v4, (t5)\n\t" // tmp[m][4] "addi t5, t5, 16\n\t" "vle.v v5, (t5)\n\t" // tmp[m][5] "addi t5, t5, 16\n\t" "vmv.v.v v24, v4\n\t" "vmv.v.v v29, v5\n\t" //--------------------------------------------- "vfmacc.vf v24, fa0, v0\n\t" // r04 + 4 * r00 "vfmacc.vf v24, fa4, v2\n\t" // r04 * 4 * r00 - 5 * r02 "vse.v v24, (a0)\n\t" //--------------------------------------------- "vfadd.vv v25, v3, v4\n\t" // r03 + r04 "vfadd.vv v6, v1, v2\n\t" // r01 + r02 "vfmacc.vf v25, fa1, v6\n\t" // r03 + r04 - 4 * (r01 - r02) "vse.v v25, (a1)\n\t" //--------------------------------------------- "vfsub.vv v26, v4, v3\n\t" // r04 - r03 "vfsub.vv v7, v1, v2\n\t" // r01 - r02 "vfmacc.vf v26, fa0, v7\n\t" // r04 - r03 + 4 * (r01 - r02) "vse.v v26, (a2)\n\t" //--------------------------------------------- "vfsub.vv v8, v1, v3\n\t" // r01 - r03 "vfsub.vv v27, v4, v2\n\t" // r04 - r02 "vfsub.vv v28, v4, v2\n\t" // r04 - r02 "vfmacc.vf v27, fa3, v8\n\t" // r04 - r02 - 2 * (r01 - r03) "vse.v v27, (a3)\n\t" "vfmacc.vf v28, fa2, v8\n\t" // r04 - r02 + 2 * (r01 - r03) "vse.v v28, (a4)\n\t" //--------------------------------------------- "vfmacc.vf v29, fa0, v1\n\t" // r05 + 4 * r01 "vfmacc.vf v29, fa4, v3\n\t" // r05 + 4 * r01 - 5 * r03 "vse.v v29, (a5)\n\t" //--------------------------------------------- "add %1, %1, t2\n\t" "addi t0, t0, -1\n\t" "bnez t0, 3b" :"=r"(r0), // %0 "=r"(r0_tm), // %1 "=r"(tmp), // %2 "=r"(ratio_ptr), // %3 "=r"(padded_in_w), // %4 "=r"(tiles) // %5 :"0"(r0), "1"(r0_tm), "2"(tmp), "3"(ratio_ptr), "4"(padded_in_w), "5"(tiles) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v24", "v25", "v26", "v27", "v28", "v29", "t0", "t1", "t2", "t5", "s1", "a0", "a1", "a2", "a3", "a4", "a5", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5" ); } } csi_mem_free(tmp); } csi_mem_free(input_padd_buf); /*********************************** dot ***************************************/ // reorder input_tm1_buf __fp16 *input_tm2_buf = (__fp16 *)csi_mem_alloc(36 * tiles * in_c * sizeof(__fp16)); #pragma omp parallel for num_threads(1) for (int r = 0; r < 36; r++) { __fp16 *img_tm2 = input_tm2_buf + r * tiles * in_c; // input_tm2 r channel data int t = 0; for (; t + 7 < tiles; t += 8) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; //----------------- for (int q = 0; q < in_c / 8; q++) { for (int l = 0; l < 8; l++) { tm2[0] = tm1[l]; tm2[1] = tm1[l + 8 * 1]; tm2[2] = tm1[l + 8 * 2]; tm2[3] = tm1[l + 8 * 3]; tm2[4] = tm1[l + 8 * 4]; tm2[5] = tm1[l + 8 * 5]; tm2[6] = tm1[l + 8 * 6]; tm2[7] = tm1[l + 8 * 7]; tm2 += 8; } tm1 += 36 * tiles * 8; } } for (; t + 3 < tiles; t += 4) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; for (int q = 0; q < in_c / 8; q++) { for (int l = 0; l < 8; l++) { tm2[0] = tm1[l]; tm2[1] = tm1[l + 8 * 1]; tm2[2] = tm1[l + 8 * 2]; tm2[3] = tm1[l + 8 * 3]; tm2 += 4; } tm1 += 36 * tiles * 8; } } for (; t + 1 < tiles; t += 2) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; for (int q = 0; q < in_c / 8; q++) { for (int l = 0; l < 8; l++) { tm2[0] = tm1[l]; tm2[1] = tm1[l + 8]; tm2 += 2; } tm1 += 36 * 
tiles * 8; } } for (; t < tiles; t++) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; for (int q = 0; q < in_c / 8; q++) { for (int l = 0; l < 8; l++) { tm2[0] = tm1[l]; tm2++; } tm1 += 36 * tiles * 8; } } } csi_mem_free(input_tm1_buf); // output_dot_buf: [out_c/4, 36, blocks, 4] __fp16 *output_dot_buf = (__fp16 *)csi_mem_alloc(out_c * block_h * block_w * 6 * 6 * sizeof(__fp16)); #pragma omp parallel for num_threads(1) for (int p = 0; p < out_c / 8; p++) { __fp16 *output0_tm = output_dot_buf + p * 36 * tiles * 8; // 8 channel dot output __fp16 *kernel0_tm = kernel_data + p * 36 * in_c * 8; // 8 channel kernel for (int r = 0; r < 36; r++) { __fp16 *img_tm2 = input_tm2_buf + r * tiles * in_c; // img_tm2 第r个channel int t = 0; for (; t + 7 < tiles; t += 8) { __fp16 *r0 = img_tm2 + t * in_c; __fp16 *k0 = kernel0_tm + r * in_c * 8; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mv t0, %3\n\t" // t0 = in_c "vmv.v.x v0, zero\n\t" "vmv.v.x v1, zero\n\t" "vmv.v.x v2, zero\n\t" "vmv.v.x v3, zero\n\t" "vmv.v.x v4, zero\n\t" "vmv.v.x v5, zero\n\t" "vmv.v.x v6, zero\n\t" "vmv.v.x v7, zero\n\t" // clear "1:\n\t" "flh fa0, (%0)\n\t" "flh fa1, 2(%0)\n\t" "flh fa2, 4(%0)\n\t" "flh fa3, 6(%0)\n\t" "flh fa4, 8(%0)\n\t" "flh fa5, 10(%0)\n\t" "flh fa6, 12(%0)\n\t" "flh fa7, 14(%0)\n\t" "addi %0, %0, 16\n\t" "vle.v v8, (%1)\n\t" "addi %1, %1, 16\n\t" "vfmacc.vf v0, fa0, v8\n\t" "vfmacc.vf v1, fa1, v8\n\t" "vfmacc.vf v2, fa2, v8\n\t" "vfmacc.vf v3, fa3, v8\n\t" "vfmacc.vf v4, fa4, v8\n\t" "vfmacc.vf v5, fa5, v8\n\t" "vfmacc.vf v6, fa6, v8\n\t" "vfmacc.vf v7, fa7, v8\n\t" "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v1, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v2, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v3, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v4, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v5, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v6, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v7, (%2)\n\t" "addi %2, %2, 16\n\t" :"=r"(r0), // %0 "=r"(k0), // %1 "=r"(output0_tm), // %2 "=r"(in_c) // %3 :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", "fa6", "fa7", "t0" ); } for (; t + 3 < tiles; t += 4) { __fp16 *r0 = img_tm2 + t * in_c; __fp16 *k0 = kernel0_tm + r * in_c * 8; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mv t0, %3\n\t" // t0 = in_c "vmv.v.x v0, zero\n\t" "vmv.v.x v1, zero\n\t" "vmv.v.x v2, zero\n\t" "vmv.v.x v3, zero\n\t" // clear "1:\n\t" "flh fa0, (%0)\n\t" "flh fa1, 2(%0)\n\t" "flh fa2, 4(%0)\n\t" "flh fa3, 6(%0)\n\t" "addi %0, %0, 8\n\t" "vle.v v4, (%1)\n\t" "addi %1, %1, 16\n\t" "vfmacc.vf v0, fa0, v4\n\t" "vfmacc.vf v1, fa1, v4\n\t" "vfmacc.vf v2, fa2, v4\n\t" "vfmacc.vf v3, fa3, v4\n\t" "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v1, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v2, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v3, (%2)\n\t" "addi %2, %2, 16\n\t" :"=r"(r0), // %0 "=r"(k0), // %1 "=r"(output0_tm), // %2 "=r"(in_c) // %3 :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "fa0", "fa1", "fa2", "fa3", "t0" ); } for (; t + 1 < tiles; t += 2) { __fp16 *r0 = img_tm2 + t * in_c; __fp16 *k0 = kernel0_tm + r * in_c * 8; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mv t0, %3\n\t" // t0 = in_c "vmv.v.x v0, zero\n\t" "vmv.v.x v1, zero\n\t" // clear "1:\n\t" "flh fa0, (%0)\n\t" "flh fa1, 2(%0)\n\t" "addi %0, %0, 
4\n\t" "vle.v v2, (%1)\n\t" "addi %1, %1, 16\n\t" "vfmacc.vf v0, fa0, v2\n\t" "vfmacc.vf v1, fa1, v2\n\t" "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v1, (%2)\n\t" "addi %2, %2, 16\n\t" :"=r"(r0), // %0 "=r"(k0), // %1 "=r"(output0_tm), // %2 "=r"(in_c) // %3 :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "fa0", "fa1", "t0" ); } for (; t < tiles; t++) { __fp16 *r0 = img_tm2 + t * in_c; __fp16 *k0 = kernel0_tm + r * in_c * 8; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mv t0, %3\n\t" // t0 = in_c "vmv.v.x v0, zero\n\t" // clear "1:\n\t" "flw fa0, (%0)\n\t" "addi %0, %0, 2\n\t" "vle.v v1, (%1)\n\t" "addi %1, %1, 16\n\t" "vfmacc.vf v0, fa0, v1\n\t" "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t" :"=r"(r0), // %0 "=r"(k0), // %1 "=r"(output0_tm), // %2 "=r"(in_c) // %3 :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c) :"cc", "memory", "v0", "v1", "fa0", "t0" ); } } } csi_mem_free(input_tm2_buf); /*************************** transform output ****************************/ // output_tm1_buf: [out_c/4, out_h4, out_w4, 4] __fp16 *output_tm1_buf = (__fp16 *)csi_mem_alloc(out_c * block_h * block_w * 4 * 4 * sizeof(__fp16)); /* AT = { { 1 1 1 1 1 0 }, { 0 1 -1 2 -2 0 }, { 0 1 1 4 4 0 }, { 0 1 -1 8 -8 1 } }; */ #pragma omp parallel for num_threads(1) for (int p = 0; p < out_c / 8; p++) { __fp16 *bias_tmp = bias_data + p * 8; __fp16 *out0_tm = output_dot_buf + p * 36 * block_h * block_w * 8; // 输出转换前/dot后 第p个channel __fp16 *out0 = output_tm1_buf + p * 4*block_h * 4*block_w * 8; // 转换后输出 第p个channel __fp16 *tmp1 = (__fp16 *)csi_mem_alloc(4 * 6 * 8 * sizeof(__fp16)); int out_w4 = block_w * 4; for (int i = 0; i < block_h; i++) { for (int j = 0; j < block_w; j++) { __fp16 *output0_tm_0 = out0_tm + (i * block_w + j) * 8; // 6*6 起始地址 __fp16 *output0 = out0 + (i * block_w * 4 * 4 + j * 4) * 8; // 输出 4*4 的起始地址 __fp16 ratio[] = {2.0, 4.0, 8.0}; __fp16 *ratio_ptr = ratio; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "li t0, 6\n\t" // m = 6 "mv t5, %2\n\t" // t5 = tmp start addr "slli t1, %4, 4\n\t" // t1 = tiles * 8 * 2 "mulw t2, t0, t1\n\t" // t2 = tiles * 6 blocks * 8 channels * 2 bytes "flh fa0, 0(%3)\n\t" // fa0 = 2 "flh fa1, 2(%3)\n\t" // fa1 = 4 "flh fa2, 4(%3)\n\t" // fa2 = 8 "mv s1, %0\n\t" "1:\n\t" // shape : [4 * 6] * [6 * 6] = [4 * 6] "mv a0, t5\n\t" // tmp[0][m] "addi a1, a0, 96\n\t" // tmp[1][m] "addi a2, a1, 96\n\t" // tmp[2][m] "addi a3, a2, 96\n\t" // tmp[3][m] "vle.v v0, (s1)\n\t" // r00 "add s1, s1, t1\n\t" "vle.v v1, (s1)\n\t" // r01 "add s1, s1, t1\n\t" "vle.v v2, (s1)\n\t" // r02 "add s1, s1, t1\n\t" "vle.v v3, (s1)\n\t" // r03 "add s1, s1, t1\n\t" "vle.v v4, (s1)\n\t" // r04 "add s1, s1, t1\n\t" "vle.v v5, (s1)\n\t" // r05 "add s1, s1, t1\n\t" //--------------------------------------------- "vfadd.vv v26, v1, v2\n\t" // r01 + r02 = tmp02a "vfsub.vv v6, v1, v2\n\t" // r01 - r02 = tmp13a "vfadd.vv v7, v3, v4\n\t" // r03 + r04 = tmp02b "vfsub.vv v8, v3, v4\n\t" // r03 - r04 = tmp13b "vmv.v.v v25, v6\n\t" // v25 = tmp13a //--------------------------------------------- "vfadd.vv v24, v0, v26\n\t" // r00 + tmp02a "vfadd.vv v24, v24, v7\n\t" // r00 + tmp02a + tmp02b "vse.v v24, (a0)\n\t" "vfmacc.vf v25, fa0, v8\n\t" // tmp13a + 2 * tmp13b "vse.v v25, (a1)\n\t" "vfmacc.vf v26, fa1, v7\n\t" // tmp02a + 4 * tmp02b "vse.v v26, (a2)\n\t" "vfadd.vv v27, v5, v6\n\t" // r05 + tmp13a "vfmacc.vf v27, fa2, v8\n\t" // r05 + tmp13a * 8 tmp13b "vse.v v27, (a3)\n\t" 
//--------------------------------------------- "addi t5, t5, 16\n\t" // tmp[0][0] --> tmp[0][1] "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "2:\n\t" "mv t5, %2\n\t" // tmp start addr "li t0, 4\n\t" // m = 4 "slli t1, %5, 4\n\t" // t1 = out_w4 * 8 * 2 bytes "vle.v v16, (%6)\n\t" // load 8 channel bias data "3:\n\t" // shape : [4 * 6] * [6 * 4] = [4 * 4] "mv a0, %1\n\t" "addi a1, a0, 16\n\t" "addi a2, a1, 16\n\t" "addi a3, a2, 16\n\t" "vle.v v0, (t5)\n\t" // tmp[m][0] "addi t5, t5, 16\n\t" "vle.v v1, (t5)\n\t" // tmp[m][1] "addi t5, t5, 16\n\t" "vle.v v2, (t5)\n\t" // tmp[m][2] "addi t5, t5, 16\n\t" "vle.v v3, (t5)\n\t" // tmp[m][3] "addi t5, t5, 16\n\t" "vle.v v4, (t5)\n\t" // tmp[m][4] "addi t5, t5, 16\n\t" "vle.v v5, (t5)\n\t" // tmp[m][5] "addi t5, t5, 16\n\t" //--------------------------------------------- "vfadd.vv v26, v1, v2\n\t" // r01 + r02 = tmp02a "vfsub.vv v6, v1, v2\n\t" // r01 - r02 = tmp13a "vfadd.vv v7, v3, v4\n\t" // r03 + r04 = tmp02b "vfsub.vv v8, v3, v4\n\t" // r03 - r04 = tmp13b "vmv.v.v v25, v6\n\t" // v25 = tmp13a //--------------------------------------------- "vfadd.vv v24, v0, v26\n\t" // r00 + tmp02a "vfadd.vv v24, v24, v7\n\t" // r00 + tmp02a + tmp02b "vfadd.vv v24, v24, v16\n\t" // add bias "vse.v v24, (a0)\n\t" "vfmacc.vf v25, fa0, v8\n\t" // tmp13a + 2 * tmp13b "vfadd.vv v25, v25, v16\n\t" // add bias "vse.v v25, (a1)\n\t" "vfmacc.vf v26, fa1, v7\n\t" // tmp02a + 4 * tmp02b "vfadd.vv v26, v26, v16\n\t" // add bias "vse.v v26, (a2)\n\t" "vfadd.vv v27, v5, v6\n\t" // r05 + tmp13a "vfmacc.vf v27, fa2, v8\n\t" // r05 + tmp13a * 8 tmp13b "vfadd.vv v27, v27, v16\n\t" // add bias "vse.v v27, (a3)\n\t" "add %1, %1, t1\n\t" "addi t0, t0, -1\n\t" "bnez t0, 3b" :"=r"(output0_tm_0), // %0 "=r"(output0), // %1 "=r"(tmp1), // %2 "=r"(ratio_ptr), // %3 "=r"(tiles), // %4 "=r"(out_w4), // %5 "=r"(bias_tmp) // %6 :"0"(output0_tm_0), "1"(output0), "2"(tmp1), "3"(ratio_ptr), "4"(tiles), "5"(out_w4), "6"(bias_tmp) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v24", "v25", "v26", "v27", "t0", "t1", "t2", "t5", "s1", "a0", "a1", "a2", "a3", "fa0", "fa1", "fa2" ); } } csi_mem_free(tmp1); } csi_mem_free(output_dot_buf); // crop the output after transform: cut extra part (right , bottom) csi_c906_crop_output_pack8to1_fp16(output_tm1_buf, output_data, out_c, out_h, out_w, block_h * 4, block_w * 4); output_data += output_size; csi_mem_free(output_tm1_buf); } if (!flag_bias) { csi_mem_free(bias_data); bias_data = NULL; } return CSINN_TRUE; } void csi_c906_conv3x3s1_fp16(struct csi_tensor *input, struct csi_tensor *output, struct csi_tensor *kernel, struct csi_tensor *bias, struct conv2d_params *params) { /* to do */ } void csi_c906_conv3x3s2_fp16(struct csi_tensor *input, struct csi_tensor *output, struct csi_tensor *kernel, struct csi_tensor *bias, struct conv2d_params *params) { /* to do */ }
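/*
   Scalar reference for the F(4,3) output transform above (a sketch only;
   the shipped path is the vector code, and the name wg43_at_1d is
   illustrative, not part of this file). Applying AT once maps six inputs
   r0..r5 to four outputs; the asm runs this over the rows of each 6x6
   dot-result tile, then over the columns, adding bias in the second pass.
*/
// static inline void wg43_at_1d(const __fp16 r[6], __fp16 y[4])
// {
//     __fp16 tmp02a = r[1] + r[2], tmp13a = r[1] - r[2];
//     __fp16 tmp02b = r[3] + r[4], tmp13b = r[3] - r[4];
//     y[0] = r[0] + tmp02a + tmp02b;          // { 1 1  1 1  1 0 }
//     y[1] = tmp13a + 2.0f * tmp13b;          // { 0 1 -1 2 -2 0 }
//     y[2] = tmp02a + 4.0f * tmp02b;          // { 0 1  1 4  4 0 }
//     y[3] = r[5] + tmp13a + 8.0f * tmp13b;   // { 0 1 -1 8 -8 1 }
// }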
/* CSI-NN2 version 1.12.x */ /* the conditions for using winograd convolution in_channel >= 16 out_channel >= 16 input_height <= 120 input_width <= 120 */ #include "csi_c906.h" /* padding input for winograd input transform , and change memory layout to [n c/8 h w 8] input layout: [n c h w] input_padded layout: [n c/8 h w 8] constrain: input channel % 8 = 0 */ void csi_c906_pad_input_pack1to8_fp16(const __fp16 *input, __fp16 *input_padded, int inc, int inh, int inw, int padded_h, int padded_w, int pad_top, int pad_left) { int inc8 = inc / 8; int padded_hw = padded_h * padded_w; __fp16 *pad_ptr = input_padded; __fp16 *inp_ptr = (__fp16 *)input; int resi_h = padded_h - pad_top - inh; // remain to pad on h (pad_down) int resi_w = padded_w - pad_left - inw; // remain to pad on w (pad_right) asm volatile( "vsetvli zero, zero, e16, m1\n\t" "vmv.v.x v2, zero\n\t" // clear v2, for memset value 0 "mulw t1, %6, %7\n\t" // pad_top * padded_w "mulw t2, %6, %9\n\t" // pad_down * padded_w "mulw t0, %3, %4\n\t" // input_size per_channel "slli t0, t0, 1\n\t" // load stride = input_size * 2 "slli t6, t0, 3\n\t" // t6 = input_size * 8 * 2 "1:\n\t" // channel loop [inc/8] "mv a0, %0\n\t" // update input_addr "mv t5, %3\n\t" // t5 = in_h "beqz %7, 3f\n\t" // if pad_top = 0 "mv t3, t1\n\t" // t3 = num to memset "2:\n\t" // pad h_top "vse.v v2, (%1)\n\t" "addi %1, %1, 16\n\t" "addi t3, t3, -1\n\t" "bnez t3, 2b\n\t" "3:\n\t" // pad h_mid "mv t4, %4\n\t" // t4 = in_w "beqz %8, 5f\n\t" // if pad_left = 0 "mv t3, %8\n\t" // t3 = pad_left "4:\n\t" // pad w_left "vse.v v2, (%1)\n\t" "addi %1, %1, 16\n\t" "addi t3, t3, -1\n\t" "bnez t3, 4b\n\t" "5:\n\t" // pad w_mid "vlse.v v4, (a0), t0\n\t" "addi a0, a0, 2\n\t" "vse.v v4, (%1)\n\t" "addi %1, %1, 16\n\t" "addi t4, t4, -1\n\t" "bnez t4, 5b\n\t" "beqz %10, 7f\n\t" // if pad_right = 0 "mv t3, %10\n\t" // t3 = pad_right "6:\n\t" // pad w_right "vse.v v2, (%1)\n\t" "addi %1, %1, 16\n\t" "addi t3, t3, -1\n\t" "bnez t3, 6b\n\t" "7:\n\t" "addi t5, t5, -1\n\t" "bnez t5, 3b\n\t" "beqz %9, 9f\n\t" // if pad_down = 0 "mv t3, t2\n\t" // t3 = num to memset 0 "8:\n\t" // pad h_down "vse.v v2, (%1)\n\t" "addi %1, %1, 16\n\t" "addi t3, t3, -1\n\t" "bnez t3, 8b\n\t" "9:\n\t" "add %0, %0, t6\n\t" // input_data jump to next 8 channel "addi %2, %2, -1\n\t" "bnez %2, 1b\n\t" :"=r"(inp_ptr), // %0 "=r"(pad_ptr), // %1 "=r"(inc8), // %2 "=r"(inh), // %3 "=r"(inw), // %4 "=r"(padded_hw), // %5 "=r"(padded_w), // %6 "=r"(pad_top), // %7 "=r"(pad_left), // %8 "=r"(resi_h), // %9 "=r"(resi_w) // %10 :"0"(inp_ptr), "1"(pad_ptr), "2"(inc8), "3"(inh), "4"(inw), "5"(padded_hw), "6"(padded_w), "7"(pad_top), "8"(pad_left), "9"(resi_h), "10"(resi_w) :"cc", "memory", "v2", "v4", "a0", "t0", "t1", "t2", "t3", "t4", "t5", "t6" ); } void csi_c906_crop_output_pack8to1_fp16(const __fp16 *output_trans, __fp16 *output, int out_c, int out_h, int out_w, int wino_h, int wino_w) { int out_c8 = out_c / 8; __fp16 *out_tm_ptr = (__fp16 *)output_trans; __fp16 *out_ptr = output; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mulw t0, %3, %4\n\t" // output_size per_channel "slli t0, t0, 1\n\t" // store_stride = output_size * 2 "slli t3, t0, 3\n\t" // t3 = output_size * 8 * 2 "slli t4, %6, 4\n\t" // t4 = wino_w * 8 * 2 "mulw t5, %5, %6\n\t" // crop_size per_channel "slli t5, t5, 4\n\t" // t5 = crop_size * 8 * 2 "1:\n\t" // channel loop [out_ch / 8] "mv a1, %1\n\t" // update output_addr "mv a0, %0\n\t" // update crop_addr per-channel "mv t1, %3\n\t" // t1 = out_h "2:\n\t" // crop h "mv t2, %4\n\t" // t2 = out_w "mv s1, 
a0\n\t" // update crop_addr per-row "3:\n\t" // crop w "vle.v v2, (s1)\n\t" "addi s1, s1, 16\n\t" "vsse.v v2, (a1), t0\n\t" "addi a1, a1, 2\n\t" "addi t2, t2, -1\n\t" "bnez t2, 3b\n\t" "add a0, a0, t4\n\t" // crop-data jump to next row "addi t1, t1, -1\n\t" "bnez t1, 2b\n\t" "4:\n\t" "add %1, %1, t3\n\t" // output_data jump to next 8 channel "add %0, %0, t5\n\t" // crop-data jump to next 8 channel "addi %2, %2, -1\n\t" "bnez %2, 1b\n\t" :"=r"(out_tm_ptr), // %0 "=r"(out_ptr), // %1 "=r"(out_c8), // %2 "=r"(out_h), // %3 "=r"(out_w), // %4 "=r"(wino_h), // %5 "=r"(wino_w) // %6 :"0"(out_tm_ptr), "1"(out_ptr), "2"(out_c8), "3"(out_h), "4"(out_w), "5"(wino_h), "6"(wino_w) :"cc", "memory", "v2", "v3", "a0", "a1", "s1", "t0", "t1", "t2", "t3", "t4", "t5" ); } /* constrain: output channel % 8 = 0 input channel % 8 = 0 kernel before: [O I 3*3] kernel after : [O/8 8*8 I 8] */ void csi_c906_conv3x3s1_winograd64_transform_kernel_pack8_fp16(struct csi_tensor *o_kernel, struct csi_tensor *t_kernel) { int32_t outch = o_kernel->dim[0]; int32_t inch = o_kernel->dim[1]; __fp16 *kernel_data = (__fp16 *)o_kernel->data; // for kernel transform buf, 3x3 --> 8x8 __fp16 *kernel_tm = (__fp16 *)csi_mem_alloc(outch * inch * 8 * 8 * sizeof(__fp16)); // kernel transform matrix: G const __fp16 ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; // const __fp16 ktm[8][3] = { // {1.0f, 0.0f, 0.0f}, // {-2.0f / 9, -2.0f / 9, -2.0f / 9}, // {-2.0f / 9, 2.0f / 9, -2.0f / 9}, // {1.0f / 90, 1.0f / 45, 2.0f / 45}, // {1.0f / 90, -1.0f / 45, 2.0f / 45}, // {32.0f / 45, 16.0f / 45, 8.0f / 45}, // {32.0f / 45, -16.0f / 45, 8.0f / 45}, // {0.0f, 0.0f, 1.0f} // }; csi_tensor_copy(t_kernel, o_kernel); for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const __fp16* kernel0 = kernel_data + p * inch * 9 + q * 9; __fp16* kernel_tmp = kernel_tm + p * inch * 64 + q * 64; // transform kernel const __fp16 *k0 = kernel0; const __fp16 *k1 = kernel0 + 3; const __fp16 *k2 = kernel0 + 6; // h : first compute the transport matrix tmp = (g * GT)T __fp16 tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 8; j++) { __fp16* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tmp[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // optimized layout for winograd64 __fp16 *kernel_tm_pack8 = (__fp16 *)csi_mem_alloc(outch * inch * 8 * 8 * sizeof(__fp16)); t_kernel->data = kernel_tm_pack8; for (int oc = 0; oc < outch / 8; oc++) { __fp16 *g0 = kernel_tm_pack8 + oc * 64 * inch * 8; const __fp16 *k0 = kernel_tm + oc * 64 * inch * 8; const __fp16 *k1 = k0 + 64 * inch; const __fp16 *k2 = k1 + 64 * inch; const __fp16 *k3 = k2 + 64 * inch; const __fp16 *k4 = k3 + 64 * inch; const __fp16 *k5 = k4 + 64 * inch; const __fp16 *k6 = k5 + 64 * inch; const __fp16 *k7 = k6 + 64 * inch; for (int k = 0; k < 64; k++) { __fp16 *g00 = g0 + k * inch * 8; for (int ic = 0; ic < inch / 8; ic++) { for (int i = 0; i < 8; i++) { const __fp16 *k00 = k0 + (ic * 8 + i) * 64; const __fp16 *k10 = k1 + (ic * 8 + i) * 64; const __fp16 *k20 = k2 + (ic * 8 + i) * 64; const __fp16 *k30 = k3 + (ic 
* 8 + i) * 64;
const __fp16 *k40 = k4 + (ic * 8 + i) * 64;
const __fp16 *k50 = k5 + (ic * 8 + i) * 64;
const __fp16 *k60 = k6 + (ic * 8 + i) * 64;
const __fp16 *k70 = k7 + (ic * 8 + i) * 64;
g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k];
g00[4] = k40[k]; g00[5] = k50[k]; g00[6] = k60[k]; g00[7] = k70[k];
g00 += 8;
} } } }
csi_mem_free(kernel_tm);
}

/*
    constraints: output channel % 8 = 0
                 input channel % 8 = 0
*/
int csi_c906_conv3x3s1_winograd64_pack8_fp16(struct csi_tensor *input,
                                             struct csi_tensor *output,
                                             struct csi_tensor *kernel,
                                             struct csi_tensor *bias,
                                             struct conv2d_params *params)
{
    // uint64_t start_time, end_time;
    // start_time = csi_get_timespec();
    __fp16 *input_data = (__fp16 *)input->data;
    __fp16 *output_data = (__fp16 *)output->data;
    __fp16 *kernel_data = (__fp16 *)params->conv_extra.kernel_tm->data;
    __fp16 *bias_data = (__fp16 *)bias->data;

    // param
    int kernel_h = kernel->dim[2];
    int kernel_w = kernel->dim[3];
    int stride_h = params->stride_height;
    int stride_w = params->stride_width;
    int dilation_h = params->dilation_height;
    int dilation_w = params->dilation_width;
    int pad_left = params->pad_left;
    int pad_top = params->pad_top;

    int batch = input->dim[0];
    int in_c = input->dim[1];
    int in_h = input->dim[2];
    int in_w = input->dim[3];
    int input_size = in_c * in_h * in_w;
    int kernel_size = in_c * kernel_h * kernel_w;

    int out_c = kernel->dim[0];
    int out_h = output->dim[2];
    int out_w = output->dim[3];
    int output_size = out_c * out_h * out_w;

    // winograd param
    int block_h = (out_h + 5) / 6;
    int block_w = (out_w + 5) / 6;
    int padded_in_h = block_h * 6 + 2;  // block_h * 6 for alignment with 6, kernel = 3 * 3, stride = 1, thus input size + 2
    int padded_in_w = block_w * 6 + 2;
    int padded_in_hw = padded_in_h * padded_in_w;   // element size after padding per channel

    /****************************** bias *****************************/
    bool flag_bias = 1;     // default: conv2d layer includes bias
    if (bias_data == NULL) {
        flag_bias = 0;
        bias_data = (__fp16 *)csi_mem_alloc(out_c * sizeof(__fp16));
    }

    for(int n = 0; n < batch; n++) {

        // pad buffer: [in_c/8 h w 8]
        __fp16 *input_padd_buf = (__fp16 *)csi_mem_alloc(in_c * padded_in_hw * sizeof(__fp16));

        // pad input
        csi_c906_pad_input_pack1to8_fp16(input_data, input_padd_buf, in_c, in_h, in_w, padded_in_h, padded_in_w, pad_top, pad_left);
        input_data += input_size;

        // input transform buffer1: [in_ch/8, 64, blocks, 8]
        __fp16 *input_tm1_buf = (__fp16 *)csi_mem_alloc(in_c * block_h * block_w * 8 * 8 * sizeof(__fp16));

        /****************************** transform input *****************************/
        /*
        BT = {
            { 1    0    -5.25  0     5.25  0     -1  0 };
            { 0    1    1      -4.25 -4.25 1     1   0 };
            { 0    -1   1      4.25  -4.25 -1    1   0 };
            { 0    0.5  0.25   -2.5  -1.25 2     1   0 };
            { 0    -0.5 0.25   2.5   -1.25 -2    1   0 };
            { 0    2    4      -2.5  -5    0.5   1   0 };
            { 0    -2   4      2.5   -5    -0.5  1   0 };
            { 0    -1   0      5.25  0     -5.25 0   1 }
        };
        */
        // int in_h_tm = block_h * 8;  // input height after transform
        // int in_w_tm = block_w * 8;

        int tiles = block_h * block_w;

        for(int q = 0; q < in_c / 8; q++) {
            __fp16 *img0 = input_padd_buf + q * padded_in_h * padded_in_w * 8;  // feature map after padding - q channel
            __fp16 *img0_tm = input_tm1_buf + q * 64 * tiles * 8;               // transform and interleave - q channel
            __fp16 *tmp = (__fp16 *)csi_mem_alloc(8 * 8 * 8 * sizeof(__fp16));
            // __fp16 tmp[512] = {0.0};
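/*
   Scalar reference for the 1-D BT transform applied below (a sketch only;
   the name wg64_bt_1d is illustrative, not part of this file). The asm
   runs this over the 8 rows of a padded 8x8 input tile, then over the 8
   columns, on 8 packed channels at a time:
*/
// static inline void wg64_bt_1d(const __fp16 r[8], __fp16 t[8])
// {
//     __fp16 tmp12a = (r[2] + r[6]) - 4.25f * r[4];
//     __fp16 tmp12b = (r[1] + r[5]) - 4.25f * r[3];
//     __fp16 tmp34a = r[6] + 0.25f * r[2] - 1.25f * r[4];
//     __fp16 tmp34b = 0.5f * r[1] - 2.5f * r[3] + 2.0f * r[5];
//     __fp16 tmp56a = r[6] + 4.0f * (r[2] - 1.25f * r[4]);
//     __fp16 tmp56b = 2.0f * r[1] - 2.5f * r[3] + 0.5f * r[5];
//     t[0] = (r[0] - r[6]) + 5.25f * (r[4] - r[2]);
//     t[1] = tmp12a + tmp12b;
//     t[2] = tmp12a - tmp12b;
//     t[3] = tmp34a + tmp34b;
//     t[4] = tmp34a - tmp34b;
//     t[5] = tmp56a + tmp56b;
//     t[6] = tmp56a - tmp56b;
//     t[7] = (r[7] - r[1]) + 5.25f * (r[3] - r[5]);
// }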
for(int i = 0; i < block_h; i++) { for(int j = 0; j < block_w; j++) { __fp16 *r0 = img0 + (i * padded_in_w * 6 + j * 6) * 8; // feature map after padding 8*8 start addr __fp16 *r0_tm = img0_tm + (i * block_w + j) * 8; // input_tm1 8*8 block start addr __fp16 ratio[] = {5.25, -4.25, 0.25, -1.25, 4.0, 0.5, -2.5, 2.0}; // note: in fact cannot be output constrain __fp16 *ratio_ptr = ratio; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "li t0, 8\n\t" // m = 8 "mv t5, %2\n\t" // t5 = tmp start addr "slli t1, %4, 4\n\t" // t1 = padded_in_w * 8 * 2bytes "flh fa0, 0(%3)\n\t" // fa0 = 5.25 "flh fa1, 2(%3)\n\t" // fa1 = -4.25 "flh fa2, 4(%3)\n\t" // fa2 = 0.25 "flh fa3, 6(%3)\n\t" // fa3 = -1.25 "flh fa4, 8(%3)\n\t" // fa4 = 4.0 "flh fa5, 10(%3)\n\t" // fa5 = 0.5 "flh fa6, 12(%3)\n\t" // fa6 = -2.5 "flh fa7, 14(%3)\n\t" // fa7 = 2.0 "1:\n\t" "mv s1, %0\n\t" // s1 = r00 addr "mv a0, t5\n\t" // tmp[0][m] "addi a1, a0, 128\n\t" // tmp[1][m] "addi a2, a1, 128\n\t" // tmp[2][m] "addi a3, a2, 128\n\t" // tmp[3][m] "addi a4, a3, 128\n\t" // tmp[4][m] "addi a5, a4, 128\n\t" // tmp[5][m] "addi a6, a5, 128\n\t" // tmp[6][m] "addi a7, a6, 128\n\t" // tmp[7][m] "vle.v v0, (s1)\n\t" // r00 "addi s1, s1, 16\n\t" "vle.v v1, (s1)\n\t" // r01 "addi s1, s1, 16\n\t" "vle.v v2, (s1)\n\t" // r02 "addi s1, s1, 16\n\t" "vle.v v3, (s1)\n\t" // r03 "addi s1, s1, 16\n\t" "vle.v v4, (s1)\n\t" // r04 "addi s1, s1, 16\n\t" "vle.v v5, (s1)\n\t" // r05 "addi s1, s1, 16\n\t" "vle.v v6, (s1)\n\t" // r06 "addi s1, s1, 16\n\t" "vle.v v7, (s1)\n\t" // r07 "addi s1, s1, 16\n\t" "vmv.v.v v10, v6\n\t" //--------------------------------------------- "vfsub.vv v8, v4, v2\n\t" // r04 - r02 "vfsub.vv v9, v3, v5\n\t" // r03 - r05 "vfsub.vv v24, v0, v6\n\t" // r00 - r06 "vfsub.vv v31, v7, v1\n\t" // r07 - r01 "vfmacc.vf v10, fa2, v2\n\t" // r06 + r02 * 0.25f "vfmul.vf v11, v1, fa5\n\t" // r01 * 0.5f "vfmul.vf v12, v1, fa7\n\t" // r01 * 2.0f "vfmacc.vf v24, fa0, v8\n\t" // r00 - r06 + 5.25 * (r04 - r02) = tmp[0][m] "vfmacc.vf v31, fa0, v9\n\t" // r07 - r01 + 5.25 * (r03 - r05) = tmp[7][m] //--------------------------------------------- "vfadd.vv v8, v2, v6\n\t" // r02 + r06 "vfadd.vv v9, v1, v5\n\t" // r01 + r05 "vfmacc.vf v11, fa6, v3\n\t" // r01 * 0.5f - r03 * 2.5f "vfmacc.vf v12, fa6, v3\n\t" // r01 * 2.f - r03 * 2.5f "vfmacc.vf v2, fa3, v4\n\t" // r02 - r04 * 1.25f 注意 "vfmacc.vf v10, fa3, v4\n\t" // r06 + r02 * 0.25f - r04 * 1.25f = tmp34a "vfmacc.vf v8, fa1, v4\n\t" // r02 + r06 - r04 * 4.25f = tmp12a "vfmacc.vf v9, fa1, v3\n\t" // r01 + r05 - r03 * 4.25f = tmp12b "vfmacc.vf v11, fa7, v5\n\t" // r01 * 0.5f - r03 * 2.5f + r05 * 2.0 = tmp34b "vfmacc.vf v12, fa5, v5\n\t" // r01 * 2.f - r03 * 2.5f + r05 * 0.5 = tmp56b "vse.v v24, (a0)\n\t" "vse.v v31, (a7)\n\t" "vfadd.vv v25, v8, v9\n\t" // tmp12a + tmp12b = tmp[1][m] "vfsub.vv v26, v8, v9\n\t" // tmp12a - tmp12b = tmp[2][m] //--------------------------------------------- "vfmacc.vf v6, fa4, v2\n\t" // r06 + (r02 - r04 * 1.25f) * 4 = tmp56a "vfadd.vv v27, v10, v11\n\t" // tmp34a + tmp34b = tmp[3][m] "vfsub.vv v28, v10, v11\n\t" // tmp34a - tmp34b = tmp[4][m] "vfadd.vv v29, v6, v12\n\t" // tmp56a + tmp56b = tmp[5][m] "vfsub.vv v30, v6, v12\n\t" // tmp56a - tmp56b = tmp[6][m] "vse.v v25, (a1)\n\t" "vse.v v26, (a2)\n\t" "vse.v v27, (a3)\n\t" "vse.v v28, (a4)\n\t" "vse.v v29, (a5)\n\t" "vse.v v30, (a6)\n\t" //--------------------------------------------- "add %0, %0, t1\n\t" // padding feature map 8*8 next line addr "addi t5, t5, 16\n\t" // tmp[0][0] --> tmp[0][1] "addi t0, t0, -1\n\t" "bnez t0, 
1b\n\t" "2:\n\t" "mv t5, %2\n\t" // tmp start addr "li t0, 8\n\t" // m = 8 "slli t1, %5, 4\n\t" // t1 = tiles * 8 * 2 bytes "slli t2, %5, 7\n\t" // t2 = tiles * 8 * 8 * 2 bytes "3:\n\t" "mv a0, %1\n\t" // r0_tm_0 "add a1, a0, t1\n\t" // r0_tm_1 "add a2, a1, t1\n\t" // r0_tm_2 "add a3, a2, t1\n\t" // r0_tm_3 "add a4, a3, t1\n\t" // r0_tm_4 "add a5, a4, t1\n\t" // r0_tm_5 "add a6, a5, t1\n\t" // r0_tm_6 "add a7, a6, t1\n\t" // r0_tm_7 "vle.v v0, (t5)\n\t" // tmp[m][0] "addi t5, t5, 16\n\t" "vle.v v1, (t5)\n\t" // tmp[m][1] "addi t5, t5, 16\n\t" "vle.v v2, (t5)\n\t" // tmp[m][2] "addi t5, t5, 16\n\t" "vle.v v3, (t5)\n\t" // tmp[m][3] "addi t5, t5, 16\n\t" "vle.v v4, (t5)\n\t" // tmp[m][4] "addi t5, t5, 16\n\t" "vle.v v5, (t5)\n\t" // tmp[m][5] "addi t5, t5, 16\n\t" "vle.v v6, (t5)\n\t" // tmp[m][6] "addi t5, t5, 16\n\t" "vle.v v7, (t5)\n\t" // tmp[m][7] "addi t5, t5, 16\n\t" "vmv.v.v v10, v6\n\t" //--------------------------------------------- "vfsub.vv v8, v4, v2\n\t" // tmp04 - tmp02 (tmp[m][4] - tmp[m][2]) "vfsub.vv v9, v3, v5\n\t" // tmp03 - tmp05 "vfsub.vv v24, v0, v6\n\t" // tmp00 - tmp06 "vfsub.vv v31, v7, v1\n\t" // tmp07 - tmp01 "vfmacc.vf v10, fa2, v2\n\t" // tmp06 + tmp02 * 0.25f "vfmul.vf v11, v1, fa5\n\t" // tmp01 * 0.5f "vfmul.vf v12, v1, fa7\n\t" // tmp01 * 2.0f "vfmacc.vf v24, fa0, v8\n\t" // tmp00 - tmp06 + 5.25 * (tmp04 - tmp02) = r0_tm_0[m] "vfmacc.vf v31, fa0, v9\n\t" // tmp07 - tmp01 + 5.25 * (tmp03 - tmp05) = r0_tm_7[m] //--------------------------------------------- "vfadd.vv v8, v2, v6\n\t" // tmp02 + tmp06 "vfadd.vv v9, v1, v5\n\t" // tmp01 + tmp05 "vfmacc.vf v11, fa6, v3\n\t" // tmp01 * 0.5f - tmp03 * 2.5f "vfmacc.vf v12, fa6, v3\n\t" // tmp01 * 2.f - tmp03 * 2.5f "vfmacc.vf v2, fa3, v4\n\t" // tmp02 - tmp04 * 1.25f "vfmacc.vf v10, fa3, v4\n\t" // tmp06 + tmp02 * 0.25f - tmp04 * 1.25f = tmp34a "vfmacc.vf v8, fa1, v4\n\t" // tmp02 + tmp06 - tmp04 * 4.25f = tmp12a "vfmacc.vf v9, fa1, v3\n\t" // tmp01 + tmp05 - tmp03 * 4.25f = tmp12b "vfmacc.vf v11, fa7, v5\n\t" // tmp01 * 0.5f - tmp03 * 2.5f + tmp05 * 2.0 = tmp34b "vfmacc.vf v12, fa5, v5\n\t" // tmp01 * 2.f - tmp03 * 2.5f + tmp05 * 0.5 = tmp56b "vse.v v24, (a0)\n\t" "vse.v v31, (a7)\n\t" "vfadd.vv v25, v8, v9\n\t" // tmp12a + tmp12b = r0_tm_1[m] "vfsub.vv v26, v8, v9\n\t" // tmp12a - tmp12b = r0_tm_2[m] //--------------------------------------------- "vfmacc.vf v6, fa4, v2\n\t" // tmp06 + (tmp02 - tmp04 * 1.25f) * 4 = tmp56a "vfadd.vv v27, v10, v11\n\t" // tmp34a + tmp34b = r0_tm_3[m] "vfsub.vv v28, v10, v11\n\t" // tmp34a - tmp34b = r0_tm_4[m] "vfadd.vv v29, v6, v12\n\t" // tmp56a + tmp56b = r0_tm_5[m] "vfsub.vv v30, v6, v12\n\t" // tmp56a - tmp56b = r0_tm_6[m] "vse.v v25, (a1)\n\t" "vse.v v26, (a2)\n\t" "vse.v v27, (a3)\n\t" "vse.v v28, (a4)\n\t" "vse.v v29, (a5)\n\t" "vse.v v30, (a6)\n\t" "add %1, %1, t2\n\t" "addi t0, t0, -1\n\t" "bnez t0, 3b" :"=r"(r0), // %0 "=r"(r0_tm), // %1 "=r"(tmp), // %2 "=r"(ratio_ptr), // %3 "=r"(padded_in_w), // %4 "=r"(tiles) // %5 :"0"(r0), "1"(r0_tm), "2"(tmp), "3"(ratio_ptr), "4"(padded_in_w), "5"(tiles) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "t0", "t1", "t2", "t5", "s1", "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", "fa6", "fa7" ); } } csi_mem_free(tmp); } csi_mem_free(input_padd_buf); /*********************************** dot ***************************************/ // reorder input_tm1_buf __fp16 *input_tm2_buf = (__fp16 
*)csi_mem_alloc(64 * tiles * in_c * sizeof(__fp16)); for (int r = 0; r < 64; r++) { __fp16 *img_tm2 = input_tm2_buf + r * tiles * in_c; // input_tm2 r channel data int t = 0; for (; t + 7 < tiles; t += 8) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; //---------------------------- // for (int q = 0; q < in_c / 8; q++) { // for (int l = 0; l < 8; l++) { // tm2[0] = tm1[l]; // tm2[1] = tm1[l + 8 * 1]; // tm2[2] = tm1[l + 8 * 2]; // tm2[3] = tm1[l + 8 * 3]; // tm2[4] = tm1[l + 8 * 4]; // tm2[5] = tm1[l + 8 * 5]; // tm2[6] = tm1[l + 8 * 6]; // tm2[7] = tm1[l + 8 * 7]; // tm2 += 8; // } // tm1 += 64 * tiles * 8; // } //----------------------------- asm volatile( "vsetvli zero, zero, e16, m1\n\t" "slli t1, %2, 10\n\t" // 64 * tiles * 8 * 2 bytes "srai t2, %3, 3\n\t" // in_ch8 "1:\n\t" // in_ch loop8 "mv a0, %1\n\t" // updata tm1 addr "vle.v v0, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v1, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v2, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v3, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v4, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v5, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v6, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v7, (a0)\n\t" "vsseg8e.v v0, (%0)\n\t" "add %1, %1, t1\n\t" "addi %0, %0, 128\n\t" "addi t2, t2, -1\n\t" "bnez t2, 1b\n\t" :"=r"(tm2), // %0 "=r"(tm1), // %1 "=r"(tiles), // %2 "=r"(in_c) // %3 :"0"(tm2), "1"(tm1), "2"(tiles), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "a0", "t1", "t2" ); } for (; t + 3 < tiles; t += 4) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; // for (int q = 0; q < in_c / 8; q++) { // for (int l = 0; l < 8; l++) { // tm2[0] = tm1[l]; // tm2[1] = tm1[l + 8 * 1]; // tm2[2] = tm1[l + 8 * 2]; // tm2[3] = tm1[l + 8 * 3]; // tm2 += 4; // } // tm1 += 64 * tiles * 8; // } asm volatile( "vsetvli zero, zero, e16, m1\n\t" "slli t1, %2, 10\n\t" // 64 * tiles * 8 * 2 bytes "srai t2, %3, 3\n\t" // in_ch8 "1:\n\t" // in_ch loop8 "mv a0, %1\n\t" // updata tm1 addr "vle.v v0, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v1, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v2, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v3, (a0)\n\t" "vsseg4e.v v0, (%0)\n\t" "add %1, %1, t1\n\t" "addi %0, %0, 64\n\t" "addi t2, t2, -1\n\t" "bnez t2, 1b\n\t" :"=r"(tm2), // %0 "=r"(tm1), // %1 "=r"(tiles), // %2 "=r"(in_c) // %3 :"0"(tm2), "1"(tm1), "2"(tiles), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "v3", "a0", "t1", "t2" ); } for (; t + 1 < tiles; t += 2) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; // for (int q = 0; q < in_c / 8; q++) { // for (int l = 0; l < 8; l++) { // tm2[0] = tm1[l]; // tm2[1] = tm1[l + 8]; // tm2 += 2; // } // tm1 += 64 * tiles * 8; // } asm volatile( "vsetvli zero, zero, e16, m1\n\t" "slli t1, %2, 10\n\t" // 64 * tiles * 8 * 2 bytes "srai t2, %3, 3\n\t" // in_ch8 "1:\n\t" // in_ch loop8 "mv a0, %1\n\t" // updata tm1 addr "vle.v v0, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v1, (a0)\n\t" "vsseg2e.v v0, (%0)\n\t" "add %1, %1, t1\n\t" "addi %0, %0, 32\n\t" "addi t2, t2, -1\n\t" "bnez t2, 1b\n\t" :"=r"(tm2), // %0 "=r"(tm1), // %1 "=r"(tiles), // %2 "=r"(in_c) // %3 :"0"(tm2), "1"(tm1), "2"(tiles), "3"(in_c) :"cc", "memory", "v0", "v1", "a0", "t1", "t2" ); } for (; t < tiles; t++) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; // for (int q = 0; q < in_c / 8; q++) { // for (int l 
= 0; l < 8; l++) { // tm2[0] = tm1[l]; // tm2++; // } // tm1 += 64 * tiles * 8; // } asm volatile( "vsetvli zero, zero, e16, m1\n\t" "slli t1, %2, 10\n\t" // 64 * tiles * 8 * 2 bytes "srai t2, %3, 3\n\t" // in_ch8 "1:\n\t" // in_ch loop8 "mv a0, %1\n\t" // updata tm1 addr "vle.v v0, (a0)\n\t" "addi a0, a0, 16\n\t" "vse.v v0, (%0)\n\t" "add %1, %1, t1\n\t" "addi %0, %0, 16\n\t" "addi t2, t2, -1\n\t" "bnez t2, 1b\n\t" :"=r"(tm2), // %0 "=r"(tm1), // %1 "=r"(tiles), // %2 "=r"(in_c) // %3 :"0"(tm2), "1"(tm1), "2"(tiles), "3"(in_c) :"cc", "memory", "v0", "a0", "t1", "t2" ); } } csi_mem_free(input_tm1_buf); // output_dot_buf: [out_c/8, 64, blocks, 8] __fp16 *output_dot_buf = (__fp16 *)csi_mem_alloc(out_c * block_h * block_w * 8 * 8 * sizeof(__fp16)); for (int p = 0; p < out_c / 8; p++) { __fp16 *output0_tm = output_dot_buf + p * 64 * tiles * 8; __fp16 *kernel0_tm = kernel_data + p * 64 * in_c * 8; for (int r = 0; r < 64; r++) { __fp16 *img_tm2 = input_tm2_buf + r * tiles * in_c; // img_tm2 第r个channel int t = 0; for (; t + 7 < tiles; t += 8) { __fp16 *r0 = img_tm2 + t * in_c; __fp16 *k0 = kernel0_tm + r * in_c * 8; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mv t0, %3\n\t" // t0 = in_c "vmv.v.x v0, zero\n\t" "vmv.v.x v1, zero\n\t" "vmv.v.x v2, zero\n\t" "vmv.v.x v3, zero\n\t" "vmv.v.x v4, zero\n\t" "vmv.v.x v5, zero\n\t" "vmv.v.x v6, zero\n\t" "vmv.v.x v7, zero\n\t" // clear "1:\n\t" "vle.v v8, (%1)\n\t" "addi %1, %1, 16\n\t" "flh fa0, (%0)\n\t" "flh fa1, 2(%0)\n\t" "flh fa2, 4(%0)\n\t" "flh fa3, 6(%0)\n\t" "flh fa4, 8(%0)\n\t" "flh fa5, 10(%0)\n\t" "flh fa6, 12(%0)\n\t" "flh fa7, 14(%0)\n\t" "addi %0, %0, 16\n\t" "vfmacc.vf v0, fa0, v8\n\t" "vfmacc.vf v1, fa1, v8\n\t" "vfmacc.vf v2, fa2, v8\n\t" "vfmacc.vf v3, fa3, v8\n\t" "vfmacc.vf v4, fa4, v8\n\t" "vfmacc.vf v5, fa5, v8\n\t" "vfmacc.vf v6, fa6, v8\n\t" "vfmacc.vf v7, fa7, v8\n\t" "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v1, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v2, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v3, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v4, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v5, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v6, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v7, (%2)\n\t" "addi %2, %2, 16\n\t" :"=r"(r0), // %0 "=r"(k0), // %1 "=r"(output0_tm), // %2 "=r"(in_c) // %3 :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", "fa6", "fa7", "t0" ); } for (; t + 3 < tiles; t += 4) { __fp16 *r0 = img_tm2 + t * in_c; __fp16 *k0 = kernel0_tm + r * in_c * 8; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mv t0, %3\n\t" // t0 = in_c "vmv.v.x v0, zero\n\t" "vmv.v.x v1, zero\n\t" "vmv.v.x v2, zero\n\t" "vmv.v.x v3, zero\n\t" // clear "1:\n\t" "vle.v v4, (%1)\n\t" "addi %1, %1, 16\n\t" "flh fa0, (%0)\n\t" "flh fa1, 2(%0)\n\t" "flh fa2, 4(%0)\n\t" "flh fa3, 6(%0)\n\t" "addi %0, %0, 8\n\t" "vfmacc.vf v0, fa0, v4\n\t" "vfmacc.vf v1, fa1, v4\n\t" "vfmacc.vf v2, fa2, v4\n\t" "vfmacc.vf v3, fa3, v4\n\t" "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v1, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v2, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v3, (%2)\n\t" "addi %2, %2, 16\n\t" :"=r"(r0), // %0 "=r"(k0), // %1 "=r"(output0_tm), // %2 "=r"(in_c) // %3 :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "fa0", "fa1", "fa2", "fa3", "t0" ); } for (; t + 1 < tiles; t += 2) { __fp16 *r0 = img_tm2 + t * in_c; __fp16 *k0 = 
kernel0_tm + r * in_c * 8;
asm volatile(
    "vsetvli zero, zero, e16, m1\n\t"
    "mv t0, %3\n\t"             // t0 = in_c
    "vmv.v.x v0, zero\n\t"
    "vmv.v.x v1, zero\n\t"      // clear
    "1:\n\t"
    "vle.v v2, (%1)\n\t"
    "addi %1, %1, 16\n\t"
    "flh fa0, (%0)\n\t"
    "flh fa1, 2(%0)\n\t"
    "addi %0, %0, 4\n\t"
    "vfmacc.vf v0, fa0, v2\n\t"
    "vfmacc.vf v1, fa1, v2\n\t"
    "addi t0, t0, -1\n\t"
    "bnez t0, 1b\n\t"
    "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t"
    "vse.v v1, (%2)\n\t" "addi %2, %2, 16\n\t"
    :"=r"(r0),              // %0
    "=r"(k0),               // %1
    "=r"(output0_tm),       // %2
    "=r"(in_c)              // %3
    :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c)
    :"cc", "memory", "v0", "v1", "v2", "fa0", "fa1", "t0"
);
}
for (; t < tiles; t++) {
    __fp16 *r0 = img_tm2 + t * in_c;
    __fp16 *k0 = kernel0_tm + r * in_c * 8;
    asm volatile(
        "vsetvli zero, zero, e16, m1\n\t"
        "mv t0, %3\n\t"             // t0 = in_c
        "vmv.v.x v0, zero\n\t"      // clear
        "1:\n\t"
        "vle.v v1, (%1)\n\t"
        "addi %1, %1, 16\n\t"
        "flh fa0, (%0)\n\t"
        "addi %0, %0, 2\n\t"
        "vfmacc.vf v0, fa0, v1\n\t"
        "addi t0, t0, -1\n\t"
        "bnez t0, 1b\n\t"
        "vse.v v0, (%2)\n\t"
        "addi %2, %2, 16\n\t"
        :"=r"(r0),              // %0
        "=r"(k0),               // %1
        "=r"(output0_tm),       // %2
        "=r"(in_c)              // %3
        :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c)
        :"cc", "memory", "v0", "v1", "fa0", "t0"
    );
}
} }
csi_mem_free(input_tm2_buf);

/*************************** transform output ****************************/
// output_tm1_buf: [out_c/8, out_h6, out_w6, 8]
__fp16 *output_tm1_buf = (__fp16 *)csi_mem_alloc(out_c * block_h * block_w * 6 * 6 * sizeof(__fp16));

/*
AT = {
    { 1  1  1   1   1   1     1     0 };
    { 0  1  -1  2   -2  1/2   -1/2  0 };
    { 0  1  1   4   4   1/4   1/4   0 };
    { 0  1  -1  8   -8  1/8   -1/8  0 };
    { 0  1  1   16  16  1/16  1/16  0 };
    { 0  1  -1  32  -32 1/32  -1/32 1 }
};

AT = {
    { 1  1  1   1   1   32  32  0 };
    { 0  1  -1  2   -2  16  -16 0 };
    { 0  1  1   4   4   8   8   0 };
    { 0  1  -1  8   -8  4   -4  0 };
    { 0  1  1   16  16  2   2   0 };
    { 0  1  -1  32  -32 1   -1  1 }
};
*/

for (int p = 0; p < out_c / 8; p++) {
    __fp16 *bias_tmp = bias_data + p * 8;
    __fp16 *out0_tm = output_dot_buf + p * 64 * block_h * block_w * 8;  // p-th channel: dot result, before output transform
    __fp16 *out0 = output_tm1_buf + p * 6*block_h * 6*block_w * 8;      // p-th channel of the transformed output
    __fp16 *tmp1 = (__fp16 *)csi_mem_alloc(6 * 8 * 8 * sizeof(__fp16));
    // __fp16 tmp[6][8][8];
    int out_w6 = block_w * 6;
    for (int i = 0; i < block_h; i++) {
        for (int j = 0; j < block_w; j++) {

            __fp16 *output0_tm_0 = out0_tm + (i * block_w + j) * 8;     // 8*8 block start addr
            __fp16 *output0 = out0 + (i * block_w * 6 * 6 + j * 6) * 8; // output 6*6 block start addr

            __fp16 ratio[] = {2.0, 4.0, 8.0, 16.0, 32.0};
            __fp16 *ratio_ptr = ratio;

            asm volatile(
                "vsetvli zero, zero, e16, m1\n\t"
                "li t0, 8\n\t"              // m = 8
                "mv t5, %2\n\t"             // t5 = tmp start addr
                "slli t1, %4, 4\n\t"        // t1 = tiles * 8 * 2
                "slli t2, %4, 7\n\t"        // t2 = tiles * 8 * 8 * 2 bytes
                "flh fa0, 0(%3)\n\t"        // fa0 = 2
                "flh fa1, 2(%3)\n\t"        // fa1 = 4
                "flh fa2, 4(%3)\n\t"        // fa2 = 8
                "flh fa3, 6(%3)\n\t"        // fa3 = 16
                "flh fa4, 8(%3)\n\t"        // fa4 = 32
                "mv s1, %0\n\t"
                "1:\n\t"                    // shape : [6 * 8] * [8 * 8] = [6 * 8]
                "mv a0, t5\n\t"             // tmp[0][m]
                "addi a1, a0, 128\n\t"      // tmp[1][m]
                "addi a2, a1, 128\n\t"      // tmp[2][m]
                "addi a3, a2, 128\n\t"      // tmp[3][m]
                "addi a4, a3, 128\n\t"      // tmp[4][m]
                "addi a5, a4, 128\n\t"      // tmp[5][m]
                "vle.v v0, (s1)\n\t"        // r00
                "add s1, s1, t1\n\t"
                "vle.v v1, (s1)\n\t"        // r01
                "add s1, s1, t1\n\t"
                "vle.v v2, (s1)\n\t"        // r02
                "add s1, s1, t1\n\t"
                "vle.v v3, (s1)\n\t"        // r03
                "add s1, s1, t1\n\t"
                "vle.v v4, (s1)\n\t"        // r04
                "add s1, s1, t1\n\t"
                "vle.v v5, (s1)\n\t"        // r05
                "add s1, s1, t1\n\t"
                "vle.v v6, (s1)\n\t"        // r06
                "add s1, s1, t1\n\t"
                "vle.v v7, (s1)\n\t"        // r07
                "add s1, s1, t1\n\t"
                //---------------------------------------------
                "vfadd.vv v8,
v1, v2\n\t" // r01 + r02 = tmp024a "vfsub.vv v9, v1, v2\n\t" // r01 - r02 = tmp135a "vfadd.vv v10, v3, v4\n\t" // r03 + r04 = tmp024b "vfsub.vv v11, v3, v4\n\t" // r03 - r04 = tmp135b "vfadd.vv v12, v5, v6\n\t" // r05 + r06 = tmp024c "vfsub.vv v13, v5, v6\n\t" // r05 - r06 = tmp135c "vfadd.vv v0, v0, v8\n\t" // r00 + tmp024a "vfadd.vv v7, v7, v9\n\t" // r07 + tmp135a "vmv.v.v v14, v10\n\t" // v14 = tmp024b "vmv.v.v v26, v8\n\t" // v26 = tmp024a "vmv.v.v v28, v8\n\t" // v28 = tmp024a "vfmacc.vf v26, fa1, v10\n\t" // tmp024a + tmp024b * 4 "vfmacc.vf v14, fa4, v12\n\t" // tmp024b + tmp024c * 32 "vfmacc.vf v28, fa3, v10\n\t" // tmp024a + tmp024b * 16 "vmv.v.v v15, v13\n\t" // v15 = tmp135c "vmv.v.v v25, v9\n\t" // v25 = tmp135a "vmv.v.v v27, v9\n\t" // v27 = tmp135a "vfadd.vv v24, v0, v14\n\t" // r00 + tmp024a + tmp024b + tmp024c * 32 = tmp[0][m] "vfmacc.vf v25, fa0, v11\n\t" // tmp135a + tmp135b * 2 "vfmacc.vf v27, fa2, v11\n\t" // tmp135a + tmp135b * 8 //--------------------------------------------- "vse.v v24, (a0)\n\t" "vfmacc.vf v26, fa2, v12\n\t" // tmp024a + tmp024b * 4 + tmp024c * 8 = tmp[2][m] "vfmacc.vf v28, fa0, v12\n\t" // tmp024a + tmp024b * 16 + tmp024c + tmp024c = tmp[4][m] "vfmacc.vf v15, fa4, v11\n\t" // tmp135b * 32 + tmp135c "vse.v v26, (a2)\n\t" "vse.v v28, (a4)\n\t" //--------------------------------------------- "vfmacc.vf v25, fa3, v13\n\t" // tmp135a + tmp135b * 2 + tmp135c * 16 = tmp[1][m] "vfmacc.vf v27, fa1, v13\n\t" // tmp135a + tmp135b * 8 + tmp135c * 4 = tmp[3][m] "vfadd.vv v29, v7, v15\n\t" // r07 + tmp135a + tmp135b * 32 + tmp135c "vse.v v25, (a1)\n\t" "vse.v v27, (a3)\n\t" "vse.v v29, (a5)\n\t" "addi t5, t5, 16\n\t" // tmp[0][0] --> tmp[0][1] "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "2:\n\t" "mv t5, %2\n\t" // tmp start addr "li t0, 6\n\t" // m = 6 "slli t1, %5, 4\n\t" // t1 = out_w6 * 8 * 2bytes "vle.v v16, (%6)\n\t" // load 8 channel bias data "3:\n\t" // shape : [6 * 8] * [6 * 8] = [6 * 6] "mv a0, %1\n\t" "addi a1, a0, 16\n\t" "addi a2, a1, 16\n\t" "addi a3, a2, 16\n\t" "addi a4, a3, 16\n\t" "addi a5, a4, 16\n\t" "vle.v v0, (t5)\n\t" // tmp[m][0] "addi t5, t5, 16\n\t" "vle.v v1, (t5)\n\t" // tmp[m][1] "addi t5, t5, 16\n\t" "vle.v v2, (t5)\n\t" // tmp[m][2] "addi t5, t5, 16\n\t" "vle.v v3, (t5)\n\t" // tmp[m][3] "addi t5, t5, 16\n\t" "vle.v v4, (t5)\n\t" // tmp[m][4] "addi t5, t5, 16\n\t" "vle.v v5, (t5)\n\t" // tmp[m][5] "addi t5, t5, 16\n\t" "vle.v v6, (t5)\n\t" // tmp[m][6] "addi t5, t5, 16\n\t" "vle.v v7, (t5)\n\t" // tmp[m][7] "addi t5, t5, 16\n\t" //--------------------------------------------- "vfadd.vv v8, v1, v2\n\t" // tmp[m][1] + tmp[m][2] = tmp024a "vfsub.vv v9, v1, v2\n\t" // tmp[m][1] - tmp[m][2] = tmp135a "vfadd.vv v10, v3, v4\n\t" // tmp[m][3] + tmp[m][4] = tmp024b "vfsub.vv v11, v3, v4\n\t" // tmp[m][3] - tmp[m][4] = tmp135b "vfadd.vv v12, v5, v6\n\t" // tmp[m][5] + tmp[m][6] = tmp024c "vfsub.vv v13, v5, v6\n\t" // tmp[m][5] - tmp[m][6] = tmp135c "vfadd.vv v0, v0, v8\n\t" // tmp[m][0] + tmp024a "vfadd.vv v7, v7, v9\n\t" // tmp[m][7] + tmp135a "vmv.v.v v14, v10\n\t" // v14 = tmp024b "vmv.v.v v26, v8\n\t" // v26 = tmp024a "vmv.v.v v28, v8\n\t" // v28 = tmp024a "vfmacc.vf v26, fa1, v10\n\t" // tmp024a + tmp024b * 4 "vfmacc.vf v14, fa4, v12\n\t" // tmp024b + tmp024c * 32 "vfmacc.vf v28, fa3, v10\n\t" // tmp024a + tmp024b * 16 "vmv.v.v v15, v13\n\t" // v15 = tmp135c "vmv.v.v v25, v9\n\t" // v25 = tmp135a "vmv.v.v v27, v9\n\t" // v27 = tmp135a "vfadd.vv v24, v0, v14\n\t" // tmp[m][0] + tmp024a + tmp024b + tmp024c * 32 = tmp[0][m] "vfmacc.vf v25, 
fa0, v11\n\t" // tmp135a + tmp135b * 2 "vfmacc.vf v27, fa2, v11\n\t" // tmp135a + tmp135b * 8 //--------------------------------------------- "vfadd.vv v24, v24, v16\n\t" // + bias "vfmacc.vf v26, fa2, v12\n\t" // tmp024a + tmp024b * 4 + tmp024c * 8 = tmp[2][m] "vfmacc.vf v28, fa0, v12\n\t" // tmp024a + tmp024b * 16 + tmp024c + tmp024c = tmp[4][m] "vfmacc.vf v15, fa4, v11\n\t" // tmp135b * 32 + tmp135c "vse.v v24, (a0)\n\t" "vfmacc.vf v25, fa3, v13\n\t" // tmp135a + tmp135b * 2 + tmp135c * 16 = tmp[1][m] "vfmacc.vf v27, fa1, v13\n\t" // tmp135a + tmp135b * 8 + tmp135c * 4 = tmp[3][m] "vfadd.vv v26, v26, v16\n\t" // + bias "vfadd.vv v28, v28, v16\n\t" // + bias "vfadd.vv v29, v7, v15\n\t" // tmp[m][7] + tmp135a + tmp135b * 32 + tmp135c "vse.v v26, (a2)\n\t" "vse.v v28, (a4)\n\t" //--------------------------------------------- "vfadd.vv v25, v25, v16\n\t" // + bias "vfadd.vv v27, v27, v16\n\t" // + bias "vfadd.vv v29, v29, v16\n\t" // + bias "vse.v v25, (a1)\n\t" "vse.v v27, (a3)\n\t" "vse.v v29, (a5)\n\t" "add %1, %1, t1\n\t" "addi t0, t0, -1\n\t" "bnez t0, 3b" :"=r"(output0_tm_0), // %0 "=r"(output0), // %1 "=r"(tmp1), // %2 "=r"(ratio_ptr), // %3 "=r"(tiles), // %4 "=r"(out_w6), // %5 "=r"(bias_tmp) // %6 :"0"(output0_tm_0), "1"(output0), "2"(tmp1), "3"(ratio_ptr), "4"(tiles), "5"(out_w6), "6"(bias_tmp) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v24", "v25", "v26", "v27", "v28", "v29", "t0", "t1", "t2", "t5", "s1", "a0", "a1", "a2", "a3", "a4", "a5", "fa0", "fa1", "fa2", "fa3", "fa4" ); } } csi_mem_free(tmp1); } csi_mem_free(output_dot_buf); // crop the output after transform: cut extra part (right , bottom) csi_c906_crop_output_pack8to1_fp16(output_tm1_buf, output_data, out_c, out_h, out_w, block_h * 6, block_w * 6); output_data += output_size; csi_mem_free(output_tm1_buf); } if (!flag_bias) { csi_mem_free(bias_data); bias_data = NULL; } return CSINN_TRUE; } void csi_c906_conv3x3s1_winograd43_transform_kernel_pack8_fp16(struct csi_tensor *o_kernel, struct csi_tensor *t_kernel) { int32_t outch = o_kernel->dim[0]; int32_t inch = o_kernel->dim[1]; __fp16 *kernel_data = (__fp16 *)o_kernel->data; // for kernel transform buf, 3x3 --> 6x6 __fp16 *kernel_tm = (__fp16 *)csi_mem_alloc(outch * inch * 6 * 6 * sizeof(__fp16)); // kernel transform matrix: G const __fp16 ktm[6][3] = { { 1.0f/4, 0.0f, 0.0f}, { -1.0f/6, -1.0f/6, -1.0f/6}, { -1.0f/6, 1.0f/6, -1.0f/6}, { 1.0f/24, 1.0f/12, 1.0f/6}, { 1.0f/24, -1.0f/12, 1.0f/6}, { 0.0f, 0.0f, 1.0f} }; csi_tensor_copy(t_kernel, o_kernel); for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const __fp16* kernel0 = kernel_data + p * inch * 9 + q * 9; __fp16* kernel_tm0 = kernel_tm + p * inch * 36 + q * 36; // transform kernel const __fp16 *k0 = kernel0; const __fp16 *k1 = kernel0 + 3; const __fp16 *k2 = kernel0 + 6; // h : first compute the transport matrix tmp = (g * GT)T __fp16 tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { __fp16* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // [O, I, 6, 6] --> [O/4, 6*6, I, 4] __fp16 *kernel_tm_pack4 = (__fp16 *)csi_mem_alloc(outch * inch * 6 * 6 * sizeof(__fp16)); t_kernel->data = 
kernel_tm_pack4; for (int oc = 0; oc < outch / 8; oc++) { __fp16 *g0 = kernel_tm_pack4 + oc * 36 * inch * 8; const __fp16 *k0 = kernel_tm + oc * 36 * inch * 8; const __fp16 *k1 = k0 + 36 * inch; const __fp16 *k2 = k1 + 36 * inch; const __fp16 *k3 = k2 + 36 * inch; const __fp16 *k4 = k3 + 36 * inch; const __fp16 *k5 = k4 + 36 * inch; const __fp16 *k6 = k5 + 36 * inch; const __fp16 *k7 = k6 + 36 * inch; for (int k = 0; k < 36; k++) { __fp16 *g00 = g0 + k * inch * 8; for (int ic = 0; ic < inch / 8; ic++) { for (int i = 0; i < 8; i++) { const __fp16 *k00 = k0 + (ic * 8 + i) * 36; const __fp16 *k10 = k1 + (ic * 8 + i) * 36; const __fp16 *k20 = k2 + (ic * 8 + i) * 36; const __fp16 *k30 = k3 + (ic * 8 + i) * 36; const __fp16 *k40 = k4 + (ic * 8 + i) * 36; const __fp16 *k50 = k5 + (ic * 8 + i) * 36; const __fp16 *k60 = k6 + (ic * 8 + i) * 36; const __fp16 *k70 = k7 + (ic * 8 + i) * 36; g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k40[k]; g00[5] = k50[k]; g00[6] = k60[k]; g00[7] = k70[k]; g00 += 8; } } } } csi_mem_free(kernel_tm); } int csi_c906_conv3x3s1_winograd43_pack8_fp16(struct csi_tensor *input, struct csi_tensor *output, struct csi_tensor *kernel, struct csi_tensor *bias, struct conv2d_params *params) { __fp16 *input_data = (__fp16 *)input->data; __fp16 *output_data = (__fp16 *)output->data; __fp16 *kernel_data = (__fp16 *)params->conv_extra.kernel_tm->data; __fp16 *bias_data = (__fp16 *)bias->data; // param int kernel_h = kernel->dim[2]; int kernel_w = kernel->dim[3]; int stride_h = params->stride_height; int stride_w = params->stride_width; int dilation_h = params->dilation_height; int dilation_w = params->dilation_width; int pad_left = params->pad_left; int pad_top = params->pad_top; int batch = input->dim[0]; int in_c = input->dim[1]; int in_h = input->dim[2]; int in_w = input->dim[3]; int input_size = in_c * in_h * in_w; int kernel_size = in_c * kernel_h * kernel_w; int out_c = kernel->dim[0]; int out_h = output->dim[2]; int out_w = output->dim[3]; int output_size = out_c * out_h * out_w; // winograd param int block_h = (out_h + 3) / 4; int block_w = (out_w + 3) / 4; int padded_in_h = block_h * 4 + 2; // block * 4 for alignment with 4,kernel = 3 * 3, stride = 1,thus input_size + 2 int padded_in_w = block_w * 4 + 2; int padded_in_hw = padded_in_h * padded_in_w; // element size after padding per channel /****************************** bias *****************************/ bool flag_bias = 1; // default: conv2d layer include bias if (bias_data == NULL) { flag_bias = 0; bias_data = (__fp16 *)csi_mem_alloc(out_c * sizeof(__fp16)); } for(int n = 0; n < batch; n++) { // pad buffer: [in_c/4 h w 4] __fp16 *input_padd_buf = (__fp16 *)csi_mem_alloc(in_c * padded_in_hw * sizeof(__fp16)); // pad input csi_c906_pad_input_pack1to8_fp16(input_data, input_padd_buf, in_c, in_h, in_w, padded_in_h, padded_in_w, pad_top, pad_left); input_data += input_size; // input transform buffer1: [in_ch/4, 36, blocks, 6] __fp16 *input_tm1_buf = (__fp16 *)csi_mem_alloc(in_c * block_h * block_w * 6 * 6 * sizeof(__fp16)); /****************************** transform input *****************************/ /* BT = { { 4 0 -5 0 1 0 }; { 0 -4 -4 1 1 0 }; { 0 4 -4 -1 1 0 }; { 0 -2 -1 2 1 0 }; { 0 2 -1 -2 1 0 }; { 0 4 0 -5 0 1 } }; */ int tiles = block_h * block_w; for(int q = 0; q < in_c / 4; q++) { __fp16 *img0 = input_padd_buf + q * padded_in_h * padded_in_w * 8; // feature map after padding - q channel __fp16 *img0_tm = input_tm1_buf + q * 36 * tiles * 8; // transform and interleave - q channel 
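/*
   Scalar reference for the 1-D BT transform of the F(4,3) path below
   (a sketch only; wg43_bt_1d is an illustrative name, not part of this
   file). Rows first, then columns, on 8 packed channels at a time:
*/
// static inline void wg43_bt_1d(const __fp16 r[6], __fp16 t[6])
// {
//     t[0] = 4.0f * r[0] - 5.0f * r[2] + r[4];        // { 4  0 -5  0 1 0 }
//     t[1] = r[3] + r[4] - 4.0f * (r[1] + r[2]);      // { 0 -4 -4  1 1 0 }
//     t[2] = r[4] - r[3] + 4.0f * (r[1] - r[2]);      // { 0  4 -4 -1 1 0 }
//     t[3] = r[4] - r[2] - 2.0f * (r[1] - r[3]);      // { 0 -2 -1  2 1 0 }
//     t[4] = r[4] - r[2] + 2.0f * (r[1] - r[3]);      // { 0  2 -1 -2 1 0 }
//     t[5] = 4.0f * r[1] - 5.0f * r[3] + r[5];        // { 0  4  0 -5 0 1 }
// }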
__fp16 *tmp = (__fp16 *)csi_mem_alloc(6 * 6 * 8 * sizeof(__fp16)); for(int i = 0; i < block_h; i++) { for(int j = 0; j < block_w; j++) { __fp16 *r0 = img0 + (i * padded_in_w * 4 + j * 4) * 8; // feature map after padding 6*6 start addr __fp16 *r0_tm = img0_tm + (i * block_w + j) * 8; // input_tm1 6*6 block start addr __fp16 ratio[] = {4, -4, 2, -2, -5}; // note: in fact cannot be output constrain __fp16 *ratio_ptr = ratio; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "li t0, 6\n\t" // m = 6 "mv t5, %2\n\t" // t5 = tmp start addr "slli t1, %4, 4\n\t" // t1 = padded_in_w * 8 * 2 bytes "flh fa0, 0(%3)\n\t" // fa0 = 4 "flh fa1, 2(%3)\n\t" // fa1 = -4 "flh fa2, 4(%3)\n\t" // fa2 = 2 "flh fa3, 6(%3)\n\t" // fa3 = -2 "flh fa4, 8(%3)\n\t" // fa4 = -5 "1:\n\t" "mv s1, %0\n\t" // s1 = r00 addr "mv a0, t5\n\t" // tmp[0][m] "addi a1, a0, 96\n\t" // tmp[1][m] "addi a2, a1, 96\n\t" // tmp[2][m] "addi a3, a2, 96\n\t" // tmp[3][m] "addi a4, a3, 96\n\t" // tmp[4][m] "addi a5, a4, 96\n\t" // tmp[5][m] "vle.v v0, (s1)\n\t" // r00 "addi s1, s1, 16\n\t" "vle.v v1, (s1)\n\t" // r01 "addi s1, s1, 16\n\t" "vle.v v2, (s1)\n\t" // r02 "addi s1, s1, 16\n\t" "vle.v v3, (s1)\n\t" // r03 "addi s1, s1, 16\n\t" "vle.v v4, (s1)\n\t" // r04 "addi s1, s1, 16\n\t" "vle.v v5, (s1)\n\t" // r05 "addi s1, s1, 16\n\t" "vmv.v.v v24, v4\n\t" "vmv.v.v v29, v5\n\t" //--------------------------------------------- "vfmacc.vf v24, fa0, v0\n\t" // r04 + 4 * r00 "vfmacc.vf v24, fa4, v2\n\t" // r04 + 4 * r00 - 5 * r02 "vse.v v24, (a0)\n\t" //--------------------------------------------- "vfadd.vv v25, v3, v4\n\t" // r03 + r04 "vfadd.vv v6, v1, v2\n\t" // r01 + r02 "vfmacc.vf v25, fa1, v6\n\t" // r03 + r04 - 4 * (r01 - r02) "vse.v v25, (a1)\n\t" //--------------------------------------------- "vfsub.vv v26, v4, v3\n\t" // r04 - r03 "vfsub.vv v7, v1, v2\n\t" // r01 - r02 "vfmacc.vf v26, fa0, v7\n\t" // r04 - r03 + 4 * (r01 - r02) "vse.v v26, (a2)\n\t" //--------------------------------------------- "vfsub.vv v8, v1, v3\n\t" // r01 - r03 "vfsub.vv v27, v4, v2\n\t" // r04 - r02 "vfsub.vv v28, v4, v2\n\t" // r04 - r02 "vfmacc.vf v27, fa3, v8\n\t" // r04 - r02 - 2 * (r01 - r03) "vse.v v27, (a3)\n\t" "vfmacc.vf v28, fa2, v8\n\t" // r04 - r02 + 2 * (r01 - r03) "vse.v v28, (a4)\n\t" //--------------------------------------------- "vfmacc.vf v29, fa0, v1\n\t" // r05 + 4 * r01 "vfmacc.vf v29, fa4, v3\n\t" // r05 + 4 * r01 - 5 * r03 "vse.v v29, (a5)\n\t" //--------------------------------------------- "add %0, %0, t1\n\t" // padding feature map 6*6 next line addr "addi t5, t5, 16\n\t" // tmp[0][0] --> tmp[0][1] "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "2:\n\t" "mv t5, %2\n\t" // tmp start addr "li t0, 6\n\t" // m = 6 "slli t1, %5, 4\n\t" // t1 = tiles * 8 * 2 bytes "mulw t2, t0, t1\n\t" // t2 = tiles * 6 blocks * 8 channels * 2 bytes "3:\n\t" "mv a0, %1\n\t" // r0_tm_0 "add a1, a0, t1\n\t" // r0_tm_1 "add a2, a1, t1\n\t" // r0_tm_2 "add a3, a2, t1\n\t" // r0_tm_3 "add a4, a3, t1\n\t" // r0_tm_4 "add a5, a4, t1\n\t" // r0_tm_5 "vle.v v0, (t5)\n\t" // tmp[m][0] "addi t5, t5, 16\n\t" "vle.v v1, (t5)\n\t" // tmp[m][1] "addi t5, t5, 16\n\t" "vle.v v2, (t5)\n\t" // tmp[m][2] "addi t5, t5, 16\n\t" "vle.v v3, (t5)\n\t" // tmp[m][3] "addi t5, t5, 16\n\t" "vle.v v4, (t5)\n\t" // tmp[m][4] "addi t5, t5, 16\n\t" "vle.v v5, (t5)\n\t" // tmp[m][5] "addi t5, t5, 16\n\t" "vmv.v.v v24, v4\n\t" "vmv.v.v v29, v5\n\t" //--------------------------------------------- "vfmacc.vf v24, fa0, v0\n\t" // r04 + 4 * r00 "vfmacc.vf v24, fa4, v2\n\t" // r04 * 4 * r00 - 5 * r02 
"vse.v v24, (a0)\n\t" //--------------------------------------------- "vfadd.vv v25, v3, v4\n\t" // r03 + r04 "vfadd.vv v6, v1, v2\n\t" // r01 + r02 "vfmacc.vf v25, fa1, v6\n\t" // r03 + r04 - 4 * (r01 - r02) "vse.v v25, (a1)\n\t" //--------------------------------------------- "vfsub.vv v26, v4, v3\n\t" // r04 - r03 "vfsub.vv v7, v1, v2\n\t" // r01 - r02 "vfmacc.vf v26, fa0, v7\n\t" // r04 - r03 + 4 * (r01 - r02) "vse.v v26, (a2)\n\t" //--------------------------------------------- "vfsub.vv v8, v1, v3\n\t" // r01 - r03 "vfsub.vv v27, v4, v2\n\t" // r04 - r02 "vfsub.vv v28, v4, v2\n\t" // r04 - r02 "vfmacc.vf v27, fa3, v8\n\t" // r04 - r02 - 2 * (r01 - r03) "vse.v v27, (a3)\n\t" "vfmacc.vf v28, fa2, v8\n\t" // r04 - r02 + 2 * (r01 - r03) "vse.v v28, (a4)\n\t" //--------------------------------------------- "vfmacc.vf v29, fa0, v1\n\t" // r05 + 4 * r01 "vfmacc.vf v29, fa4, v3\n\t" // r05 + 4 * r01 - 5 * r03 "vse.v v29, (a5)\n\t" //--------------------------------------------- "add %1, %1, t2\n\t" "addi t0, t0, -1\n\t" "bnez t0, 3b" :"=r"(r0), // %0 "=r"(r0_tm), // %1 "=r"(tmp), // %2 "=r"(ratio_ptr), // %3 "=r"(padded_in_w), // %4 "=r"(tiles) // %5 :"0"(r0), "1"(r0_tm), "2"(tmp), "3"(ratio_ptr), "4"(padded_in_w), "5"(tiles) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v24", "v25", "v26", "v27", "v28", "v29", "t0", "t1", "t2", "t5", "s1", "a0", "a1", "a2", "a3", "a4", "a5", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5" ); } } csi_mem_free(tmp); } csi_mem_free(input_padd_buf); /*********************************** dot ***************************************/ // reorder input_tm1_buf __fp16 *input_tm2_buf = (__fp16 *)csi_mem_alloc(36 * tiles * in_c * sizeof(__fp16)); for (int r = 0; r < 36; r++) { __fp16 *img_tm2 = input_tm2_buf + r * tiles * in_c; // input_tm2 r channel data int t = 0; for (; t + 7 < tiles; t += 8) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; //----------------- for (int q = 0; q < in_c / 8; q++) { for (int l = 0; l < 8; l++) { tm2[0] = tm1[l]; tm2[1] = tm1[l + 8 * 1]; tm2[2] = tm1[l + 8 * 2]; tm2[3] = tm1[l + 8 * 3]; tm2[4] = tm1[l + 8 * 4]; tm2[5] = tm1[l + 8 * 5]; tm2[6] = tm1[l + 8 * 6]; tm2[7] = tm1[l + 8 * 7]; tm2 += 8; } tm1 += 36 * tiles * 8; } } for (; t + 3 < tiles; t += 4) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; for (int q = 0; q < in_c / 8; q++) { for (int l = 0; l < 8; l++) { tm2[0] = tm1[l]; tm2[1] = tm1[l + 8 * 1]; tm2[2] = tm1[l + 8 * 2]; tm2[3] = tm1[l + 8 * 3]; tm2 += 4; } tm1 += 36 * tiles * 8; } } for (; t + 1 < tiles; t += 2) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; for (int q = 0; q < in_c / 8; q++) { for (int l = 0; l < 8; l++) { tm2[0] = tm1[l]; tm2[1] = tm1[l + 8]; tm2 += 2; } tm1 += 36 * tiles * 8; } } for (; t < tiles; t++) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; for (int q = 0; q < in_c / 8; q++) { for (int l = 0; l < 8; l++) { tm2[0] = tm1[l]; tm2++; } tm1 += 36 * tiles * 8; } } } csi_mem_free(input_tm1_buf); // output_dot_buf: [out_c/4, 36, blocks, 4] __fp16 *output_dot_buf = (__fp16 *)csi_mem_alloc(out_c * block_h * block_w * 6 * 6 * sizeof(__fp16)); for (int p = 0; p < out_c / 8; p++) { __fp16 *output0_tm = output_dot_buf + p * 36 * tiles * 8; // 8 channel dot output __fp16 *kernel0_tm = kernel_data + p * 36 * in_c * 8; // 8 channel kernel 
for (int r = 0; r < 36; r++) {
    __fp16 *img_tm2 = input_tm2_buf + r * tiles * in_c;     // r-th channel of img_tm2
    int t = 0;
    for (; t + 7 < tiles; t += 8) {
        __fp16 *r0 = img_tm2 + t * in_c;
        __fp16 *k0 = kernel0_tm + r * in_c * 8;

        asm volatile(
            "vsetvli zero, zero, e16, m1\n\t"
            "mv t0, %3\n\t"         // t0 = in_c
            "vmv.v.x v0, zero\n\t"
            "vmv.v.x v1, zero\n\t"
            "vmv.v.x v2, zero\n\t"
            "vmv.v.x v3, zero\n\t"
            "vmv.v.x v4, zero\n\t"
            "vmv.v.x v5, zero\n\t"
            "vmv.v.x v6, zero\n\t"
            "vmv.v.x v7, zero\n\t"  // clear

            "1:\n\t"
            "flh fa0, (%0)\n\t"
            "flh fa1, 2(%0)\n\t"
            "flh fa2, 4(%0)\n\t"
            "flh fa3, 6(%0)\n\t"
            "flh fa4, 8(%0)\n\t"
            "flh fa5, 10(%0)\n\t"
            "flh fa6, 12(%0)\n\t"
            "flh fa7, 14(%0)\n\t"
            "addi %0, %0, 16\n\t"

            "vle.v v8, (%1)\n\t"
            "addi %1, %1, 16\n\t"

            "vfmacc.vf v0, fa0, v8\n\t"
            "vfmacc.vf v1, fa1, v8\n\t"
            "vfmacc.vf v2, fa2, v8\n\t"
            "vfmacc.vf v3, fa3, v8\n\t"
            "vfmacc.vf v4, fa4, v8\n\t"
            "vfmacc.vf v5, fa5, v8\n\t"
            "vfmacc.vf v6, fa6, v8\n\t"
            "vfmacc.vf v7, fa7, v8\n\t"

            "addi t0, t0, -1\n\t"
            "bnez t0, 1b\n\t"

            "vse.v v0, (%2)\n\t"
            "addi %2, %2, 16\n\t"
            "vse.v v1, (%2)\n\t"
            "addi %2, %2, 16\n\t"
            "vse.v v2, (%2)\n\t"
            "addi %2, %2, 16\n\t"
            "vse.v v3, (%2)\n\t"
            "addi %2, %2, 16\n\t"
            "vse.v v4, (%2)\n\t"
            "addi %2, %2, 16\n\t"
            "vse.v v5, (%2)\n\t"
            "addi %2, %2, 16\n\t"
            "vse.v v6, (%2)\n\t"
            "addi %2, %2, 16\n\t"
            "vse.v v7, (%2)\n\t"
            "addi %2, %2, 16\n\t"

            :"=r"(r0),              // %0
            "=r"(k0),               // %1
            "=r"(output0_tm),       // %2
            "=r"(in_c)              // %3
            :"0"(r0),
            "1"(k0),
            "2"(output0_tm),
            "3"(in_c)
            :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8",
             "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", "fa6", "fa7", "t0"
        );
    }
    for (; t + 3 < tiles; t += 4) {
        __fp16 *r0 = img_tm2 + t * in_c;
        __fp16 *k0 = kernel0_tm + r * in_c * 8;

        asm volatile(
            "vsetvli zero, zero, e16, m1\n\t"
            "mv t0, %3\n\t"         // t0 = in_c
            "vmv.v.x v0, zero\n\t"
            "vmv.v.x v1, zero\n\t"
            "vmv.v.x v2, zero\n\t"
            "vmv.v.x v3, zero\n\t"  // clear

            "1:\n\t"
            "flh fa0, (%0)\n\t"
            "flh fa1, 2(%0)\n\t"
            "flh fa2, 4(%0)\n\t"
            "flh fa3, 6(%0)\n\t"
            "addi %0, %0, 8\n\t"

            "vle.v v4, (%1)\n\t"
            "addi %1, %1, 16\n\t"

            "vfmacc.vf v0, fa0, v4\n\t"
            "vfmacc.vf v1, fa1, v4\n\t"
            "vfmacc.vf v2, fa2, v4\n\t"
            "vfmacc.vf v3, fa3, v4\n\t"

            "addi t0, t0, -1\n\t"
            "bnez t0, 1b\n\t"

            "vse.v v0, (%2)\n\t"
            "addi %2, %2, 16\n\t"
            "vse.v v1, (%2)\n\t"
            "addi %2, %2, 16\n\t"
            "vse.v v2, (%2)\n\t"
            "addi %2, %2, 16\n\t"
            "vse.v v3, (%2)\n\t"
            "addi %2, %2, 16\n\t"

            :"=r"(r0),              // %0
            "=r"(k0),               // %1
            "=r"(output0_tm),       // %2
            "=r"(in_c)              // %3
            :"0"(r0),
            "1"(k0),
            "2"(output0_tm),
            "3"(in_c)
            :"cc", "memory", "v0", "v1", "v2", "v3", "v4",
             "fa0", "fa1", "fa2", "fa3", "t0"
        );
    }
    for (; t + 1 < tiles; t += 2) {
        __fp16 *r0 = img_tm2 + t * in_c;
        __fp16 *k0 = kernel0_tm + r * in_c * 8;

        asm volatile(
            "vsetvli zero, zero, e16, m1\n\t"
            "mv t0, %3\n\t"         // t0 = in_c
            "vmv.v.x v0, zero\n\t"
            "vmv.v.x v1, zero\n\t"  // clear

            "1:\n\t"
            "flh fa0, (%0)\n\t"
            "flh fa1, 2(%0)\n\t"
            "addi %0, %0, 4\n\t"

            "vle.v v2, (%1)\n\t"
            "addi %1, %1, 16\n\t"

            "vfmacc.vf v0, fa0, v2\n\t"
            "vfmacc.vf v1, fa1, v2\n\t"

            "addi t0, t0, -1\n\t"
            "bnez t0, 1b\n\t"

            "vse.v v0, (%2)\n\t"
            "addi %2, %2, 16\n\t"
            "vse.v v1, (%2)\n\t"
            "addi %2, %2, 16\n\t"

            :"=r"(r0),              // %0
            "=r"(k0),               // %1
            "=r"(output0_tm),       // %2
            "=r"(in_c)              // %3
            :"0"(r0),
            "1"(k0),
            "2"(output0_tm),
            "3"(in_c)
            :"cc", "memory", "v0", "v1", "v2", "fa0", "fa1", "t0"
        );
    }
    for (; t < tiles; t++) {
        __fp16 *r0 = img_tm2 + t * in_c;
        __fp16 *k0 = kernel0_tm + r * in_c * 8;

        asm volatile(
            "vsetvli zero, zero, e16, m1\n\t"
            "mv t0, %3\n\t"         // t0 = in_c
            "vmv.v.x v0, zero\n\t"  // clear

            "1:\n\t"
            "flh fa0, (%0)\n\t"     // load one fp16 input element
            "addi %0, %0, 2\n\t"

            "vle.v v1, (%1)\n\t"
            "addi %1, %1, 16\n\t"

            "vfmacc.vf v0, fa0, v1\n\t"

            "addi t0, t0, -1\n\t"
            "bnez t0, 1b\n\t"

            "vse.v v0, (%2)\n\t"
            "addi %2, %2, 16\n\t"

            :"=r"(r0),              // %0
            "=r"(k0),               // %1
            "=r"(output0_tm),       // %2
            "=r"(in_c)              // %3
            :"0"(r0),
            "1"(k0),
            "2"(output0_tm),
            "3"(in_c)
            :"cc", "memory", "v0", "v1", "fa0", "t0"
        );
    }
}
}
csi_mem_free(input_tm2_buf);

/*************************** transform output ****************************/
// output_tm1_buf: [out_c/8, out_h4, out_w4, 8]
__fp16 *output_tm1_buf = (__fp16 *)csi_mem_alloc(out_c * block_h * block_w * 4 * 4 * sizeof(__fp16));

/*
    AT = {
        { 1  1  1   1  1   0 },
        { 0  1  -1  2  -2  0 },
        { 0  1  1   4  4   0 },
        { 0  1  -1  8  -8  1 }
    };
*/

for (int p = 0; p < out_c / 8; p++) {
    __fp16 *bias_tmp = bias_data + p * 8;

    __fp16 *out0_tm = output_dot_buf + p * 36 * block_h * block_w * 8;  // p-th channel: dot-product result, before output transform
    __fp16 *out0 = output_tm1_buf + p * 4*block_h * 4*block_w * 8;      // p-th channel of the transformed output

    __fp16 *tmp1 = (__fp16 *)csi_mem_alloc(4 * 6 * 8 * sizeof(__fp16));

    int out_w4 = block_w * 4;

    for (int i = 0; i < block_h; i++) {
        for (int j = 0; j < block_w; j++) {
            __fp16 *output0_tm_0 = out0_tm + (i * block_w + j) * 8;     // start addr of the 6*6 block
            __fp16 *output0 = out0 + (i * block_w * 4 * 4 + j * 4) * 8; // start addr of the 4*4 output block

            __fp16 ratio[] = {2.0, 4.0, 8.0};
            __fp16 *ratio_ptr = ratio;

            asm volatile(
                "vsetvli zero, zero, e16, m1\n\t"
                "li t0, 6\n\t"          // m = 6
                "mv t5, %2\n\t"         // t5 = tmp start addr
                "slli t1, %4, 4\n\t"    // t1 = tiles * 8 * 2
                "mulw t2, t0, t1\n\t"   // t2 = tiles * 6 blocks * 8 channels * 2 bytes

                "flh fa0, 0(%3)\n\t"    // fa0 = 2
                "flh fa1, 2(%3)\n\t"    // fa1 = 4
                "flh fa2, 4(%3)\n\t"    // fa2 = 8

                "mv s1, %0\n\t"

                "1:\n\t"                // shape : [4 * 6] * [6 * 6] = [4 * 6]
                "mv a0, t5\n\t"         // tmp[0][m]
                "addi a1, a0, 96\n\t"   // tmp[1][m]
                "addi a2, a1, 96\n\t"   // tmp[2][m]
                "addi a3, a2, 96\n\t"   // tmp[3][m]

                "vle.v v0, (s1)\n\t"    // r00
                "add s1, s1, t1\n\t"
                "vle.v v1, (s1)\n\t"    // r01
                "add s1, s1, t1\n\t"
                "vle.v v2, (s1)\n\t"    // r02
                "add s1, s1, t1\n\t"
                "vle.v v3, (s1)\n\t"    // r03
                "add s1, s1, t1\n\t"
                "vle.v v4, (s1)\n\t"    // r04
                "add s1, s1, t1\n\t"
                "vle.v v5, (s1)\n\t"    // r05
                "add s1, s1, t1\n\t"

                //---------------------------------------------
                "vfadd.vv v26, v1, v2\n\t"  // r01 + r02 = tmp02a
                "vfsub.vv v6, v1, v2\n\t"   // r01 - r02 = tmp13a
                "vfadd.vv v7, v3, v4\n\t"   // r03 + r04 = tmp02b
                "vfsub.vv v8, v3, v4\n\t"   // r03 - r04 = tmp13b
                "vmv.v.v v25, v6\n\t"       // v25 = tmp13a

                //---------------------------------------------
                "vfadd.vv v24, v0, v26\n\t" // r00 + tmp02a
                "vfadd.vv v24, v24, v7\n\t" // r00 + tmp02a + tmp02b
                "vse.v v24, (a0)\n\t"

                "vfmacc.vf v25, fa0, v8\n\t"    // tmp13a + 2 * tmp13b
                "vse.v v25, (a1)\n\t"

                "vfmacc.vf v26, fa1, v7\n\t"    // tmp02a + 4 * tmp02b
                "vse.v v26, (a2)\n\t"

                "vfadd.vv v27, v5, v6\n\t"      // r05 + tmp13a
                "vfmacc.vf v27, fa2, v8\n\t"    // r05 + tmp13a + 8 * tmp13b
                "vse.v v27, (a3)\n\t"

                //---------------------------------------------
                "addi t5, t5, 16\n\t"   // tmp[0][0] --> tmp[0][1]

                "addi t0, t0, -1\n\t"
                "bnez t0, 1b\n\t"

                "2:\n\t"
                "mv t5, %2\n\t"         // tmp start addr
                "li t0, 4\n\t"          // m = 4
                "slli t1, %5, 4\n\t"    // t1 = out_w4 * 8 * 2 bytes

                "vle.v v16, (%6)\n\t"   // load 8 channel bias data

                "3:\n\t"                // shape : [4 * 6] * [6 * 4] = [4 * 4]
                "mv a0, %1\n\t"
                "addi a1, a0, 16\n\t"
                "addi a2, a1, 16\n\t"
                "addi a3, a2, 16\n\t"

                "vle.v v0, (t5)\n\t"    // tmp[m][0]
                "addi t5, t5, 16\n\t"
                "vle.v v1, (t5)\n\t"    // tmp[m][1]
                "addi t5, t5, 16\n\t"
                "vle.v v2, (t5)\n\t"    // tmp[m][2]
                "addi t5, t5, 16\n\t"
                "vle.v v3, (t5)\n\t"    // tmp[m][3]
                "addi t5, t5, 16\n\t"
                "vle.v v4, (t5)\n\t"    // tmp[m][4]
                "addi t5, t5, 16\n\t"
                "vle.v v5, (t5)\n\t"    // tmp[m][5]
"addi t5, t5, 16\n\t" //--------------------------------------------- "vfadd.vv v26, v1, v2\n\t" // r01 + r02 = tmp02a "vfsub.vv v6, v1, v2\n\t" // r01 - r02 = tmp13a "vfadd.vv v7, v3, v4\n\t" // r03 + r04 = tmp02b "vfsub.vv v8, v3, v4\n\t" // r03 - r04 = tmp13b "vmv.v.v v25, v6\n\t" // v25 = tmp13a //--------------------------------------------- "vfadd.vv v24, v0, v26\n\t" // r00 + tmp02a "vfadd.vv v24, v24, v7\n\t" // r00 + tmp02a + tmp02b "vfadd.vv v24, v24, v16\n\t" // add bias "vse.v v24, (a0)\n\t" "vfmacc.vf v25, fa0, v8\n\t" // tmp13a + 2 * tmp13b "vfadd.vv v25, v25, v16\n\t" // add bias "vse.v v25, (a1)\n\t" "vfmacc.vf v26, fa1, v7\n\t" // tmp02a + 4 * tmp02b "vfadd.vv v26, v26, v16\n\t" // add bias "vse.v v26, (a2)\n\t" "vfadd.vv v27, v5, v6\n\t" // r05 + tmp13a "vfmacc.vf v27, fa2, v8\n\t" // r05 + tmp13a * 8 tmp13b "vfadd.vv v27, v27, v16\n\t" // add bias "vse.v v27, (a3)\n\t" "add %1, %1, t1\n\t" "addi t0, t0, -1\n\t" "bnez t0, 3b" :"=r"(output0_tm_0), // %0 "=r"(output0), // %1 "=r"(tmp1), // %2 "=r"(ratio_ptr), // %3 "=r"(tiles), // %4 "=r"(out_w4), // %5 "=r"(bias_tmp) // %6 :"0"(output0_tm_0), "1"(output0), "2"(tmp1), "3"(ratio_ptr), "4"(tiles), "5"(out_w4), "6"(bias_tmp) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v24", "v25", "v26", "v27", "t0", "t1", "t2", "t5", "s1", "a0", "a1", "a2", "a3", "fa0", "fa1", "fa2" ); } } csi_mem_free(tmp1); } csi_mem_free(output_dot_buf); // crop the output after transform: cut extra part (right , bottom) csi_c906_crop_output_pack8to1_fp16(output_tm1_buf, output_data, out_c, out_h, out_w, block_h * 4, block_w * 4); output_data += output_size; csi_mem_free(output_tm1_buf); } if (!flag_bias) { csi_mem_free(bias_data); bias_data = NULL; } return CSINN_TRUE; } void csi_c906_conv3x3s1_fp16(struct csi_tensor *input, struct csi_tensor *output, struct csi_tensor *kernel, struct csi_tensor *bias, struct conv2d_params *params) { /* to do */ } void csi_c906_conv3x3s2_fp16(struct csi_tensor *input, struct csi_tensor *output, struct csi_tensor *kernel, struct csi_tensor *bias, struct conv2d_params *params) { /* to do */ }
/* CSI-NN2 version 1.12.x */ /* the conditions for using winograd convolution in_channel >= 16 out_channel >= 16 input_height <= 120 input_width <= 120 */ #include "csi_c906.h" /* padding input for winograd input transform , and change memory layout to [n c/8 h w 8] input layout: [n c h w] input_padded layout: [n c/8 h w 8] constrain: input channel % 8 = 0 */ void csi_c906_pad_input_pack1to8_fp16(const __fp16 *input, __fp16 *input_padded, int inc, int inh, int inw, int padded_h, int padded_w, int pad_top, int pad_left) { int inc8 = inc / 8; int padded_hw = padded_h * padded_w; __fp16 *pad_ptr = input_padded; __fp16 *inp_ptr = (__fp16 *)input; int resi_h = padded_h - pad_top - inh; // remain to pad on h (pad_down) int resi_w = padded_w - pad_left - inw; // remain to pad on w (pad_right) asm volatile( "vsetvli zero, zero, e16, m1\n\t" "vmv.v.x v2, zero\n\t" // clear v2, for memset value 0 "mulw t1, %6, %7\n\t" // pad_top * padded_w "mulw t2, %6, %9\n\t" // pad_down * padded_w "mulw t0, %3, %4\n\t" // input_size per_channel "slli t0, t0, 1\n\t" // load stride = input_size * 2 "slli t6, t0, 3\n\t" // t6 = input_size * 8 * 2 "1:\n\t" // channel loop [inc/8] "mv a0, %0\n\t" // update input_addr "mv t5, %3\n\t" // t5 = in_h "beqz %7, 3f\n\t" // if pad_top = 0 "mv t3, t1\n\t" // t3 = num to memset "2:\n\t" // pad h_top "vse.v v2, (%1)\n\t" "addi %1, %1, 16\n\t" "addi t3, t3, -1\n\t" "bnez t3, 2b\n\t" "3:\n\t" // pad h_mid "mv t4, %4\n\t" // t4 = in_w "beqz %8, 5f\n\t" // if pad_left = 0 "mv t3, %8\n\t" // t3 = pad_left "4:\n\t" // pad w_left "vse.v v2, (%1)\n\t" "addi %1, %1, 16\n\t" "addi t3, t3, -1\n\t" "bnez t3, 4b\n\t" "5:\n\t" // pad w_mid "vlse.v v4, (a0), t0\n\t" "addi a0, a0, 2\n\t" "vse.v v4, (%1)\n\t" "addi %1, %1, 16\n\t" "addi t4, t4, -1\n\t" "bnez t4, 5b\n\t" "beqz %10, 7f\n\t" // if pad_right = 0 "mv t3, %10\n\t" // t3 = pad_right "6:\n\t" // pad w_right "vse.v v2, (%1)\n\t" "addi %1, %1, 16\n\t" "addi t3, t3, -1\n\t" "bnez t3, 6b\n\t" "7:\n\t" "addi t5, t5, -1\n\t" "bnez t5, 3b\n\t" "beqz %9, 9f\n\t" // if pad_down = 0 "mv t3, t2\n\t" // t3 = num to memset 0 "8:\n\t" // pad h_down "vse.v v2, (%1)\n\t" "addi %1, %1, 16\n\t" "addi t3, t3, -1\n\t" "bnez t3, 8b\n\t" "9:\n\t" "add %0, %0, t6\n\t" // input_data jump to next 8 channel "addi %2, %2, -1\n\t" "bnez %2, 1b\n\t" :"=r"(inp_ptr), // %0 "=r"(pad_ptr), // %1 "=r"(inc8), // %2 "=r"(inh), // %3 "=r"(inw), // %4 "=r"(padded_hw), // %5 "=r"(padded_w), // %6 "=r"(pad_top), // %7 "=r"(pad_left), // %8 "=r"(resi_h), // %9 "=r"(resi_w) // %10 :"0"(inp_ptr), "1"(pad_ptr), "2"(inc8), "3"(inh), "4"(inw), "5"(padded_hw), "6"(padded_w), "7"(pad_top), "8"(pad_left), "9"(resi_h), "10"(resi_w) :"cc", "memory", "v2", "v4", "a0", "t0", "t1", "t2", "t3", "t4", "t5", "t6" ); } void csi_c906_crop_output_pack8to1_fp16(const __fp16 *output_trans, __fp16 *output, int out_c, int out_h, int out_w, int wino_h, int wino_w) { int out_c8 = out_c / 8; __fp16 *out_tm_ptr = (__fp16 *)output_trans; __fp16 *out_ptr = output; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mulw t0, %3, %4\n\t" // output_size per_channel "slli t0, t0, 1\n\t" // store_stride = output_size * 2 "slli t3, t0, 3\n\t" // t3 = output_size * 8 * 2 "slli t4, %6, 4\n\t" // t4 = wino_w * 8 * 2 "mulw t5, %5, %6\n\t" // crop_size per_channel "slli t5, t5, 4\n\t" // t5 = crop_size * 8 * 2 "1:\n\t" // channel loop [out_ch / 8] "mv a1, %1\n\t" // update output_addr "mv a0, %0\n\t" // update crop_addr per-channel "mv t1, %3\n\t" // t1 = out_h "2:\n\t" // crop h "mv t2, %4\n\t" // t2 = out_w "mv s1, 
a0\n\t" // update crop_addr per-row "3:\n\t" // crop w "vle.v v2, (s1)\n\t" "addi s1, s1, 16\n\t" "vsse.v v2, (a1), t0\n\t" "addi a1, a1, 2\n\t" "addi t2, t2, -1\n\t" "bnez t2, 3b\n\t" "add a0, a0, t4\n\t" // crop-data jump to next row "addi t1, t1, -1\n\t" "bnez t1, 2b\n\t" "4:\n\t" "add %1, %1, t3\n\t" // output_data jump to next 8 channel "add %0, %0, t5\n\t" // crop-data jump to next 8 channel "addi %2, %2, -1\n\t" "bnez %2, 1b\n\t" :"=r"(out_tm_ptr), // %0 "=r"(out_ptr), // %1 "=r"(out_c8), // %2 "=r"(out_h), // %3 "=r"(out_w), // %4 "=r"(wino_h), // %5 "=r"(wino_w) // %6 :"0"(out_tm_ptr), "1"(out_ptr), "2"(out_c8), "3"(out_h), "4"(out_w), "5"(wino_h), "6"(wino_w) :"cc", "memory", "v2", "v3", "a0", "a1", "s1", "t0", "t1", "t2", "t3", "t4", "t5" ); } /* constrain: output channel % 8 = 0 input channel % 8 = 0 kernel before: [O I 3*3] kernel after : [O/8 8*8 I 8] */ void csi_c906_conv3x3s1_winograd64_transform_kernel_pack8_fp16(struct csi_tensor *o_kernel, struct csi_tensor *t_kernel) { int32_t outch = o_kernel->dim[0]; int32_t inch = o_kernel->dim[1]; __fp16 *kernel_data = (__fp16 *)o_kernel->data; // for kernel transform buf, 3x3 --> 8x8 __fp16 *kernel_tm = (__fp16 *)csi_mem_alloc(outch * inch * 8 * 8 * sizeof(__fp16)); // kernel transform matrix: G const __fp16 ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; // const __fp16 ktm[8][3] = { // {1.0f, 0.0f, 0.0f}, // {-2.0f / 9, -2.0f / 9, -2.0f / 9}, // {-2.0f / 9, 2.0f / 9, -2.0f / 9}, // {1.0f / 90, 1.0f / 45, 2.0f / 45}, // {1.0f / 90, -1.0f / 45, 2.0f / 45}, // {32.0f / 45, 16.0f / 45, 8.0f / 45}, // {32.0f / 45, -16.0f / 45, 8.0f / 45}, // {0.0f, 0.0f, 1.0f} // }; csi_tensor_copy(t_kernel, o_kernel); for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const __fp16* kernel0 = kernel_data + p * inch * 9 + q * 9; __fp16* kernel_tmp = kernel_tm + p * inch * 64 + q * 64; // transform kernel const __fp16 *k0 = kernel0; const __fp16 *k1 = kernel0 + 3; const __fp16 *k2 = kernel0 + 6; // h : first compute the transport matrix tmp = (g * GT)T __fp16 tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 8; j++) { __fp16* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tmp[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // optimized layout for winograd64 __fp16 *kernel_tm_pack8 = (__fp16 *)csi_mem_alloc(outch * inch * 8 * 8 * sizeof(__fp16)); t_kernel->data = kernel_tm_pack8; for (int oc = 0; oc < outch / 8; oc++) { __fp16 *g0 = kernel_tm_pack8 + oc * 64 * inch * 8; const __fp16 *k0 = kernel_tm + oc * 64 * inch * 8; const __fp16 *k1 = k0 + 64 * inch; const __fp16 *k2 = k1 + 64 * inch; const __fp16 *k3 = k2 + 64 * inch; const __fp16 *k4 = k3 + 64 * inch; const __fp16 *k5 = k4 + 64 * inch; const __fp16 *k6 = k5 + 64 * inch; const __fp16 *k7 = k6 + 64 * inch; for (int k = 0; k < 64; k++) { __fp16 *g00 = g0 + k * inch * 8; for (int ic = 0; ic < inch / 8; ic++) { for (int i = 0; i < 8; i++) { const __fp16 *k00 = k0 + (ic * 8 + i) * 64; const __fp16 *k10 = k1 + (ic * 8 + i) * 64; const __fp16 *k20 = k2 + (ic * 8 + i) * 64; const __fp16 *k30 = k3 + (ic 
* 8 + i) * 64;
const __fp16 *k40 = k4 + (ic * 8 + i) * 64;
const __fp16 *k50 = k5 + (ic * 8 + i) * 64;
const __fp16 *k60 = k6 + (ic * 8 + i) * 64;
const __fp16 *k70 = k7 + (ic * 8 + i) * 64;

g00[0] = k00[k];
g00[1] = k10[k];
g00[2] = k20[k];
g00[3] = k30[k];
g00[4] = k40[k];
g00[5] = k50[k];
g00[6] = k60[k];
g00[7] = k70[k];
g00 += 8;
}
}
}
}
csi_mem_free(kernel_tm);
}

/*
    constrain: output channel % 8 = 0
               input channel % 8 = 0
*/
int csi_c906_conv3x3s1_winograd64_pack8_fp16(struct csi_tensor *input,
                                             struct csi_tensor *output,
                                             struct csi_tensor *kernel,
                                             struct csi_tensor *bias,
                                             struct conv2d_params *params)
{
    // uint64_t start_time, end_time;
    // start_time = csi_get_timespec();
    __fp16 *input_data = (__fp16 *)input->data;
    __fp16 *output_data = (__fp16 *)output->data;
    __fp16 *kernel_data = (__fp16 *)params->conv_extra.kernel_tm->data;
    __fp16 *bias_data = (__fp16 *)bias->data;

    // param
    int kernel_h = kernel->dim[2];
    int kernel_w = kernel->dim[3];
    int stride_h = params->stride_height;
    int stride_w = params->stride_width;
    int dilation_h = params->dilation_height;
    int dilation_w = params->dilation_width;
    int pad_left = params->pad_left;
    int pad_top = params->pad_top;

    int batch = input->dim[0];
    int in_c = input->dim[1];
    int in_h = input->dim[2];
    int in_w = input->dim[3];
    int input_size = in_c * in_h * in_w;
    int kernel_size = in_c * kernel_h * kernel_w;

    int out_c = kernel->dim[0];
    int out_h = output->dim[2];
    int out_w = output->dim[3];
    int output_size = out_c * out_h * out_w;

    // winograd param
    int block_h = (out_h + 5) / 6;
    int block_w = (out_w + 5) / 6;

    int padded_in_h = block_h * 6 + 2;  // each 6x6 output block needs an 8x8 input patch (kernel 3x3, stride 1), hence block * 6 + 2
    int padded_in_w = block_w * 6 + 2;
    int padded_in_hw = padded_in_h * padded_in_w;   // element size after padding per channel

    /****************************** bias *****************************/
    bool flag_bias = 1; // default: conv2d layer include bias
    if (bias_data == NULL) {
        flag_bias = 0;
        bias_data = (__fp16 *)csi_mem_alloc(out_c * sizeof(__fp16));
    }

    for(int n = 0; n < batch; n++) {

        // pad buffer: [in_c/8 h w 8]
        __fp16 *input_padd_buf = (__fp16 *)csi_mem_alloc(in_c * padded_in_hw * sizeof(__fp16));

        // pad input
        csi_c906_pad_input_pack1to8_fp16(input_data, input_padd_buf, in_c, in_h, in_w, padded_in_h, padded_in_w, pad_top, pad_left);
        input_data += input_size;

        // input transform buffer1: [in_ch/8, 64, blocks, 8]
        __fp16 *input_tm1_buf = (__fp16 *)csi_mem_alloc(in_c * block_h * block_w * 8 * 8 * sizeof(__fp16));

        /****************************** transform input *****************************/
        /*
            BT = {
                { 1   0    -5.25  0     5.25   0     -1  0 };
                { 0   1     1    -4.25 -4.25   1      1  0 };
                { 0  -1     1     4.25 -4.25  -1      1  0 };
                { 0   0.5   0.25 -2.5  -1.25   2      1  0 };
                { 0  -0.5   0.25  2.5  -1.25  -2      1  0 };
                { 0   2     4    -2.5  -5      0.5    1  0 };
                { 0  -2     4     2.5  -5     -0.5    1  0 };
                { 0  -1     0     5.25  0    -5.25    0  1 }
            };
        */

        // int in_h_tm = block_h * 8;  // input height after transform
        // int in_w_tm = block_w * 8;

        int tiles = block_h * block_w;

        #pragma omp parallel for num_threads(1)
        for(int q = 0; q < in_c / 8; q++) {
            __fp16 *img0 = input_padd_buf + q * padded_in_h * padded_in_w * 8;  // feature map after padding - q channel
            __fp16 *img0_tm = input_tm1_buf + q * 64 * tiles * 8;               // transform and interleave - q channel

            __fp16 *tmp = (__fp16 *)csi_mem_alloc(8 * 8 * 8 * sizeof(__fp16));
            // __fp16 tmp[512] = {0.0};
for(int i = 0; i < block_h; i++) { for(int j = 0; j < block_w; j++) { __fp16 *r0 = img0 + (i * padded_in_w * 6 + j * 6) * 8; // feature map after padding 8*8 start addr __fp16 *r0_tm = img0_tm + (i * block_w + j) * 8; // input_tm1 8*8 block start addr __fp16 ratio[] = {5.25, -4.25, 0.25, -1.25, 4.0, 0.5, -2.5, 2.0}; // note: in fact cannot be output constrain __fp16 *ratio_ptr = ratio; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "li t0, 8\n\t" // m = 8 "mv t5, %2\n\t" // t5 = tmp start addr "slli t1, %4, 4\n\t" // t1 = padded_in_w * 8 * 2bytes "flh fa0, 0(%3)\n\t" // fa0 = 5.25 "flh fa1, 2(%3)\n\t" // fa1 = -4.25 "flh fa2, 4(%3)\n\t" // fa2 = 0.25 "flh fa3, 6(%3)\n\t" // fa3 = -1.25 "flh fa4, 8(%3)\n\t" // fa4 = 4.0 "flh fa5, 10(%3)\n\t" // fa5 = 0.5 "flh fa6, 12(%3)\n\t" // fa6 = -2.5 "flh fa7, 14(%3)\n\t" // fa7 = 2.0 "1:\n\t" "mv s1, %0\n\t" // s1 = r00 addr "mv a0, t5\n\t" // tmp[0][m] "addi a1, a0, 128\n\t" // tmp[1][m] "addi a2, a1, 128\n\t" // tmp[2][m] "addi a3, a2, 128\n\t" // tmp[3][m] "addi a4, a3, 128\n\t" // tmp[4][m] "addi a5, a4, 128\n\t" // tmp[5][m] "addi a6, a5, 128\n\t" // tmp[6][m] "addi a7, a6, 128\n\t" // tmp[7][m] "vle.v v0, (s1)\n\t" // r00 "addi s1, s1, 16\n\t" "vle.v v1, (s1)\n\t" // r01 "addi s1, s1, 16\n\t" "vle.v v2, (s1)\n\t" // r02 "addi s1, s1, 16\n\t" "vle.v v3, (s1)\n\t" // r03 "addi s1, s1, 16\n\t" "vle.v v4, (s1)\n\t" // r04 "addi s1, s1, 16\n\t" "vle.v v5, (s1)\n\t" // r05 "addi s1, s1, 16\n\t" "vle.v v6, (s1)\n\t" // r06 "addi s1, s1, 16\n\t" "vle.v v7, (s1)\n\t" // r07 "addi s1, s1, 16\n\t" "vmv.v.v v10, v6\n\t" //--------------------------------------------- "vfsub.vv v8, v4, v2\n\t" // r04 - r02 "vfsub.vv v9, v3, v5\n\t" // r03 - r05 "vfsub.vv v24, v0, v6\n\t" // r00 - r06 "vfsub.vv v31, v7, v1\n\t" // r07 - r01 "vfmacc.vf v10, fa2, v2\n\t" // r06 + r02 * 0.25f "vfmul.vf v11, v1, fa5\n\t" // r01 * 0.5f "vfmul.vf v12, v1, fa7\n\t" // r01 * 2.0f "vfmacc.vf v24, fa0, v8\n\t" // r00 - r06 + 5.25 * (r04 - r02) = tmp[0][m] "vfmacc.vf v31, fa0, v9\n\t" // r07 - r01 + 5.25 * (r03 - r05) = tmp[7][m] //--------------------------------------------- "vfadd.vv v8, v2, v6\n\t" // r02 + r06 "vfadd.vv v9, v1, v5\n\t" // r01 + r05 "vfmacc.vf v11, fa6, v3\n\t" // r01 * 0.5f - r03 * 2.5f "vfmacc.vf v12, fa6, v3\n\t" // r01 * 2.f - r03 * 2.5f "vfmacc.vf v2, fa3, v4\n\t" // r02 - r04 * 1.25f 注意 "vfmacc.vf v10, fa3, v4\n\t" // r06 + r02 * 0.25f - r04 * 1.25f = tmp34a "vfmacc.vf v8, fa1, v4\n\t" // r02 + r06 - r04 * 4.25f = tmp12a "vfmacc.vf v9, fa1, v3\n\t" // r01 + r05 - r03 * 4.25f = tmp12b "vfmacc.vf v11, fa7, v5\n\t" // r01 * 0.5f - r03 * 2.5f + r05 * 2.0 = tmp34b "vfmacc.vf v12, fa5, v5\n\t" // r01 * 2.f - r03 * 2.5f + r05 * 0.5 = tmp56b "vse.v v24, (a0)\n\t" "vse.v v31, (a7)\n\t" "vfadd.vv v25, v8, v9\n\t" // tmp12a + tmp12b = tmp[1][m] "vfsub.vv v26, v8, v9\n\t" // tmp12a - tmp12b = tmp[2][m] //--------------------------------------------- "vfmacc.vf v6, fa4, v2\n\t" // r06 + (r02 - r04 * 1.25f) * 4 = tmp56a "vfadd.vv v27, v10, v11\n\t" // tmp34a + tmp34b = tmp[3][m] "vfsub.vv v28, v10, v11\n\t" // tmp34a - tmp34b = tmp[4][m] "vfadd.vv v29, v6, v12\n\t" // tmp56a + tmp56b = tmp[5][m] "vfsub.vv v30, v6, v12\n\t" // tmp56a - tmp56b = tmp[6][m] "vse.v v25, (a1)\n\t" "vse.v v26, (a2)\n\t" "vse.v v27, (a3)\n\t" "vse.v v28, (a4)\n\t" "vse.v v29, (a5)\n\t" "vse.v v30, (a6)\n\t" //--------------------------------------------- "add %0, %0, t1\n\t" // padding feature map 8*8 next line addr "addi t5, t5, 16\n\t" // tmp[0][0] --> tmp[0][1] "addi t0, t0, -1\n\t" "bnez t0, 
1b\n\t" "2:\n\t" "mv t5, %2\n\t" // tmp start addr "li t0, 8\n\t" // m = 8 "slli t1, %5, 4\n\t" // t1 = tiles * 8 * 2 bytes "slli t2, %5, 7\n\t" // t2 = tiles * 8 * 8 * 2 bytes "3:\n\t" "mv a0, %1\n\t" // r0_tm_0 "add a1, a0, t1\n\t" // r0_tm_1 "add a2, a1, t1\n\t" // r0_tm_2 "add a3, a2, t1\n\t" // r0_tm_3 "add a4, a3, t1\n\t" // r0_tm_4 "add a5, a4, t1\n\t" // r0_tm_5 "add a6, a5, t1\n\t" // r0_tm_6 "add a7, a6, t1\n\t" // r0_tm_7 "vle.v v0, (t5)\n\t" // tmp[m][0] "addi t5, t5, 16\n\t" "vle.v v1, (t5)\n\t" // tmp[m][1] "addi t5, t5, 16\n\t" "vle.v v2, (t5)\n\t" // tmp[m][2] "addi t5, t5, 16\n\t" "vle.v v3, (t5)\n\t" // tmp[m][3] "addi t5, t5, 16\n\t" "vle.v v4, (t5)\n\t" // tmp[m][4] "addi t5, t5, 16\n\t" "vle.v v5, (t5)\n\t" // tmp[m][5] "addi t5, t5, 16\n\t" "vle.v v6, (t5)\n\t" // tmp[m][6] "addi t5, t5, 16\n\t" "vle.v v7, (t5)\n\t" // tmp[m][7] "addi t5, t5, 16\n\t" "vmv.v.v v10, v6\n\t" //--------------------------------------------- "vfsub.vv v8, v4, v2\n\t" // tmp04 - tmp02 (tmp[m][4] - tmp[m][2]) "vfsub.vv v9, v3, v5\n\t" // tmp03 - tmp05 "vfsub.vv v24, v0, v6\n\t" // tmp00 - tmp06 "vfsub.vv v31, v7, v1\n\t" // tmp07 - tmp01 "vfmacc.vf v10, fa2, v2\n\t" // tmp06 + tmp02 * 0.25f "vfmul.vf v11, v1, fa5\n\t" // tmp01 * 0.5f "vfmul.vf v12, v1, fa7\n\t" // tmp01 * 2.0f "vfmacc.vf v24, fa0, v8\n\t" // tmp00 - tmp06 + 5.25 * (tmp04 - tmp02) = r0_tm_0[m] "vfmacc.vf v31, fa0, v9\n\t" // tmp07 - tmp01 + 5.25 * (tmp03 - tmp05) = r0_tm_7[m] //--------------------------------------------- "vfadd.vv v8, v2, v6\n\t" // tmp02 + tmp06 "vfadd.vv v9, v1, v5\n\t" // tmp01 + tmp05 "vfmacc.vf v11, fa6, v3\n\t" // tmp01 * 0.5f - tmp03 * 2.5f "vfmacc.vf v12, fa6, v3\n\t" // tmp01 * 2.f - tmp03 * 2.5f "vfmacc.vf v2, fa3, v4\n\t" // tmp02 - tmp04 * 1.25f "vfmacc.vf v10, fa3, v4\n\t" // tmp06 + tmp02 * 0.25f - tmp04 * 1.25f = tmp34a "vfmacc.vf v8, fa1, v4\n\t" // tmp02 + tmp06 - tmp04 * 4.25f = tmp12a "vfmacc.vf v9, fa1, v3\n\t" // tmp01 + tmp05 - tmp03 * 4.25f = tmp12b "vfmacc.vf v11, fa7, v5\n\t" // tmp01 * 0.5f - tmp03 * 2.5f + tmp05 * 2.0 = tmp34b "vfmacc.vf v12, fa5, v5\n\t" // tmp01 * 2.f - tmp03 * 2.5f + tmp05 * 0.5 = tmp56b "vse.v v24, (a0)\n\t" "vse.v v31, (a7)\n\t" "vfadd.vv v25, v8, v9\n\t" // tmp12a + tmp12b = r0_tm_1[m] "vfsub.vv v26, v8, v9\n\t" // tmp12a - tmp12b = r0_tm_2[m] //--------------------------------------------- "vfmacc.vf v6, fa4, v2\n\t" // tmp06 + (tmp02 - tmp04 * 1.25f) * 4 = tmp56a "vfadd.vv v27, v10, v11\n\t" // tmp34a + tmp34b = r0_tm_3[m] "vfsub.vv v28, v10, v11\n\t" // tmp34a - tmp34b = r0_tm_4[m] "vfadd.vv v29, v6, v12\n\t" // tmp56a + tmp56b = r0_tm_5[m] "vfsub.vv v30, v6, v12\n\t" // tmp56a - tmp56b = r0_tm_6[m] "vse.v v25, (a1)\n\t" "vse.v v26, (a2)\n\t" "vse.v v27, (a3)\n\t" "vse.v v28, (a4)\n\t" "vse.v v29, (a5)\n\t" "vse.v v30, (a6)\n\t" "add %1, %1, t2\n\t" "addi t0, t0, -1\n\t" "bnez t0, 3b" :"=r"(r0), // %0 "=r"(r0_tm), // %1 "=r"(tmp), // %2 "=r"(ratio_ptr), // %3 "=r"(padded_in_w), // %4 "=r"(tiles) // %5 :"0"(r0), "1"(r0_tm), "2"(tmp), "3"(ratio_ptr), "4"(padded_in_w), "5"(tiles) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "t0", "t1", "t2", "t5", "s1", "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", "fa6", "fa7" ); } } csi_mem_free(tmp); } csi_mem_free(input_padd_buf); /*********************************** dot ***************************************/ // reorder input_tm1_buf __fp16 *input_tm2_buf = (__fp16 
*)csi_mem_alloc(64 * tiles * in_c * sizeof(__fp16)); #pragma omp parallel for num_threads(1) for (int r = 0; r < 64; r++) { __fp16 *img_tm2 = input_tm2_buf + r * tiles * in_c; // input_tm2 r channel data int t = 0; for (; t + 7 < tiles; t += 8) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; //---------------------------- // for (int q = 0; q < in_c / 8; q++) { // for (int l = 0; l < 8; l++) { // tm2[0] = tm1[l]; // tm2[1] = tm1[l + 8 * 1]; // tm2[2] = tm1[l + 8 * 2]; // tm2[3] = tm1[l + 8 * 3]; // tm2[4] = tm1[l + 8 * 4]; // tm2[5] = tm1[l + 8 * 5]; // tm2[6] = tm1[l + 8 * 6]; // tm2[7] = tm1[l + 8 * 7]; // tm2 += 8; // } // tm1 += 64 * tiles * 8; // } //----------------------------- asm volatile( "vsetvli zero, zero, e16, m1\n\t" "slli t1, %2, 10\n\t" // 64 * tiles * 8 * 2 bytes "srai t2, %3, 3\n\t" // in_ch8 "1:\n\t" // in_ch loop8 "mv a0, %1\n\t" // updata tm1 addr "vle.v v0, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v1, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v2, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v3, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v4, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v5, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v6, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v7, (a0)\n\t" "vsseg8e.v v0, (%0)\n\t" "add %1, %1, t1\n\t" "addi %0, %0, 128\n\t" "addi t2, t2, -1\n\t" "bnez t2, 1b\n\t" :"=r"(tm2), // %0 "=r"(tm1), // %1 "=r"(tiles), // %2 "=r"(in_c) // %3 :"0"(tm2), "1"(tm1), "2"(tiles), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "a0", "t1", "t2" ); } for (; t + 3 < tiles; t += 4) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; // for (int q = 0; q < in_c / 8; q++) { // for (int l = 0; l < 8; l++) { // tm2[0] = tm1[l]; // tm2[1] = tm1[l + 8 * 1]; // tm2[2] = tm1[l + 8 * 2]; // tm2[3] = tm1[l + 8 * 3]; // tm2 += 4; // } // tm1 += 64 * tiles * 8; // } asm volatile( "vsetvli zero, zero, e16, m1\n\t" "slli t1, %2, 10\n\t" // 64 * tiles * 8 * 2 bytes "srai t2, %3, 3\n\t" // in_ch8 "1:\n\t" // in_ch loop8 "mv a0, %1\n\t" // updata tm1 addr "vle.v v0, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v1, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v2, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v3, (a0)\n\t" "vsseg4e.v v0, (%0)\n\t" "add %1, %1, t1\n\t" "addi %0, %0, 64\n\t" "addi t2, t2, -1\n\t" "bnez t2, 1b\n\t" :"=r"(tm2), // %0 "=r"(tm1), // %1 "=r"(tiles), // %2 "=r"(in_c) // %3 :"0"(tm2), "1"(tm1), "2"(tiles), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "v3", "a0", "t1", "t2" ); } for (; t + 1 < tiles; t += 2) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; // for (int q = 0; q < in_c / 8; q++) { // for (int l = 0; l < 8; l++) { // tm2[0] = tm1[l]; // tm2[1] = tm1[l + 8]; // tm2 += 2; // } // tm1 += 64 * tiles * 8; // } asm volatile( "vsetvli zero, zero, e16, m1\n\t" "slli t1, %2, 10\n\t" // 64 * tiles * 8 * 2 bytes "srai t2, %3, 3\n\t" // in_ch8 "1:\n\t" // in_ch loop8 "mv a0, %1\n\t" // updata tm1 addr "vle.v v0, (a0)\n\t" "addi a0, a0, 16\n\t" "vle.v v1, (a0)\n\t" "vsseg2e.v v0, (%0)\n\t" "add %1, %1, t1\n\t" "addi %0, %0, 32\n\t" "addi t2, t2, -1\n\t" "bnez t2, 1b\n\t" :"=r"(tm2), // %0 "=r"(tm1), // %1 "=r"(tiles), // %2 "=r"(in_c) // %3 :"0"(tm2), "1"(tm1), "2"(tiles), "3"(in_c) :"cc", "memory", "v0", "v1", "a0", "t1", "t2" ); } for (; t < tiles; t++) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; // for (int q 
= 0; q < in_c / 8; q++) { // for (int l = 0; l < 8; l++) { // tm2[0] = tm1[l]; // tm2++; // } // tm1 += 64 * tiles * 8; // } asm volatile( "vsetvli zero, zero, e16, m1\n\t" "slli t1, %2, 10\n\t" // 64 * tiles * 8 * 2 bytes "srai t2, %3, 3\n\t" // in_ch8 "1:\n\t" // in_ch loop8 "mv a0, %1\n\t" // updata tm1 addr "vle.v v0, (a0)\n\t" "addi a0, a0, 16\n\t" "vse.v v0, (%0)\n\t" "add %1, %1, t1\n\t" "addi %0, %0, 16\n\t" "addi t2, t2, -1\n\t" "bnez t2, 1b\n\t" :"=r"(tm2), // %0 "=r"(tm1), // %1 "=r"(tiles), // %2 "=r"(in_c) // %3 :"0"(tm2), "1"(tm1), "2"(tiles), "3"(in_c) :"cc", "memory", "v0", "a0", "t1", "t2" ); } } csi_mem_free(input_tm1_buf); // output_dot_buf: [out_c/8, 64, blocks, 8] __fp16 *output_dot_buf = (__fp16 *)csi_mem_alloc(out_c * block_h * block_w * 8 * 8 * sizeof(__fp16)); #pragma omp parallel for num_threads(1) for (int p = 0; p < out_c / 8; p++) { __fp16 *output0_tm = output_dot_buf + p * 64 * tiles * 8; __fp16 *kernel0_tm = kernel_data + p * 64 * in_c * 8; for (int r = 0; r < 64; r++) { __fp16 *img_tm2 = input_tm2_buf + r * tiles * in_c; // img_tm2 第r个channel int t = 0; for (; t + 7 < tiles; t += 8) { __fp16 *r0 = img_tm2 + t * in_c; __fp16 *k0 = kernel0_tm + r * in_c * 8; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mv t0, %3\n\t" // t0 = in_c "vmv.v.x v0, zero\n\t" "vmv.v.x v1, zero\n\t" "vmv.v.x v2, zero\n\t" "vmv.v.x v3, zero\n\t" "vmv.v.x v4, zero\n\t" "vmv.v.x v5, zero\n\t" "vmv.v.x v6, zero\n\t" "vmv.v.x v7, zero\n\t" // clear "1:\n\t" "vle.v v8, (%1)\n\t" "addi %1, %1, 16\n\t" "flh fa0, (%0)\n\t" "flh fa1, 2(%0)\n\t" "flh fa2, 4(%0)\n\t" "flh fa3, 6(%0)\n\t" "flh fa4, 8(%0)\n\t" "flh fa5, 10(%0)\n\t" "flh fa6, 12(%0)\n\t" "flh fa7, 14(%0)\n\t" "addi %0, %0, 16\n\t" "vfmacc.vf v0, fa0, v8\n\t" "vfmacc.vf v1, fa1, v8\n\t" "vfmacc.vf v2, fa2, v8\n\t" "vfmacc.vf v3, fa3, v8\n\t" "vfmacc.vf v4, fa4, v8\n\t" "vfmacc.vf v5, fa5, v8\n\t" "vfmacc.vf v6, fa6, v8\n\t" "vfmacc.vf v7, fa7, v8\n\t" "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v1, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v2, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v3, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v4, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v5, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v6, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v7, (%2)\n\t" "addi %2, %2, 16\n\t" :"=r"(r0), // %0 "=r"(k0), // %1 "=r"(output0_tm), // %2 "=r"(in_c) // %3 :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", "fa6", "fa7", "t0" ); } for (; t + 3 < tiles; t += 4) { __fp16 *r0 = img_tm2 + t * in_c; __fp16 *k0 = kernel0_tm + r * in_c * 8; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mv t0, %3\n\t" // t0 = in_c "vmv.v.x v0, zero\n\t" "vmv.v.x v1, zero\n\t" "vmv.v.x v2, zero\n\t" "vmv.v.x v3, zero\n\t" // clear "1:\n\t" "vle.v v4, (%1)\n\t" "addi %1, %1, 16\n\t" "flh fa0, (%0)\n\t" "flh fa1, 2(%0)\n\t" "flh fa2, 4(%0)\n\t" "flh fa3, 6(%0)\n\t" "addi %0, %0, 8\n\t" "vfmacc.vf v0, fa0, v4\n\t" "vfmacc.vf v1, fa1, v4\n\t" "vfmacc.vf v2, fa2, v4\n\t" "vfmacc.vf v3, fa3, v4\n\t" "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v1, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v2, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v3, (%2)\n\t" "addi %2, %2, 16\n\t" :"=r"(r0), // %0 "=r"(k0), // %1 "=r"(output0_tm), // %2 "=r"(in_c) // %3 :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "fa0", "fa1", "fa2", "fa3", "t0" ); } 
for (; t + 1 < tiles; t += 2) {
    __fp16 *r0 = img_tm2 + t * in_c;
    __fp16 *k0 = kernel0_tm + r * in_c * 8;

    asm volatile(
        "vsetvli zero, zero, e16, m1\n\t"
        "mv t0, %3\n\t"         // t0 = in_c
        "vmv.v.x v0, zero\n\t"
        "vmv.v.x v1, zero\n\t"  // clear

        "1:\n\t"
        "vle.v v2, (%1)\n\t"
        "addi %1, %1, 16\n\t"

        "flh fa0, (%0)\n\t"
        "flh fa1, 2(%0)\n\t"
        "addi %0, %0, 4\n\t"

        "vfmacc.vf v0, fa0, v2\n\t"
        "vfmacc.vf v1, fa1, v2\n\t"

        "addi t0, t0, -1\n\t"
        "bnez t0, 1b\n\t"

        "vse.v v0, (%2)\n\t"
        "addi %2, %2, 16\n\t"
        "vse.v v1, (%2)\n\t"
        "addi %2, %2, 16\n\t"

        :"=r"(r0),              // %0
        "=r"(k0),               // %1
        "=r"(output0_tm),       // %2
        "=r"(in_c)              // %3
        :"0"(r0),
        "1"(k0),
        "2"(output0_tm),
        "3"(in_c)
        :"cc", "memory", "v0", "v1", "v2", "fa0", "fa1", "t0"
    );
}
for (; t < tiles; t++) {
    __fp16 *r0 = img_tm2 + t * in_c;
    __fp16 *k0 = kernel0_tm + r * in_c * 8;

    asm volatile(
        "vsetvli zero, zero, e16, m1\n\t"
        "mv t0, %3\n\t"         // t0 = in_c
        "vmv.v.x v0, zero\n\t"  // clear

        "1:\n\t"
        "vle.v v1, (%1)\n\t"
        "addi %1, %1, 16\n\t"

        "flh fa0, (%0)\n\t"
        "addi %0, %0, 2\n\t"

        "vfmacc.vf v0, fa0, v1\n\t"

        "addi t0, t0, -1\n\t"
        "bnez t0, 1b\n\t"

        "vse.v v0, (%2)\n\t"
        "addi %2, %2, 16\n\t"

        :"=r"(r0),              // %0
        "=r"(k0),               // %1
        "=r"(output0_tm),       // %2
        "=r"(in_c)              // %3
        :"0"(r0),
        "1"(k0),
        "2"(output0_tm),
        "3"(in_c)
        :"cc", "memory", "v0", "v1", "fa0", "t0"
    );
}
}
}
csi_mem_free(input_tm2_buf);

/*************************** transform output ****************************/
// output_tm1_buf: [out_c/8, out_h6, out_w6, 8]
__fp16 *output_tm1_buf = (__fp16 *)csi_mem_alloc(out_c * block_h * block_w * 6 * 6 * sizeof(__fp16));

/*
    AT = {
        { 1  1  1   1   1    1     1     0 };
        { 0  1  -1  2   -2   1/2   -1/2  0 };
        { 0  1  1   4   4    1/4   1/4   0 };
        { 0  1  -1  8   -8   1/8   -1/8  0 };
        { 0  1  1   16  16   1/16  1/16  0 };
        { 0  1  -1  32  -32  1/32  -1/32 1 }
    };

    AT = {
        { 1  1  1   1   1    32  32   0 };
        { 0  1  -1  2   -2   16  -16  0 };
        { 0  1  1   4   4    8   8    0 };
        { 0  1  -1  8   -8   4   -4   0 };
        { 0  1  1   16  16   2   2    0 };
        { 0  1  -1  32  -32  1   -1   1 }
    };
*/

#pragma omp parallel for num_threads(1)
for (int p = 0; p < out_c / 8; p++) {
    __fp16 *bias_tmp = bias_data + p * 8;

    __fp16 *out0_tm = output_dot_buf + p * 64 * block_h * block_w * 8;  // p-th channel: dot-product result, before output transform
    __fp16 *out0 = output_tm1_buf + p * 6*block_h * 6*block_w * 8;      // p-th channel of the transformed output

    __fp16 *tmp1 = (__fp16 *)csi_mem_alloc(6 * 8 * 8 * sizeof(__fp16));
    // __fp16 tmp[6][8][8];

    int out_w6 = block_w * 6;

    for (int i = 0; i < block_h; i++) {
        for (int j = 0; j < block_w; j++) {
            __fp16 *output0_tm_0 = out0_tm + (i * block_w + j) * 8;     // start addr of the 8*8 block
            __fp16 *output0 = out0 + (i * block_w * 6 * 6 + j * 6) * 8; // start addr of the 6*6 output block

            __fp16 ratio[] = {2.0, 4.0, 8.0, 16.0, 32.0};
            __fp16 *ratio_ptr = ratio;

            asm volatile(
                "vsetvli zero, zero, e16, m1\n\t"
                "li t0, 8\n\t"          // m = 8
                "mv t5, %2\n\t"         // t5 = tmp start addr
                "slli t1, %4, 4\n\t"    // t1 = tiles * 8 * 2
                "slli t2, %4, 7\n\t"    // t2 = tiles * 8 * 8 * 2 bytes

                "flh fa0, 0(%3)\n\t"    // fa0 = 2
                "flh fa1, 2(%3)\n\t"    // fa1 = 4
                "flh fa2, 4(%3)\n\t"    // fa2 = 8
                "flh fa3, 6(%3)\n\t"    // fa3 = 16
                "flh fa4, 8(%3)\n\t"    // fa4 = 32

                "mv s1, %0\n\t"

                "1:\n\t"                // shape : [6 * 8] * [8 * 8] = [6 * 8]
                "mv a0, t5\n\t"         // tmp[0][m]
                "addi a1, a0, 128\n\t"  // tmp[1][m]
                "addi a2, a1, 128\n\t"  // tmp[2][m]
                "addi a3, a2, 128\n\t"  // tmp[3][m]
                "addi a4, a3, 128\n\t"  // tmp[4][m]
                "addi a5, a4, 128\n\t"  // tmp[5][m]

                "vle.v v0, (s1)\n\t"    // r00
                "add s1, s1, t1\n\t"
                "vle.v v1, (s1)\n\t"    // r01
                "add s1, s1, t1\n\t"
                "vle.v v2, (s1)\n\t"    // r02
                "add s1, s1, t1\n\t"
                "vle.v v3, (s1)\n\t"    // r03
                "add s1, s1, t1\n\t"
                "vle.v v4, (s1)\n\t"    // r04
                "add s1, s1, t1\n\t"
                "vle.v v5, (s1)\n\t"    // r05
                "add s1, s1, t1\n\t"
                "vle.v v6, (s1)\n\t"    // r06
                "add s1, s1,
t1\n\t" "vle.v v7, (s1)\n\t" // r07 "add s1, s1, t1\n\t" //--------------------------------------------- "vfadd.vv v8, v1, v2\n\t" // r01 + r02 = tmp024a "vfsub.vv v9, v1, v2\n\t" // r01 - r02 = tmp135a "vfadd.vv v10, v3, v4\n\t" // r03 + r04 = tmp024b "vfsub.vv v11, v3, v4\n\t" // r03 - r04 = tmp135b "vfadd.vv v12, v5, v6\n\t" // r05 + r06 = tmp024c "vfsub.vv v13, v5, v6\n\t" // r05 - r06 = tmp135c "vfadd.vv v0, v0, v8\n\t" // r00 + tmp024a "vfadd.vv v7, v7, v9\n\t" // r07 + tmp135a "vmv.v.v v14, v10\n\t" // v14 = tmp024b "vmv.v.v v26, v8\n\t" // v26 = tmp024a "vmv.v.v v28, v8\n\t" // v28 = tmp024a "vfmacc.vf v26, fa1, v10\n\t" // tmp024a + tmp024b * 4 "vfmacc.vf v14, fa4, v12\n\t" // tmp024b + tmp024c * 32 "vfmacc.vf v28, fa3, v10\n\t" // tmp024a + tmp024b * 16 "vmv.v.v v15, v13\n\t" // v15 = tmp135c "vmv.v.v v25, v9\n\t" // v25 = tmp135a "vmv.v.v v27, v9\n\t" // v27 = tmp135a "vfadd.vv v24, v0, v14\n\t" // r00 + tmp024a + tmp024b + tmp024c * 32 = tmp[0][m] "vfmacc.vf v25, fa0, v11\n\t" // tmp135a + tmp135b * 2 "vfmacc.vf v27, fa2, v11\n\t" // tmp135a + tmp135b * 8 //--------------------------------------------- "vse.v v24, (a0)\n\t" "vfmacc.vf v26, fa2, v12\n\t" // tmp024a + tmp024b * 4 + tmp024c * 8 = tmp[2][m] "vfmacc.vf v28, fa0, v12\n\t" // tmp024a + tmp024b * 16 + tmp024c + tmp024c = tmp[4][m] "vfmacc.vf v15, fa4, v11\n\t" // tmp135b * 32 + tmp135c "vse.v v26, (a2)\n\t" "vse.v v28, (a4)\n\t" //--------------------------------------------- "vfmacc.vf v25, fa3, v13\n\t" // tmp135a + tmp135b * 2 + tmp135c * 16 = tmp[1][m] "vfmacc.vf v27, fa1, v13\n\t" // tmp135a + tmp135b * 8 + tmp135c * 4 = tmp[3][m] "vfadd.vv v29, v7, v15\n\t" // r07 + tmp135a + tmp135b * 32 + tmp135c "vse.v v25, (a1)\n\t" "vse.v v27, (a3)\n\t" "vse.v v29, (a5)\n\t" "addi t5, t5, 16\n\t" // tmp[0][0] --> tmp[0][1] "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "2:\n\t" "mv t5, %2\n\t" // tmp start addr "li t0, 6\n\t" // m = 6 "slli t1, %5, 4\n\t" // t1 = out_w6 * 8 * 2bytes "vle.v v16, (%6)\n\t" // load 8 channel bias data "3:\n\t" // shape : [6 * 8] * [6 * 8] = [6 * 6] "mv a0, %1\n\t" "addi a1, a0, 16\n\t" "addi a2, a1, 16\n\t" "addi a3, a2, 16\n\t" "addi a4, a3, 16\n\t" "addi a5, a4, 16\n\t" "vle.v v0, (t5)\n\t" // tmp[m][0] "addi t5, t5, 16\n\t" "vle.v v1, (t5)\n\t" // tmp[m][1] "addi t5, t5, 16\n\t" "vle.v v2, (t5)\n\t" // tmp[m][2] "addi t5, t5, 16\n\t" "vle.v v3, (t5)\n\t" // tmp[m][3] "addi t5, t5, 16\n\t" "vle.v v4, (t5)\n\t" // tmp[m][4] "addi t5, t5, 16\n\t" "vle.v v5, (t5)\n\t" // tmp[m][5] "addi t5, t5, 16\n\t" "vle.v v6, (t5)\n\t" // tmp[m][6] "addi t5, t5, 16\n\t" "vle.v v7, (t5)\n\t" // tmp[m][7] "addi t5, t5, 16\n\t" //--------------------------------------------- "vfadd.vv v8, v1, v2\n\t" // tmp[m][1] + tmp[m][2] = tmp024a "vfsub.vv v9, v1, v2\n\t" // tmp[m][1] - tmp[m][2] = tmp135a "vfadd.vv v10, v3, v4\n\t" // tmp[m][3] + tmp[m][4] = tmp024b "vfsub.vv v11, v3, v4\n\t" // tmp[m][3] - tmp[m][4] = tmp135b "vfadd.vv v12, v5, v6\n\t" // tmp[m][5] + tmp[m][6] = tmp024c "vfsub.vv v13, v5, v6\n\t" // tmp[m][5] - tmp[m][6] = tmp135c "vfadd.vv v0, v0, v8\n\t" // tmp[m][0] + tmp024a "vfadd.vv v7, v7, v9\n\t" // tmp[m][7] + tmp135a "vmv.v.v v14, v10\n\t" // v14 = tmp024b "vmv.v.v v26, v8\n\t" // v26 = tmp024a "vmv.v.v v28, v8\n\t" // v28 = tmp024a "vfmacc.vf v26, fa1, v10\n\t" // tmp024a + tmp024b * 4 "vfmacc.vf v14, fa4, v12\n\t" // tmp024b + tmp024c * 32 "vfmacc.vf v28, fa3, v10\n\t" // tmp024a + tmp024b * 16 "vmv.v.v v15, v13\n\t" // v15 = tmp135c "vmv.v.v v25, v9\n\t" // v25 = tmp135a "vmv.v.v v27, v9\n\t" // 
v27 = tmp135a "vfadd.vv v24, v0, v14\n\t" // tmp[m][0] + tmp024a + tmp024b + tmp024c * 32 = tmp[0][m] "vfmacc.vf v25, fa0, v11\n\t" // tmp135a + tmp135b * 2 "vfmacc.vf v27, fa2, v11\n\t" // tmp135a + tmp135b * 8 //--------------------------------------------- "vfadd.vv v24, v24, v16\n\t" // + bias "vfmacc.vf v26, fa2, v12\n\t" // tmp024a + tmp024b * 4 + tmp024c * 8 = tmp[2][m] "vfmacc.vf v28, fa0, v12\n\t" // tmp024a + tmp024b * 16 + tmp024c + tmp024c = tmp[4][m] "vfmacc.vf v15, fa4, v11\n\t" // tmp135b * 32 + tmp135c "vse.v v24, (a0)\n\t" "vfmacc.vf v25, fa3, v13\n\t" // tmp135a + tmp135b * 2 + tmp135c * 16 = tmp[1][m] "vfmacc.vf v27, fa1, v13\n\t" // tmp135a + tmp135b * 8 + tmp135c * 4 = tmp[3][m] "vfadd.vv v26, v26, v16\n\t" // + bias "vfadd.vv v28, v28, v16\n\t" // + bias "vfadd.vv v29, v7, v15\n\t" // tmp[m][7] + tmp135a + tmp135b * 32 + tmp135c "vse.v v26, (a2)\n\t" "vse.v v28, (a4)\n\t" //--------------------------------------------- "vfadd.vv v25, v25, v16\n\t" // + bias "vfadd.vv v27, v27, v16\n\t" // + bias "vfadd.vv v29, v29, v16\n\t" // + bias "vse.v v25, (a1)\n\t" "vse.v v27, (a3)\n\t" "vse.v v29, (a5)\n\t" "add %1, %1, t1\n\t" "addi t0, t0, -1\n\t" "bnez t0, 3b" :"=r"(output0_tm_0), // %0 "=r"(output0), // %1 "=r"(tmp1), // %2 "=r"(ratio_ptr), // %3 "=r"(tiles), // %4 "=r"(out_w6), // %5 "=r"(bias_tmp) // %6 :"0"(output0_tm_0), "1"(output0), "2"(tmp1), "3"(ratio_ptr), "4"(tiles), "5"(out_w6), "6"(bias_tmp) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v24", "v25", "v26", "v27", "v28", "v29", "t0", "t1", "t2", "t5", "s1", "a0", "a1", "a2", "a3", "a4", "a5", "fa0", "fa1", "fa2", "fa3", "fa4" ); } } csi_mem_free(tmp1); } csi_mem_free(output_dot_buf); // crop the output after transform: cut extra part (right , bottom) csi_c906_crop_output_pack8to1_fp16(output_tm1_buf, output_data, out_c, out_h, out_w, block_h * 6, block_w * 6); output_data += output_size; csi_mem_free(output_tm1_buf); } if (!flag_bias) { csi_mem_free(bias_data); bias_data = NULL; } return CSINN_TRUE; } void csi_c906_conv3x3s1_winograd43_transform_kernel_pack8_fp16(struct csi_tensor *o_kernel, struct csi_tensor *t_kernel) { int32_t outch = o_kernel->dim[0]; int32_t inch = o_kernel->dim[1]; __fp16 *kernel_data = (__fp16 *)o_kernel->data; // for kernel transform buf, 3x3 --> 6x6 __fp16 *kernel_tm = (__fp16 *)csi_mem_alloc(outch * inch * 6 * 6 * sizeof(__fp16)); // kernel transform matrix: G const __fp16 ktm[6][3] = { { 1.0f/4, 0.0f, 0.0f}, { -1.0f/6, -1.0f/6, -1.0f/6}, { -1.0f/6, 1.0f/6, -1.0f/6}, { 1.0f/24, 1.0f/12, 1.0f/6}, { 1.0f/24, -1.0f/12, 1.0f/6}, { 0.0f, 0.0f, 1.0f} }; csi_tensor_copy(t_kernel, o_kernel); for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const __fp16* kernel0 = kernel_data + p * inch * 9 + q * 9; __fp16* kernel_tm0 = kernel_tm + p * inch * 36 + q * 36; // transform kernel const __fp16 *k0 = kernel0; const __fp16 *k1 = kernel0 + 3; const __fp16 *k2 = kernel0 + 6; // h : first compute the transport matrix tmp = (g * GT)T __fp16 tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { __fp16* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // [O, I, 6, 6] --> [O/4, 6*6, 
I, 4]
__fp16 *kernel_tm_pack4 = (__fp16 *)csi_mem_alloc(outch * inch * 6 * 6 * sizeof(__fp16));
t_kernel->data = kernel_tm_pack4;

for (int oc = 0; oc < outch / 8; oc++) {
    __fp16 *g0 = kernel_tm_pack4 + oc * 36 * inch * 8;

    const __fp16 *k0 = kernel_tm + oc * 36 * inch * 8;
    const __fp16 *k1 = k0 + 36 * inch;
    const __fp16 *k2 = k1 + 36 * inch;
    const __fp16 *k3 = k2 + 36 * inch;
    const __fp16 *k4 = k3 + 36 * inch;
    const __fp16 *k5 = k4 + 36 * inch;
    const __fp16 *k6 = k5 + 36 * inch;
    const __fp16 *k7 = k6 + 36 * inch;

    for (int k = 0; k < 36; k++) {
        __fp16 *g00 = g0 + k * inch * 8;
        for (int ic = 0; ic < inch / 8; ic++) {
            for (int i = 0; i < 8; i++) {
                const __fp16 *k00 = k0 + (ic * 8 + i) * 36;
                const __fp16 *k10 = k1 + (ic * 8 + i) * 36;
                const __fp16 *k20 = k2 + (ic * 8 + i) * 36;
                const __fp16 *k30 = k3 + (ic * 8 + i) * 36;
                const __fp16 *k40 = k4 + (ic * 8 + i) * 36;
                const __fp16 *k50 = k5 + (ic * 8 + i) * 36;
                const __fp16 *k60 = k6 + (ic * 8 + i) * 36;
                const __fp16 *k70 = k7 + (ic * 8 + i) * 36;

                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];
                g00[4] = k40[k];
                g00[5] = k50[k];
                g00[6] = k60[k];
                g00[7] = k70[k];
                g00 += 8;
            }
        }
    }
}
csi_mem_free(kernel_tm);
}

int csi_c906_conv3x3s1_winograd43_pack8_fp16(struct csi_tensor *input,
                                             struct csi_tensor *output,
                                             struct csi_tensor *kernel,
                                             struct csi_tensor *bias,
                                             struct conv2d_params *params)
{
    __fp16 *input_data = (__fp16 *)input->data;
    __fp16 *output_data = (__fp16 *)output->data;
    __fp16 *kernel_data = (__fp16 *)params->conv_extra.kernel_tm->data;
    __fp16 *bias_data = (__fp16 *)bias->data;

    // param
    int kernel_h = kernel->dim[2];
    int kernel_w = kernel->dim[3];
    int stride_h = params->stride_height;
    int stride_w = params->stride_width;
    int dilation_h = params->dilation_height;
    int dilation_w = params->dilation_width;
    int pad_left = params->pad_left;
    int pad_top = params->pad_top;

    int batch = input->dim[0];
    int in_c = input->dim[1];
    int in_h = input->dim[2];
    int in_w = input->dim[3];
    int input_size = in_c * in_h * in_w;
    int kernel_size = in_c * kernel_h * kernel_w;

    int out_c = kernel->dim[0];
    int out_h = output->dim[2];
    int out_w = output->dim[3];
    int output_size = out_c * out_h * out_w;

    // winograd param
    int block_h = (out_h + 3) / 4;
    int block_w = (out_w + 3) / 4;

    int padded_in_h = block_h * 4 + 2;  // each 4x4 output block needs a 6x6 input patch (kernel 3x3, stride 1), hence block * 4 + 2
    int padded_in_w = block_w * 4 + 2;
    int padded_in_hw = padded_in_h * padded_in_w;   // element size after padding per channel

    /****************************** bias *****************************/
    bool flag_bias = 1; // default: conv2d layer include bias
    if (bias_data == NULL) {
        flag_bias = 0;
        bias_data = (__fp16 *)csi_mem_alloc(out_c * sizeof(__fp16));
    }

    for(int n = 0; n < batch; n++) {

        // pad buffer: [in_c/8 h w 8]
        __fp16 *input_padd_buf = (__fp16 *)csi_mem_alloc(in_c * padded_in_hw * sizeof(__fp16));

        // pad input
        csi_c906_pad_input_pack1to8_fp16(input_data, input_padd_buf, in_c, in_h, in_w, padded_in_h, padded_in_w, pad_top, pad_left);
        input_data += input_size;

        // input transform buffer1: [in_c/8, 36, blocks, 8]
        __fp16 *input_tm1_buf = (__fp16 *)csi_mem_alloc(in_c * block_h * block_w * 6 * 6 * sizeof(__fp16));

        /****************************** transform input *****************************/
        /*
            BT = {
                { 4   0   -5   0   1  0 };
                { 0  -4   -4   1   1  0 };
                { 0   4   -4  -1   1  0 };
                { 0  -2   -1   2   1  0 };
                { 0   2   -1  -2   1  0 };
                { 0   4    0  -5   0  1 }
            };
        */

        int tiles = block_h * block_w;

        #pragma omp parallel for num_threads(1)
        for(int q = 0; q < in_c / 8; q++) {     // channels are packed in groups of 8
            __fp16 *img0 = input_padd_buf + q * padded_in_h *
padded_in_w * 8; // feature map after padding - q channel __fp16 *img0_tm = input_tm1_buf + q * 36 * tiles * 8; // transform and interleave - q channel __fp16 *tmp = (__fp16 *)csi_mem_alloc(6 * 6 * 8 * sizeof(__fp16)); for(int i = 0; i < block_h; i++) { for(int j = 0; j < block_w; j++) { __fp16 *r0 = img0 + (i * padded_in_w * 4 + j * 4) * 8; // feature map after padding 6*6 start addr __fp16 *r0_tm = img0_tm + (i * block_w + j) * 8; // input_tm1 6*6 block start addr __fp16 ratio[] = {4, -4, 2, -2, -5}; // note: in fact cannot be output constrain __fp16 *ratio_ptr = ratio; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "li t0, 6\n\t" // m = 6 "mv t5, %2\n\t" // t5 = tmp start addr "slli t1, %4, 4\n\t" // t1 = padded_in_w * 8 * 2 bytes "flh fa0, 0(%3)\n\t" // fa0 = 4 "flh fa1, 2(%3)\n\t" // fa1 = -4 "flh fa2, 4(%3)\n\t" // fa2 = 2 "flh fa3, 6(%3)\n\t" // fa3 = -2 "flh fa4, 8(%3)\n\t" // fa4 = -5 "1:\n\t" "mv s1, %0\n\t" // s1 = r00 addr "mv a0, t5\n\t" // tmp[0][m] "addi a1, a0, 96\n\t" // tmp[1][m] "addi a2, a1, 96\n\t" // tmp[2][m] "addi a3, a2, 96\n\t" // tmp[3][m] "addi a4, a3, 96\n\t" // tmp[4][m] "addi a5, a4, 96\n\t" // tmp[5][m] "vle.v v0, (s1)\n\t" // r00 "addi s1, s1, 16\n\t" "vle.v v1, (s1)\n\t" // r01 "addi s1, s1, 16\n\t" "vle.v v2, (s1)\n\t" // r02 "addi s1, s1, 16\n\t" "vle.v v3, (s1)\n\t" // r03 "addi s1, s1, 16\n\t" "vle.v v4, (s1)\n\t" // r04 "addi s1, s1, 16\n\t" "vle.v v5, (s1)\n\t" // r05 "addi s1, s1, 16\n\t" "vmv.v.v v24, v4\n\t" "vmv.v.v v29, v5\n\t" //--------------------------------------------- "vfmacc.vf v24, fa0, v0\n\t" // r04 + 4 * r00 "vfmacc.vf v24, fa4, v2\n\t" // r04 + 4 * r00 - 5 * r02 "vse.v v24, (a0)\n\t" //--------------------------------------------- "vfadd.vv v25, v3, v4\n\t" // r03 + r04 "vfadd.vv v6, v1, v2\n\t" // r01 + r02 "vfmacc.vf v25, fa1, v6\n\t" // r03 + r04 - 4 * (r01 - r02) "vse.v v25, (a1)\n\t" //--------------------------------------------- "vfsub.vv v26, v4, v3\n\t" // r04 - r03 "vfsub.vv v7, v1, v2\n\t" // r01 - r02 "vfmacc.vf v26, fa0, v7\n\t" // r04 - r03 + 4 * (r01 - r02) "vse.v v26, (a2)\n\t" //--------------------------------------------- "vfsub.vv v8, v1, v3\n\t" // r01 - r03 "vfsub.vv v27, v4, v2\n\t" // r04 - r02 "vfsub.vv v28, v4, v2\n\t" // r04 - r02 "vfmacc.vf v27, fa3, v8\n\t" // r04 - r02 - 2 * (r01 - r03) "vse.v v27, (a3)\n\t" "vfmacc.vf v28, fa2, v8\n\t" // r04 - r02 + 2 * (r01 - r03) "vse.v v28, (a4)\n\t" //--------------------------------------------- "vfmacc.vf v29, fa0, v1\n\t" // r05 + 4 * r01 "vfmacc.vf v29, fa4, v3\n\t" // r05 + 4 * r01 - 5 * r03 "vse.v v29, (a5)\n\t" //--------------------------------------------- "add %0, %0, t1\n\t" // padding feature map 6*6 next line addr "addi t5, t5, 16\n\t" // tmp[0][0] --> tmp[0][1] "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "2:\n\t" "mv t5, %2\n\t" // tmp start addr "li t0, 6\n\t" // m = 6 "slli t1, %5, 4\n\t" // t1 = tiles * 8 * 2 bytes "mulw t2, t0, t1\n\t" // t2 = tiles * 6 blocks * 8 channels * 2 bytes "3:\n\t" "mv a0, %1\n\t" // r0_tm_0 "add a1, a0, t1\n\t" // r0_tm_1 "add a2, a1, t1\n\t" // r0_tm_2 "add a3, a2, t1\n\t" // r0_tm_3 "add a4, a3, t1\n\t" // r0_tm_4 "add a5, a4, t1\n\t" // r0_tm_5 "vle.v v0, (t5)\n\t" // tmp[m][0] "addi t5, t5, 16\n\t" "vle.v v1, (t5)\n\t" // tmp[m][1] "addi t5, t5, 16\n\t" "vle.v v2, (t5)\n\t" // tmp[m][2] "addi t5, t5, 16\n\t" "vle.v v3, (t5)\n\t" // tmp[m][3] "addi t5, t5, 16\n\t" "vle.v v4, (t5)\n\t" // tmp[m][4] "addi t5, t5, 16\n\t" "vle.v v5, (t5)\n\t" // tmp[m][5] "addi t5, t5, 16\n\t" "vmv.v.v v24, v4\n\t" "vmv.v.v v29, v5\n\t" 
//--------------------------------------------- "vfmacc.vf v24, fa0, v0\n\t" // r04 + 4 * r00 "vfmacc.vf v24, fa4, v2\n\t" // r04 * 4 * r00 - 5 * r02 "vse.v v24, (a0)\n\t" //--------------------------------------------- "vfadd.vv v25, v3, v4\n\t" // r03 + r04 "vfadd.vv v6, v1, v2\n\t" // r01 + r02 "vfmacc.vf v25, fa1, v6\n\t" // r03 + r04 - 4 * (r01 - r02) "vse.v v25, (a1)\n\t" //--------------------------------------------- "vfsub.vv v26, v4, v3\n\t" // r04 - r03 "vfsub.vv v7, v1, v2\n\t" // r01 - r02 "vfmacc.vf v26, fa0, v7\n\t" // r04 - r03 + 4 * (r01 - r02) "vse.v v26, (a2)\n\t" //--------------------------------------------- "vfsub.vv v8, v1, v3\n\t" // r01 - r03 "vfsub.vv v27, v4, v2\n\t" // r04 - r02 "vfsub.vv v28, v4, v2\n\t" // r04 - r02 "vfmacc.vf v27, fa3, v8\n\t" // r04 - r02 - 2 * (r01 - r03) "vse.v v27, (a3)\n\t" "vfmacc.vf v28, fa2, v8\n\t" // r04 - r02 + 2 * (r01 - r03) "vse.v v28, (a4)\n\t" //--------------------------------------------- "vfmacc.vf v29, fa0, v1\n\t" // r05 + 4 * r01 "vfmacc.vf v29, fa4, v3\n\t" // r05 + 4 * r01 - 5 * r03 "vse.v v29, (a5)\n\t" //--------------------------------------------- "add %1, %1, t2\n\t" "addi t0, t0, -1\n\t" "bnez t0, 3b" :"=r"(r0), // %0 "=r"(r0_tm), // %1 "=r"(tmp), // %2 "=r"(ratio_ptr), // %3 "=r"(padded_in_w), // %4 "=r"(tiles) // %5 :"0"(r0), "1"(r0_tm), "2"(tmp), "3"(ratio_ptr), "4"(padded_in_w), "5"(tiles) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v24", "v25", "v26", "v27", "v28", "v29", "t0", "t1", "t2", "t5", "s1", "a0", "a1", "a2", "a3", "a4", "a5", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5" ); } } csi_mem_free(tmp); } csi_mem_free(input_padd_buf); /*********************************** dot ***************************************/ // reorder input_tm1_buf __fp16 *input_tm2_buf = (__fp16 *)csi_mem_alloc(36 * tiles * in_c * sizeof(__fp16)); #pragma omp parallel for num_threads(1) for (int r = 0; r < 36; r++) { __fp16 *img_tm2 = input_tm2_buf + r * tiles * in_c; // input_tm2 r channel data int t = 0; for (; t + 7 < tiles; t += 8) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; //----------------- for (int q = 0; q < in_c / 8; q++) { for (int l = 0; l < 8; l++) { tm2[0] = tm1[l]; tm2[1] = tm1[l + 8 * 1]; tm2[2] = tm1[l + 8 * 2]; tm2[3] = tm1[l + 8 * 3]; tm2[4] = tm1[l + 8 * 4]; tm2[5] = tm1[l + 8 * 5]; tm2[6] = tm1[l + 8 * 6]; tm2[7] = tm1[l + 8 * 7]; tm2 += 8; } tm1 += 36 * tiles * 8; } } for (; t + 3 < tiles; t += 4) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; for (int q = 0; q < in_c / 8; q++) { for (int l = 0; l < 8; l++) { tm2[0] = tm1[l]; tm2[1] = tm1[l + 8 * 1]; tm2[2] = tm1[l + 8 * 2]; tm2[3] = tm1[l + 8 * 3]; tm2 += 4; } tm1 += 36 * tiles * 8; } } for (; t + 1 < tiles; t += 2) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; for (int q = 0; q < in_c / 8; q++) { for (int l = 0; l < 8; l++) { tm2[0] = tm1[l]; tm2[1] = tm1[l + 8]; tm2 += 2; } tm1 += 36 * tiles * 8; } } for (; t < tiles; t++) { __fp16 *tm2 = img_tm2 + t * in_c; // img_tm2 row data __fp16 *tm1 = input_tm1_buf; tm1 += (r * tiles + t) * 8; for (int q = 0; q < in_c / 8; q++) { for (int l = 0; l < 8; l++) { tm2[0] = tm1[l]; tm2++; } tm1 += 36 * tiles * 8; } } } csi_mem_free(input_tm1_buf); // output_dot_buf: [out_c/4, 36, blocks, 4] __fp16 *output_dot_buf = (__fp16 *)csi_mem_alloc(out_c * block_h * block_w * 6 * 6 * sizeof(__fp16)); 
#pragma omp parallel for num_threads(1) for (int p = 0; p < out_c / 8; p++) { __fp16 *output0_tm = output_dot_buf + p * 36 * tiles * 8; // 8 channel dot output __fp16 *kernel0_tm = kernel_data + p * 36 * in_c * 8; // 8 channel kernel for (int r = 0; r < 36; r++) { __fp16 *img_tm2 = input_tm2_buf + r * tiles * in_c; // img_tm2 第r个channel int t = 0; for (; t + 7 < tiles; t += 8) { __fp16 *r0 = img_tm2 + t * in_c; __fp16 *k0 = kernel0_tm + r * in_c * 8; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mv t0, %3\n\t" // t0 = in_c "vmv.v.x v0, zero\n\t" "vmv.v.x v1, zero\n\t" "vmv.v.x v2, zero\n\t" "vmv.v.x v3, zero\n\t" "vmv.v.x v4, zero\n\t" "vmv.v.x v5, zero\n\t" "vmv.v.x v6, zero\n\t" "vmv.v.x v7, zero\n\t" // clear "1:\n\t" "flh fa0, (%0)\n\t" "flh fa1, 2(%0)\n\t" "flh fa2, 4(%0)\n\t" "flh fa3, 6(%0)\n\t" "flh fa4, 8(%0)\n\t" "flh fa5, 10(%0)\n\t" "flh fa6, 12(%0)\n\t" "flh fa7, 14(%0)\n\t" "addi %0, %0, 16\n\t" "vle.v v8, (%1)\n\t" "addi %1, %1, 16\n\t" "vfmacc.vf v0, fa0, v8\n\t" "vfmacc.vf v1, fa1, v8\n\t" "vfmacc.vf v2, fa2, v8\n\t" "vfmacc.vf v3, fa3, v8\n\t" "vfmacc.vf v4, fa4, v8\n\t" "vfmacc.vf v5, fa5, v8\n\t" "vfmacc.vf v6, fa6, v8\n\t" "vfmacc.vf v7, fa7, v8\n\t" "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v1, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v2, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v3, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v4, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v5, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v6, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v7, (%2)\n\t" "addi %2, %2, 16\n\t" :"=r"(r0), // %0 "=r"(k0), // %1 "=r"(output0_tm), // %2 "=r"(in_c) // %3 :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", "fa6", "fa7", "t0" ); } for (; t + 3 < tiles; t += 4) { __fp16 *r0 = img_tm2 + t * in_c; __fp16 *k0 = kernel0_tm + r * in_c * 8; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mv t0, %3\n\t" // t0 = in_c "vmv.v.x v0, zero\n\t" "vmv.v.x v1, zero\n\t" "vmv.v.x v2, zero\n\t" "vmv.v.x v3, zero\n\t" // clear "1:\n\t" "flh fa0, (%0)\n\t" "flh fa1, 2(%0)\n\t" "flh fa2, 4(%0)\n\t" "flh fa3, 6(%0)\n\t" "addi %0, %0, 8\n\t" "vle.v v4, (%1)\n\t" "addi %1, %1, 16\n\t" "vfmacc.vf v0, fa0, v4\n\t" "vfmacc.vf v1, fa1, v4\n\t" "vfmacc.vf v2, fa2, v4\n\t" "vfmacc.vf v3, fa3, v4\n\t" "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v1, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v2, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v3, (%2)\n\t" "addi %2, %2, 16\n\t" :"=r"(r0), // %0 "=r"(k0), // %1 "=r"(output0_tm), // %2 "=r"(in_c) // %3 :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "fa0", "fa1", "fa2", "fa3", "t0" ); } for (; t + 1 < tiles; t += 2) { __fp16 *r0 = img_tm2 + t * in_c; __fp16 *k0 = kernel0_tm + r * in_c * 8; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mv t0, %3\n\t" // t0 = in_c "vmv.v.x v0, zero\n\t" "vmv.v.x v1, zero\n\t" // clear "1:\n\t" "flh fa0, (%0)\n\t" "flh fa1, 2(%0)\n\t" "addi %0, %0, 4\n\t" "vle.v v2, (%1)\n\t" "addi %1, %1, 16\n\t" "vfmacc.vf v0, fa0, v2\n\t" "vfmacc.vf v1, fa1, v2\n\t" "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t" "vse.v v1, (%2)\n\t" "addi %2, %2, 16\n\t" :"=r"(r0), // %0 "=r"(k0), // %1 "=r"(output0_tm), // %2 "=r"(in_c) // %3 :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c) :"cc", "memory", "v0", "v1", "v2", "fa0", "fa1", "t0" ); } for (; t < tiles; t++) { __fp16 *r0 = 
img_tm2 + t * in_c; __fp16 *k0 = kernel0_tm + r * in_c * 8; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "mv t0, %3\n\t" // t0 = in_c "vmv.v.x v0, zero\n\t" // clear "1:\n\t" "flh fa0, (%0)\n\t" "addi %0, %0, 2\n\t" "vle.v v1, (%1)\n\t" "addi %1, %1, 16\n\t" "vfmacc.vf v0, fa0, v1\n\t" "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "vse.v v0, (%2)\n\t" "addi %2, %2, 16\n\t" :"=r"(r0), // %0 "=r"(k0), // %1 "=r"(output0_tm), // %2 "=r"(in_c) // %3 :"0"(r0), "1"(k0), "2"(output0_tm), "3"(in_c) :"cc", "memory", "v0", "v1", "fa0", "t0" ); } } } csi_mem_free(input_tm2_buf); /*************************** transform output ****************************/ // output_tm1_buf: [out_c/8, out_h4, out_w4, 8] __fp16 *output_tm1_buf = (__fp16 *)csi_mem_alloc(out_c * block_h * block_w * 4 * 4 * sizeof(__fp16)); /* AT = { { 1 1 1 1 1 0 }, { 0 1 -1 2 -2 0 }, { 0 1 1 4 4 0 }, { 0 1 -1 8 -8 1 } }; */ #pragma omp parallel for num_threads(1) for (int p = 0; p < out_c / 8; p++) { __fp16 *bias_tmp = bias_data + p * 8; __fp16 *out0_tm = output_dot_buf + p * 36 * block_h * block_w * 8; // p-th channel before the output transform (after dot) __fp16 *out0 = output_tm1_buf + p * 4*block_h * 4*block_w * 8; // p-th channel of the transformed output __fp16 *tmp1 = (__fp16 *)csi_mem_alloc(4 * 6 * 8 * sizeof(__fp16)); int out_w4 = block_w * 4; for (int i = 0; i < block_h; i++) { for (int j = 0; j < block_w; j++) { __fp16 *output0_tm_0 = out0_tm + (i * block_w + j) * 8; // start address of the 6*6 tile __fp16 *output0 = out0 + (i * block_w * 4 * 4 + j * 4) * 8; // start address of the 4*4 output __fp16 ratio[] = {2.0, 4.0, 8.0}; __fp16 *ratio_ptr = ratio; asm volatile( "vsetvli zero, zero, e16, m1\n\t" "li t0, 6\n\t" // m = 6 "mv t5, %2\n\t" // t5 = tmp start addr "slli t1, %4, 4\n\t" // t1 = tiles * 8 * 2 "mulw t2, t0, t1\n\t" // t2 = tiles * 6 blocks * 8 channels * 2 bytes "flh fa0, 0(%3)\n\t" // fa0 = 2 "flh fa1, 2(%3)\n\t" // fa1 = 4 "flh fa2, 4(%3)\n\t" // fa2 = 8 "mv s1, %0\n\t" "1:\n\t" // shape : [4 * 6] * [6 * 6] = [4 * 6] "mv a0, t5\n\t" // tmp[0][m] "addi a1, a0, 96\n\t" // tmp[1][m] "addi a2, a1, 96\n\t" // tmp[2][m] "addi a3, a2, 96\n\t" // tmp[3][m] "vle.v v0, (s1)\n\t" // r00 "add s1, s1, t1\n\t" "vle.v v1, (s1)\n\t" // r01 "add s1, s1, t1\n\t" "vle.v v2, (s1)\n\t" // r02 "add s1, s1, t1\n\t" "vle.v v3, (s1)\n\t" // r03 "add s1, s1, t1\n\t" "vle.v v4, (s1)\n\t" // r04 "add s1, s1, t1\n\t" "vle.v v5, (s1)\n\t" // r05 "add s1, s1, t1\n\t" //--------------------------------------------- "vfadd.vv v26, v1, v2\n\t" // r01 + r02 = tmp02a "vfsub.vv v6, v1, v2\n\t" // r01 - r02 = tmp13a "vfadd.vv v7, v3, v4\n\t" // r03 + r04 = tmp02b "vfsub.vv v8, v3, v4\n\t" // r03 - r04 = tmp13b "vmv.v.v v25, v6\n\t" // v25 = tmp13a //--------------------------------------------- "vfadd.vv v24, v0, v26\n\t" // r00 + tmp02a "vfadd.vv v24, v24, v7\n\t" // r00 + tmp02a + tmp02b "vse.v v24, (a0)\n\t" "vfmacc.vf v25, fa0, v8\n\t" // tmp13a + 2 * tmp13b "vse.v v25, (a1)\n\t" "vfmacc.vf v26, fa1, v7\n\t" // tmp02a + 4 * tmp02b "vse.v v26, (a2)\n\t" "vfadd.vv v27, v5, v6\n\t" // r05 + tmp13a "vfmacc.vf v27, fa2, v8\n\t" // r05 + tmp13a + 8 * tmp13b "vse.v v27, (a3)\n\t" //--------------------------------------------- "addi t5, t5, 16\n\t" // tmp[0][0] --> tmp[0][1] "addi t0, t0, -1\n\t" "bnez t0, 1b\n\t" "2:\n\t" "mv t5, %2\n\t" // tmp start addr "li t0, 4\n\t" // m = 4 "slli t1, %5, 4\n\t" // t1 = out_w4 * 8 * 2 bytes "vle.v v16, (%6)\n\t" // load 8 channel bias data "3:\n\t" // shape : [4 * 6] * [6 * 4] = [4 * 4] "mv a0, %1\n\t" "addi a1, a0, 16\n\t" "addi a2, a1, 16\n\t" "addi a3, a2, 16\n\t" "vle.v v0, (t5)\n\t" // tmp[m][0] "addi t5,
t5, 16\n\t" "vle.v v1, (t5)\n\t" // tmp[m][1] "addi t5, t5, 16\n\t" "vle.v v2, (t5)\n\t" // tmp[m][2] "addi t5, t5, 16\n\t" "vle.v v3, (t5)\n\t" // tmp[m][3] "addi t5, t5, 16\n\t" "vle.v v4, (t5)\n\t" // tmp[m][4] "addi t5, t5, 16\n\t" "vle.v v5, (t5)\n\t" // tmp[m][5] "addi t5, t5, 16\n\t" //--------------------------------------------- "vfadd.vv v26, v1, v2\n\t" // r01 + r02 = tmp02a "vfsub.vv v6, v1, v2\n\t" // r01 - r02 = tmp13a "vfadd.vv v7, v3, v4\n\t" // r03 + r04 = tmp02b "vfsub.vv v8, v3, v4\n\t" // r03 - r04 = tmp13b "vmv.v.v v25, v6\n\t" // v25 = tmp13a //--------------------------------------------- "vfadd.vv v24, v0, v26\n\t" // r00 + tmp02a "vfadd.vv v24, v24, v7\n\t" // r00 + tmp02a + tmp02b "vfadd.vv v24, v24, v16\n\t" // add bias "vse.v v24, (a0)\n\t" "vfmacc.vf v25, fa0, v8\n\t" // tmp13a + 2 * tmp13b "vfadd.vv v25, v25, v16\n\t" // add bias "vse.v v25, (a1)\n\t" "vfmacc.vf v26, fa1, v7\n\t" // tmp02a + 4 * tmp02b "vfadd.vv v26, v26, v16\n\t" // add bias "vse.v v26, (a2)\n\t" "vfadd.vv v27, v5, v6\n\t" // r05 + tmp13a "vfmacc.vf v27, fa2, v8\n\t" // r05 + tmp13a + 8 * tmp13b "vfadd.vv v27, v27, v16\n\t" // add bias "vse.v v27, (a3)\n\t" "add %1, %1, t1\n\t" "addi t0, t0, -1\n\t" "bnez t0, 3b" :"=r"(output0_tm_0), // %0 "=r"(output0), // %1 "=r"(tmp1), // %2 "=r"(ratio_ptr), // %3 "=r"(tiles), // %4 "=r"(out_w4), // %5 "=r"(bias_tmp) // %6 :"0"(output0_tm_0), "1"(output0), "2"(tmp1), "3"(ratio_ptr), "4"(tiles), "5"(out_w4), "6"(bias_tmp) :"cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v24", "v25", "v26", "v27", "t0", "t1", "t2", "t5", "s1", "a0", "a1", "a2", "a3", "fa0", "fa1", "fa2" ); } } csi_mem_free(tmp1); } csi_mem_free(output_dot_buf); // crop the output after transform: cut the extra part (right, bottom) csi_c906_crop_output_pack8to1_fp16(output_tm1_buf, output_data, out_c, out_h, out_w, block_h * 4, block_w * 4); output_data += output_size; csi_mem_free(output_tm1_buf); } if (!flag_bias) { csi_mem_free(bias_data); bias_data = NULL; } return CSINN_TRUE; } void csi_c906_conv3x3s1_fp16(struct csi_tensor *input, struct csi_tensor *output, struct csi_tensor *kernel, struct csi_tensor *bias, struct conv2d_params *params) { /* to do */ } void csi_c906_conv3x3s2_fp16(struct csi_tensor *input, struct csi_tensor *output, struct csi_tensor *kernel, struct csi_tensor *bias, struct conv2d_params *params) { /* to do */ }
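/* Editor's note: a scalar reference for the 6x6 -> 4x4 output transform that the assembly above implements (tmp = AT * M row-wise, out = tmp * A column-wise, bias added to every output), using the AT matrix quoted in the source comment. A sketch for one channel of one tile; the name and plain-array interface are illustrative only. */
#if 0
static void winograd43_output_tile_sketch(const __fp16 m[6][6], __fp16 out[4][4], __fp16 bias)
{
    __fp16 tmp[4][6];
    for (int j = 0; j < 6; j++) {  /* tmp = AT * m : combine the six rows */
        __fp16 t02a = m[1][j] + m[2][j], t13a = m[1][j] - m[2][j];
        __fp16 t02b = m[3][j] + m[4][j], t13b = m[3][j] - m[4][j];
        tmp[0][j] = m[0][j] + t02a + t02b;
        tmp[1][j] = t13a + 2 * t13b;
        tmp[2][j] = t02a + 4 * t02b;
        tmp[3][j] = m[5][j] + t13a + 8 * t13b;
    }
    for (int i = 0; i < 4; i++) {  /* out = tmp * A : combine the six columns */
        __fp16 t02a = tmp[i][1] + tmp[i][2], t13a = tmp[i][1] - tmp[i][2];
        __fp16 t02b = tmp[i][3] + tmp[i][4], t13b = tmp[i][3] - tmp[i][4];
        out[i][0] = tmp[i][0] + t02a + t02b + bias;
        out[i][1] = t13a + 2 * t13b + bias;
        out[i][2] = t02a + 4 * t02b + bias;
        out[i][3] = tmp[i][5] + t13a + 8 * t13b + bias;
    }
}
#endif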
par_gsmg.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Geometrically smooth interpolation multigrid * *****************************************************************************/ #include <stdio.h> #include <math.h> #include "_hypre_parcsr_ls.h" #include "par_amg.h" #include "_hypre_lapack.h" #ifndef ABS #define ABS(x) ((x)>0 ? (x) : -(x)) #endif #ifndef MAX #define MAX(a,b) ((a)>(b)?(a):(b)) #endif static HYPRE_Real mydnrm2(HYPRE_Int n, HYPRE_Real *x) { HYPRE_Real temp = 0.; HYPRE_Int i; for (i=0; i<n; i++) temp = temp + x[i]*x[i]; return sqrt(temp); } static void mydscal(HYPRE_Int n, HYPRE_Real a, HYPRE_Real *x) { HYPRE_Int i; for (i=0; i<n; i++) x[i] = a * x[i]; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixFillSmooth * - fill in smooth matrix * - this function will scale the smooth vectors *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixFillSmooth(HYPRE_Int nsamples, HYPRE_Real *samples, hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A, HYPRE_Int num_functions, HYPRE_Int *dof_func) { hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int i, j, k, ii, index, start; HYPRE_Int num_cols_offd; HYPRE_Int num_sends; HYPRE_Int *dof_func_offd; HYPRE_Int *int_buf_data; HYPRE_Real temp; HYPRE_Real *p; HYPRE_Real *p_offd; HYPRE_Real *p_ptr; HYPRE_Real *buf_data; HYPRE_Real nm; #if 0 HYPRE_Real mx = 0., my = 1.e+10; #endif /* normalize each sample vector and divide by number of samples */ for (k=0; k<nsamples; k++) { nm = mydnrm2(n, samples+k*n); nm = 1./nm/nsamples; mydscal(n, nm, samples+k*n); } num_cols_offd = hypre_CSRMatrixNumCols(S_offd); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); p_offd = hypre_CTAlloc(HYPRE_Real, nsamples*num_cols_offd, HYPRE_MEMORY_HOST); p_ptr = p_offd; p = samples; for (k = 0; k < nsamples; k++) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) buf_data[index++] = p[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data, p_offd); hypre_ParCSRCommHandleDestroy(comm_handle); p = p+n; p_offd = p_offd+num_cols_offd; } hypre_TFree(buf_data, HYPRE_MEMORY_HOST); if (num_functions > 1) { 
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } for (i = 0; i < n; i++) { for (j = S_diag_i[i]+1; j < S_diag_i[i+1]; j++) { ii = S_diag_j[j]; /* only interpolate between like functions */ if (num_functions > 1 && dof_func[i] != dof_func[ii]) { S_diag_data[j] = 0.; continue; } /* explicit zeros */ if (A_diag_data[j] == 0.) { S_diag_data[j] = 0.; continue; } temp = 0.; p = samples; for (k=0; k<nsamples; k++) { temp = temp + ABS(p[i] - p[ii]); p = p + n; } /* explicit zeros in matrix may cause this */ if (temp == 0.) { S_diag_data[j] = 0.; continue; } temp = 1./temp; /* reciprocal */ #if 0 my = hypre_min(my,temp); mx = hypre_max(mx,temp); #endif S_diag_data[j] = temp; } for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++) { ii = S_offd_j[j]; /* only interpolate between like functions */ if (num_functions > 1 && dof_func[i] != dof_func_offd[ii]) { S_offd_data[j] = 0.; continue; } /* explicit zeros */ if (A_offd_data[j] == 0.) { S_offd_data[j] = 0.; continue; } temp = 0.; p = samples; p_offd = p_ptr; for (k=0; k<nsamples; k++) { temp = temp + ABS(p[i] - p_offd[ii]); p = p + n; p_offd = p_offd + num_cols_offd; } /* explicit zeros in matrix may cause this */ if (temp == 0.) { S_offd_data[j] = 0.; continue; } temp = 1./temp; /* reciprocal */ #if 0 my = hypre_min(my,temp); mx = hypre_max(mx,temp); #endif S_offd_data[j] = temp; } } #if 0 hypre_printf("MIN, MAX: %f %f\n", my, mx); #endif hypre_TFree(p_ptr, HYPRE_MEMORY_HOST); if (num_functions > 1) hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); return 0; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixChooseThresh *--------------------------------------------------------------------------*/ HYPRE_Real hypre_ParCSRMatrixChooseThresh(hypre_ParCSRMatrix *S) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int i, j; HYPRE_Real mx, minimax = 1.e+10; HYPRE_Real minmin; for (i=0; i<n; i++) { mx = 0.; for (j=S_diag_i[i]; j<S_diag_i[i+1]; j++) mx = hypre_max(mx, S_diag_data[j]); for (j=S_offd_i[i]; j<S_offd_i[i+1]; j++) mx = hypre_max(mx, S_offd_data[j]); if (mx != 0.) 
minimax = hypre_min(minimax, mx); } hypre_MPI_Allreduce(&minimax, &minmin, 1, HYPRE_MPI_REAL, hypre_MPI_MIN, comm); return minmin; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixThreshold *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixThreshold(hypre_ParCSRMatrix *A, HYPRE_Real thresh) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_nonzeros_diag = A_diag_i[n]; HYPRE_Int num_nonzeros_offd = A_offd_i[n]; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; HYPRE_Real *S_diag_data; HYPRE_Int *S_offd_i; HYPRE_Int *S_offd_j; HYPRE_Real *S_offd_data; HYPRE_Int count, i, jS, jA; /* first count the number of nonzeros we will need */ count = 0; for (i=0; i<num_nonzeros_diag; i++) if (A_diag_data[i] >= thresh) count++; /* allocate vectors */ S_diag_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST); S_diag_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); S_diag_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST); jS = 0; for (i = 0; i < n; i++) { S_diag_i[i] = jS; for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] >= thresh) { S_diag_data[jS] = A_diag_data[jA]; S_diag_j[jS] = A_diag_j[jA]; jS++; } } } S_diag_i[n] = jS; hypre_CSRMatrixNumNonzeros(A_diag) = jS; /* free the vectors we don't need */ hypre_TFree(A_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(A_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(A_diag_data, HYPRE_MEMORY_HOST); /* assign the new vectors */ hypre_CSRMatrixI(A_diag) = S_diag_i; hypre_CSRMatrixJ(A_diag) = S_diag_j; hypre_CSRMatrixData(A_diag) = S_diag_data; /* * Offd part */ /* first count the number of nonzeros we will need */ count = 0; for (i=0; i<num_nonzeros_offd; i++) if (A_offd_data[i] >= thresh) count++; /* allocate vectors */ S_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST); S_offd_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); S_offd_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST); jS = 0; for (i = 0; i < n; i++) { S_offd_i[i] = jS; for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] >= thresh) { S_offd_data[jS] = A_offd_data[jA]; S_offd_j[jS] = A_offd_j[jA]; jS++; } } } S_offd_i[n] = jS; hypre_CSRMatrixNumNonzeros(A_offd) = jS; /* free the vectors we don't need */ hypre_TFree(A_offd_i, HYPRE_MEMORY_HOST); hypre_TFree(A_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(A_offd_data, HYPRE_MEMORY_HOST); /* assign the new vectors */ hypre_CSRMatrixI(A_offd) = S_offd_i; hypre_CSRMatrixJ(A_offd) = S_offd_j; hypre_CSRMatrixData(A_offd) = S_offd_data; return 0; } /*-------------------------------------------------------------------------- * CreateSmoothVecs * - smoother depends on the level being used *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSmoothVecs(void *data, hypre_ParCSRMatrix *A, HYPRE_Int num_sweeps, HYPRE_Int level, HYPRE_Real **SmoothVecs_p) { hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag 
= hypre_ParCSRMatrixDiag(A); hypre_ParVector *Zero; hypre_ParVector *Temp; hypre_ParVector *U; hypre_ParVector *Qtemp = NULL; HYPRE_Int i; HYPRE_BigInt n = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int n_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt *starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int sample; HYPRE_Int nsamples = hypre_ParAMGDataNumSamples(amg_data); HYPRE_Int ret; HYPRE_Real *datax, *bp, *p; HYPRE_Int rlx_type; HYPRE_Int smooth_type; HYPRE_Int smooth_option = 0; HYPRE_Int smooth_num_levels; HYPRE_Solver *smoother; HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data); HYPRE_Int num_threads; num_threads = hypre_NumThreads(); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (debug_flag >= 1) hypre_printf("Creating smooth dirs, %d sweeps, %d samples\n", num_sweeps, nsamples); smooth_type = hypre_ParAMGDataSmoothType(amg_data); smooth_num_levels = hypre_ParAMGDataSmoothNumLevels(amg_data); if (smooth_num_levels > level) { smooth_option = smooth_type; smoother = hypre_ParAMGDataSmoother(amg_data); num_sweeps = hypre_ParAMGDataSmoothNumSweeps(amg_data); } rlx_type = hypre_ParAMGDataGridRelaxType(amg_data)[0]; /* rlx_wt = hypre_ParAMGDataRelaxWeight(amg_data)[level]; */ /* omega = hypre_ParAMGDataOmega(amg_data)[level]; */ /* generate par vectors */ Zero = hypre_ParVectorCreate(comm, n, starts); hypre_ParVectorSetPartitioningOwner(Zero,0); hypre_ParVectorInitialize(Zero); datax = hypre_VectorData(hypre_ParVectorLocalVector(Zero)); for (i=0; i<n_local; i++) datax[i] = 0.; Temp = hypre_ParVectorCreate(comm, n, starts); hypre_ParVectorSetPartitioningOwner(Temp,0); hypre_ParVectorInitialize(Temp); datax = hypre_VectorData(hypre_ParVectorLocalVector(Temp)); for (i=0; i<n_local; i++) datax[i] = 0.; U = hypre_ParVectorCreate(comm, n, starts); hypre_ParVectorSetPartitioningOwner(U,0); hypre_ParVectorInitialize(U); datax = hypre_VectorData(hypre_ParVectorLocalVector(U)); if (num_threads > 1) { Qtemp = hypre_ParVectorCreate(comm, n, starts); hypre_ParVectorInitialize(Qtemp); hypre_ParVectorSetPartitioningOwner(Qtemp,0); } /* allocate space for the vectors */ bp = hypre_CTAlloc(HYPRE_Real, nsamples*n_local, HYPRE_MEMORY_HOST); p = bp; /* generate random vectors */ for (sample=0; sample<nsamples; sample++) { for (i=0; i<n_local; i++) datax[i] = hypre_Rand() - .5; for (i=0; i<num_sweeps; i++) { if (smooth_option == 6) { HYPRE_SchwarzSolve(smoother[level], (HYPRE_ParCSRMatrix) A, (HYPRE_ParVector) Zero, (HYPRE_ParVector) U); } else { ret = hypre_BoomerAMGRelax(A, Zero, NULL /*CFmarker*/, rlx_type , 0 /*rel pts*/, 1.0 /*weight*/, 1.0 /*omega*/, NULL, U, Temp, Qtemp); hypre_assert(ret == 0); } } /* copy out the solution */ for (i=0; i<n_local; i++) *p++ = datax[i]; } hypre_ParVectorDestroy(Zero); hypre_ParVectorDestroy(Temp); hypre_ParVectorDestroy(U); if (num_threads > 1) hypre_ParVectorDestroy(Qtemp); *SmoothVecs_p = bp; return 0; } /*-------------------------------------------------------------------------- * CreateSmoothDirs replaces CreateS in AMG * - smoother depends on the level being used * - in this version, CreateSmoothVecs must be called prior to this function *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSmoothDirs(void *data, hypre_ParCSRMatrix *A, HYPRE_Real *SmoothVecs, HYPRE_Real thresh, HYPRE_Int num_functions, HYPRE_Int *dof_func, hypre_ParCSRMatrix **S_ptr) { hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data; hypre_ParCSRMatrix *S; HYPRE_Real minimax; 
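/* Editor's note: hypre_BoomerAMGCreateSmoothVecs above exposes algebraically smooth error by relaxing random vectors against a zero right-hand side (hypre_BoomerAMGRelax, dispatching on rlx_type, does the real work). A minimal sketch of one such sweep, written as weighted Jacobi on CSR arrays for clarity; the function and its zero-RHS simplification are illustrative, not the hypre routine. */
#if 0
static void jacobi_zero_rhs_sweep_sketch(HYPRE_Int n, const HYPRE_Int *ia, const HYPRE_Int *ja,
                                         const HYPRE_Real *a, HYPRE_Real *u, HYPRE_Real *u_new,
                                         HYPRE_Real w)
{
   HYPRE_Int i, j;
   for (i = 0; i < n; i++)
   {
      HYPRE_Real diag = 0., offsum = 0.;
      for (j = ia[i]; j < ia[i+1]; j++)
      {
         if (ja[j] == i) diag = a[j];
         else offsum += a[j] * u[ja[j]];
      }
      /* with b = 0 the update reduces to u_i <- (1-w)*u_i - w*offsum/diag */
      u_new[i] = (1. - w) * u[i] - w * offsum / diag;
   }
   for (i = 0; i < n; i++) u[i] = u_new[i];
}
#endif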
HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data); S = hypre_ParCSRMatrixClone(A, 0); /* Traverse S and fill in differences */ hypre_ParCSRMatrixFillSmooth( hypre_ParAMGDataNumSamples(amg_data), SmoothVecs, S, A, num_functions, dof_func); minimax = hypre_ParCSRMatrixChooseThresh(S); if (debug_flag >= 1) hypre_printf("Minimax chosen: %f\n", minimax); /* Threshold and compress */ hypre_ParCSRMatrixThreshold(S, thresh*minimax); *S_ptr = S; return 0; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGNormalizeVecs * * Normalize the smooth vectors and also make the first vector the constant * vector * * inputs: * n = length of smooth vectors * num = number of smooth vectors * V = smooth vectors (array of length n*num), also an output * * output: * V = adjusted smooth vectors *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGNormalizeVecs(HYPRE_Int n, HYPRE_Int num, HYPRE_Real *V) { HYPRE_Int i, j; HYPRE_Real nrm; /* change first vector to the constant vector */ for (i=0; i<n; i++) V[i] = 1.0; for (j=0; j<num; j++) { nrm = mydnrm2(n, &V[j*n]); mydscal(n, 1./nrm, &V[j*n]); } return 0; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGFitVectors * * Construct interpolation weights based on fitting smooth vectors * * inputs: * ip = row number of row in P being processed (0-based) * n = length of smooth vectors * num = number of smooth vectors * V = smooth vectors (array of length n*num), also an output * nc = number of coarse grid points * ind = indices of coarse grid points (0-based) * * output: * val = interpolation weights for the coarse grid points * V = smooth vectors; first one has been changed to constant vector; * vectors have also been normalized; this is also an input *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGFitVectors(HYPRE_Int ip, HYPRE_Int n, HYPRE_Int num, const HYPRE_Real *V, HYPRE_Int nc, const HYPRE_Int *ind, HYPRE_Real *val) { HYPRE_Real *a, *b; HYPRE_Real *ap; HYPRE_Int i, j; HYPRE_Real *work; HYPRE_Int work_size; HYPRE_Int info; HYPRE_Int temp; /* hypre_printf("Fit: row %d, n %d num %d, nc = %d ", ip, n, num, nc); for (i=0; i<nc; i++) hypre_printf("%d ", ind[i]); hypre_printf("\n"); */ if (nc == 0) return 0; work_size = 2000*64; work = hypre_CTAlloc(HYPRE_Real, work_size, HYPRE_MEMORY_HOST); a = hypre_CTAlloc(HYPRE_Real, num*nc, HYPRE_MEMORY_HOST); ap = a; for (j=0; j<nc; j++) { for (i=0; i<num; i++) { *ap = V[i*n+ind[j]]; ap++; } } temp = MAX(nc, num); b = hypre_CTAlloc(HYPRE_Real, temp, HYPRE_MEMORY_HOST); for (i=0; i<num; i++) b[i] = V[i*n+ip]; { char trans = 'N'; HYPRE_Int one = 1; hypre_dgels(&trans, &num, &nc, &one, a, &num, b, &temp, work, &work_size, &info); if (info != 0) hypre_error_w_msg(HYPRE_ERROR_GENERIC,"par_gsmg: dgels returned %d\n"); /* copy solution into output vector */ for (j=0; j<nc; j++) val[j] = b[j]; } hypre_TFree(b, HYPRE_MEMORY_HOST); hypre_TFree(a, HYPRE_MEMORY_HOST); hypre_TFree(work, HYPRE_MEMORY_HOST); return info; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildInterpLS * * Interpolation built from fitting smooth vectors * - sequential version only *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildInterpLS( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int 
num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int num_smooth, HYPRE_Real *SmoothVecs, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); /* HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); */ HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); /* HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); */ HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd); /* HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); */ hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd; HYPRE_Int *dof_func_offd = NULL; hypre_CSRMatrix *S_ext; //HYPRE_Real *S_ext_data; //HYPRE_Int *S_ext_i; //HYPRE_BigInt *S_ext_j; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker; /* HYPRE_Int *P_marker_offd; */ HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; /* HYPRE_Int jj_begin_row,jj_begin_row_offd; HYPRE_Int jj_end_row,jj_end_row_offd; */ HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; //HYPRE_BigInt my_first_cpt; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; //HYPRE_BigInt *big_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (num_functions > 1 && num_cols_S_offd) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(S); comm_pkg = hypre_ParCSRMatrixCommPkg(S); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (num_functions > 1) { index = 0; for (i = 0; i < num_sends; i++) { start = 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*---------------------------------------------------------------------- * Get the ghost rows of S *---------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_procs > 1) { S_ext = hypre_ParCSRMatrixExtractBExt(S,S,1); //S_ext_i = hypre_CSRMatrixI(S_ext); //S_ext_j = hypre_CSRMatrixBigJ(S_ext); //S_ext_data = hypre_CSRMatrixData(S_ext); } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i. *--------------------------------------------------------------------*/ else { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { /* removed */ } } } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); /*fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_S_offd, HYPRE_MEMORY_HOST); big_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) big_buf_data[index++] = my_first_cpt+(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds();*/ /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,jj_counter,jj_counter_offd) HYPRE_SMP_SCHEDULE #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { HYPRE_Int kk; HYPRE_Int indices[1000]; /* kludge */ /* Diagonal part of P */ P_diag_i[i] = jj_counter; kk = 0; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i1]; jj_counter++; indices[kk] = i1; kk++; } } hypre_BoomerAMGFitVectors(i, n_fine, num_smooth, SmoothVecs, kk, indices, &P_diag_data[P_diag_i[i]]); /* Off-Diagonal part of P */ /* undone */ } } } P_diag_i[i] = jj_counter; /* check that this is in right place for threads */ P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(S), total_global_cpts, hypre_ParCSRMatrixColStarts(S), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < P_offd_size; i++) P_marker[i] = P_offd_j[i]; hypre_qsort0(P_marker, 0, P_offd_size-1); num_cols_P_offd = 1; index = P_marker[0]; for (i=1; i < P_offd_size; i++) { if (P_marker[i] > index) { index = P_marker[i]; P_marker[num_cols_P_offd++] = index; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_P_offd; i++) tmp_map_offd[i] = P_marker[i]; #ifdef 
HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,S,fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); //hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext); return(0); } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildInterpGSMG * * Difference with hypre_BoomerAMGBuildInterp is that S contains values * and is used to build interpolation weights. Matrix A is not used. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildInterpGSMG( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int *tmp_map_offd = NULL; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *CF_marker_offd; HYPRE_Int *dof_func_offd = NULL; hypre_CSRMatrix *S_ext; HYPRE_Real *S_ext_data; HYPRE_Int *S_ext_i; HYPRE_BigInt *S_ext_j; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int jj_begin_row,jj_begin_row_offd; HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int strong_f_marker; HYPRE_Int *fine_to_coarse; HYPRE_Int *coarse_counter; //HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; //HYPRE_BigInt my_first_cpt; HYPRE_BigInt big_i2; HYPRE_Int i,i1,i2; HYPRE_Int j,jl,jj,jj1; HYPRE_Int start; HYPRE_Int c_num; HYPRE_Real sum; HYPRE_Real distribute; HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(S); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(S_diag); 
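/* Editor's note: the P_offd column compression used at the end of hypre_BoomerAMGBuildInterpLS above (and repeated at the end of this function), isolated as a sketch: copy the off-process column indices, sort, strip duplicates to obtain the local-to-global map, then remap each entry to its local id by binary search. The wrapper name is hypothetical; the hypre_qsort0/hypre_BinarySearch calls mirror the surrounding code, which assumes P_offd_size > 0. */
#if 0
static HYPRE_Int compress_offd_columns_sketch(HYPRE_Int *P_offd_j, HYPRE_Int P_offd_size,
                                              HYPRE_Int *marker /* scratch, size P_offd_size */)
{
   HYPRE_Int i, num_cols = 1;
   for (i = 0; i < P_offd_size; i++) marker[i] = P_offd_j[i];
   hypre_qsort0(marker, 0, P_offd_size-1);
   for (i = 1; i < P_offd_size; i++)   /* keep each distinct value once */
      if (marker[i] > marker[num_cols-1]) marker[num_cols++] = marker[i];
   for (i = 0; i < P_offd_size; i++)   /* remap global id -> local id */
      P_offd_j[i] = hypre_BinarySearch(marker, P_offd_j[i], num_cols);
   return num_cols;                    /* number of distinct off-process columns */
}
#endif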
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); //my_first_cpt = num_cpts_global[0]; total_global_cpts = 0; /* we will set this later for the matrix in the setup */ /* if (myid == (num_procs -1)) total_global_cpts = coarse_pts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);*/ /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (num_functions > 1 && num_cols_S_offd) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(S); comm_pkg = hypre_ParCSRMatrixCommPkg(S); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (num_functions > 1) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*---------------------------------------------------------------------- * Get the ghost rows of S *---------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_procs > 1) { S_ext = hypre_ParCSRMatrixExtractBExt(S,S,1); S_ext_i = hypre_CSRMatrixI(S_ext); S_ext_j = hypre_CSRMatrixBigJ(S_ext); S_ext_data = hypre_CSRMatrixData(S_ext); } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i. *--------------------------------------------------------------------*/ else { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_S_offd; i++) { P_marker_offd[i] = -1; } strong_f_marker = -2; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; jj_begin_row = jj_counter; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } /*-------------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *--------------------------------------------------------------*/ else { P_marker[i1] = strong_f_marker; } } jj_end_row = jj_counter; /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; jj_begin_row_offd = jj_counter_offd; if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } /*----------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *-----------------------------------------------------------*/ else { P_marker_offd[i1] = strong_f_marker; } } } jj_end_row_offd = jj_counter_offd; /* Loop over ith row of S. 
First, the diagonal part of S */ for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += S_diag_data[jj]; } /*-------------------------------------------------------------- * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. *--------------------------------------------------------------*/ else if (P_marker[i1] == strong_f_marker) { sum = zero; /*----------------------------------------------------------- * Loop over row of S for point i1 and calculate the sum * of the connections to c-points that strongly influence i. *-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row) sum += S_diag_data[jj1]; } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd) sum += S_offd_data[jj1]; } } if (sum != 0) { distribute = S_diag_data[jj] / sum; /*----------------------------------------------------------- * Loop over row of S for point i1 and do the distribution. *-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row) P_diag_data[P_marker[i2]] += distribute * S_diag_data[jj1]; } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i2]] += distribute * S_offd_data[jj1]; } } } else { /* do nothing */ } } /*-------------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. *--------------------------------------------------------------*/ else { /* do nothing */ } } /*---------------------------------------------------------------- * Still looping over ith row of S. Next, loop over the * off-diagonal part of S *---------------------------------------------------------------*/ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker_offd[i1] >= jj_begin_row_offd) { P_offd_data[P_marker_offd[i1]] += S_offd_data[jj]; } /*------------------------------------------------------------ * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. 
*-----------------------------------------------------------*/ else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; /*--------------------------------------------------------- * Loop over row of S_ext for point i1 and calculate the sum * of the connections to c-points that strongly influence i. *---------------------------------------------------------*/ /* find row number */ c_num = S_offd_j[jj]; for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num+1]; jj1++) { big_i2 = S_ext_j[jj1]; if (big_i2 >= col_1 && big_i2 < col_n) { /* in the diagonal block */ if (P_marker[(HYPRE_Int)(big_i2-col_1)] >= jj_begin_row) sum += S_ext_data[jj1]; } else { /* in the off_diagonal block */ j = hypre_BigBinarySearch(col_map_offd,big_i2,num_cols_S_offd); if (j != -1) { if (P_marker_offd[j] >= jj_begin_row_offd) sum += S_ext_data[jj1]; } } } if (sum != 0) { distribute = S_offd_data[jj] / sum; /*--------------------------------------------------------- * Loop over row of S_ext for point i1 and do * the distribution. *--------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num+1]; jj1++) { big_i2 = S_ext_j[jj1]; if (big_i2 >= col_1 && big_i2 < col_n) /* in the diagonal block */ { if (P_marker[(HYPRE_Int)(big_i2-col_1)] >= jj_begin_row) P_diag_data[P_marker[(HYPRE_Int)(big_i2-col_1)]] += distribute * S_ext_data[jj1]; } else { /* check to see if it is in the off_diagonal block */ j = hypre_BigBinarySearch(col_map_offd,big_i2,num_cols_S_offd); if (j != -1) { if (P_marker_offd[j] >= jj_begin_row_offd) P_offd_data[P_marker_offd[j]] += distribute * S_ext_data[jj1]; } } } } else { /* do nothing */ } } /*----------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. *-----------------------------------------------------------*/ else { /* do nothing */ } } } /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. 
*-----------------------------------------------------------------*/ sum = 0.; for (jj = jj_begin_row; jj < jj_end_row; jj++) sum += P_diag_data[jj]; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) sum += P_offd_data[jj]; for (jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= sum; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= sum; } strong_f_marker--; P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(S), total_global_cpts, hypre_ParCSRMatrixColStarts(S), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < P_offd_size; i++) P_marker[i] = P_offd_j[i]; hypre_qsort0(P_marker, 0, P_offd_size-1); num_cols_P_offd = 1; index = P_marker[0]; for (i=1; i < P_offd_size; i++) { if (P_marker[i] > index) { index = P_marker[i]; P_marker[num_cols_P_offd++] = index; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_P_offd; i++) tmp_map_offd[i] = P_marker[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,S,fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext); return(0); }
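/* Editor's note: in hypre_BoomerAMGBuildInterpGSMG above, the weight of F-point i on coarse neighbor j accumulates as w_ij = s_ij + sum_{k in F_i^s} s_ik * s_kj / (sum_{m in C_i} s_km), i.e. strong F-connections are distributed to the C-points; each row of P is then divided by its total (the banner says "dividing by the diagonal", but the code divides by the row sum), so rows of P sum to one. A dense sketch of that closing normalization; the zero-sum guard is an added safety check, the source divides unconditionally. */
#if 0
static void normalize_interp_row_sketch(HYPRE_Real *w_diag, HYPRE_Int n_diag,
                                        HYPRE_Real *w_offd, HYPRE_Int n_offd)
{
   HYPRE_Real sum = 0.;
   HYPRE_Int jj;
   for (jj = 0; jj < n_diag; jj++) sum += w_diag[jj];
   for (jj = 0; jj < n_offd; jj++) sum += w_offd[jj];
   if (sum != 0.)
   {
      for (jj = 0; jj < n_diag; jj++) w_diag[jj] /= sum;  /* row of P now sums to 1 */
      for (jj = 0; jj < n_offd; jj++) w_offd[jj] /= sum;
   }
}
#endif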
/****************************************************************************** * * Geometrically smooth interpolation multigrid * *****************************************************************************/ #include <stdio.h> #include <math.h> #include "_hypre_parcsr_ls.h" #include "par_amg.h" #include "_hypre_lapack.h" #ifndef ABS #define ABS(x) ((x)>0 ? (x) : -(x)) #endif #ifndef MAX #define MAX(a,b) ((a)>(b)?(a):(b)) #endif static HYPRE_Real mydnrm2(HYPRE_Int n, HYPRE_Real * x) { HYPRE_Real temp = 0.; HYPRE_Int i; for (i = 0; i < n; i++) temp = temp + x[i] * x[i]; return sqrt(temp); } static void mydscal(HYPRE_Int n, HYPRE_Real a, HYPRE_Real * x) { HYPRE_Int i; for (i = 0; i < n; i++) x[i] = a * x[i]; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixFillSmooth * - fill in smooth matrix * - this function will scale the smooth vectors *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixFillSmooth(HYPRE_Int nsamples, HYPRE_Real * samples, hypre_ParCSRMatrix * S, hypre_ParCSRMatrix * A, HYPRE_Int num_functions, HYPRE_Int * dof_func) { hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int i, j, k, ii, index, start; HYPRE_Int num_cols_offd; HYPRE_Int num_sends; HYPRE_Int *dof_func_offd; HYPRE_Int *int_buf_data; HYPRE_Real temp; HYPRE_Real *p; HYPRE_Real *p_offd; HYPRE_Real *p_ptr; HYPRE_Real *buf_data; HYPRE_Real nm; #if 0 HYPRE_Real mx = 0., my = 1.e+10; #endif /* normalize each sample vector and divide by number of samples */ for (k = 0; k < nsamples; k++) { nm = mydnrm2(n, samples + k * n); nm = 1. 
/ nm / nsamples; mydscal(n, nm, samples + k * n); } num_cols_offd = hypre_CSRMatrixNumCols(S_offd); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); p_offd = hypre_CTAlloc(HYPRE_Real, nsamples * num_cols_offd, HYPRE_MEMORY_HOST); p_ptr = p_offd; p = samples; for (k = 0; k < nsamples; k++) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) buf_data[index++] = p[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, buf_data, p_offd); hypre_ParCSRCommHandleDestroy(comm_handle); p = p + n; p_offd = p_offd + num_cols_offd; } hypre_TFree(buf_data, HYPRE_MEMORY_HOST); if (num_functions > 1) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } for (i = 0; i < n; i++) { for (j = S_diag_i[i] + 1; j < S_diag_i[i + 1]; j++) { ii = S_diag_j[j]; /* only interpolate between like functions */ if (num_functions > 1 && dof_func[i] != dof_func[ii]) { S_diag_data[j] = 0.; continue; } /* explicit zeros */ if (A_diag_data[j] == 0.) { S_diag_data[j] = 0.; continue; } temp = 0.; p = samples; for (k = 0; k < nsamples; k++) { temp = temp + ABS(p[i] - p[ii]); p = p + n; } /* explicit zeros in matrix may cause this */ if (temp == 0.) { S_diag_data[j] = 0.; continue; } temp = 1. / temp; /* reciprocal */ #if 0 my = hypre_min(my, temp); mx = hypre_max(mx, temp); #endif S_diag_data[j] = temp; } for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++) { ii = S_offd_j[j]; /* only interpolate between like functions */ if (num_functions > 1 && dof_func[i] != dof_func_offd[ii]) { S_offd_data[j] = 0.; continue; } /* explicit zeros */ if (A_offd_data[j] == 0.) { S_offd_data[j] = 0.; continue; } temp = 0.; p = samples; p_offd = p_ptr; for (k = 0; k < nsamples; k++) { temp = temp + ABS(p[i] - p_offd[ii]); p = p + n; p_offd = p_offd + num_cols_offd; } /* explicit zeros in matrix may cause this */ if (temp == 0.) { S_offd_data[j] = 0.; continue; } temp = 1. 
/ temp; /* reciprocal */ #if 0 my = hypre_min(my, temp); mx = hypre_max(mx, temp); #endif S_offd_data[j] = temp; } } #if 0 hypre_printf("MIN, MAX: %f %f\n", my, mx); #endif hypre_TFree(p_ptr, HYPRE_MEMORY_HOST); if (num_functions > 1) hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); return 0; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixChooseThresh *--------------------------------------------------------------------------*/ HYPRE_Real hypre_ParCSRMatrixChooseThresh(hypre_ParCSRMatrix * S) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int i, j; HYPRE_Real mx, minimax = 1.e+10; HYPRE_Real minmin; for (i = 0; i < n; i++) { mx = 0.; for (j = S_diag_i[i]; j < S_diag_i[i + 1]; j++) mx = hypre_max(mx, S_diag_data[j]); for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++) mx = hypre_max(mx, S_offd_data[j]); if (mx != 0.) minimax = hypre_min(minimax, mx); } hypre_MPI_Allreduce(&minimax, &minmin, 1, HYPRE_MPI_REAL, hypre_MPI_MIN, comm); return minmin; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixThreshold *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixThreshold(hypre_ParCSRMatrix * A, HYPRE_Real thresh) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_nonzeros_diag = A_diag_i[n]; HYPRE_Int num_nonzeros_offd = A_offd_i[n]; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; HYPRE_Real *S_diag_data; HYPRE_Int *S_offd_i; HYPRE_Int *S_offd_j; HYPRE_Real *S_offd_data; HYPRE_Int count, i, jS, jA; /* first count the number of nonzeros we will need */ count = 0; for (i = 0; i < num_nonzeros_diag; i++) if (A_diag_data[i] >= thresh) count++; /* allocate vectors */ S_diag_i = hypre_CTAlloc(HYPRE_Int, n + 1, HYPRE_MEMORY_HOST); S_diag_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); S_diag_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST); jS = 0; for (i = 0; i < n; i++) { S_diag_i[i] = jS; for (jA = A_diag_i[i]; jA < A_diag_i[i + 1]; jA++) { if (A_diag_data[jA] >= thresh) { S_diag_data[jS] = A_diag_data[jA]; S_diag_j[jS] = A_diag_j[jA]; jS++; } } } S_diag_i[n] = jS; hypre_CSRMatrixNumNonzeros(A_diag) = jS; /* free the vectors we don't need */ hypre_TFree(A_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(A_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(A_diag_data, HYPRE_MEMORY_HOST); /* assign the new vectors */ hypre_CSRMatrixI(A_diag) = S_diag_i; hypre_CSRMatrixJ(A_diag) = S_diag_j; hypre_CSRMatrixData(A_diag) = S_diag_data; /* * Offd part */ /* first count the number of nonzeros we will need */ count = 0; for (i = 0; i < num_nonzeros_offd; i++) if (A_offd_data[i] >= thresh) count++; /* allocate vectors */ S_offd_i = hypre_CTAlloc(HYPRE_Int, n + 1, HYPRE_MEMORY_HOST); S_offd_j = 
hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); S_offd_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST); jS = 0; for (i = 0; i < n; i++) { S_offd_i[i] = jS; for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { if (A_offd_data[jA] >= thresh) { S_offd_data[jS] = A_offd_data[jA]; S_offd_j[jS] = A_offd_j[jA]; jS++; } } } S_offd_i[n] = jS; hypre_CSRMatrixNumNonzeros(A_offd) = jS; /* free the vectors we don't need */ hypre_TFree(A_offd_i, HYPRE_MEMORY_HOST); hypre_TFree(A_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(A_offd_data, HYPRE_MEMORY_HOST); /* assign the new vectors */ hypre_CSRMatrixI(A_offd) = S_offd_i; hypre_CSRMatrixJ(A_offd) = S_offd_j; hypre_CSRMatrixData(A_offd) = S_offd_data; return 0; } /*-------------------------------------------------------------------------- * CreateSmoothVecs * - smoother depends on the level being used *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSmoothVecs(void *data, hypre_ParCSRMatrix * A, HYPRE_Int num_sweeps, HYPRE_Int level, HYPRE_Real ** SmoothVecs_p) { hypre_ParAMGData *amg_data = (hypre_ParAMGData *) data; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_ParVector *Zero; hypre_ParVector *Temp; hypre_ParVector *U; hypre_ParVector *Qtemp = NULL; HYPRE_Int i; HYPRE_BigInt n = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int n_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt *starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int sample; HYPRE_Int nsamples = hypre_ParAMGDataNumSamples(amg_data); HYPRE_Int ret; HYPRE_Real *datax, *bp, *p; HYPRE_Int rlx_type; HYPRE_Int smooth_type; HYPRE_Int smooth_option = 0; HYPRE_Int smooth_num_levels; HYPRE_Solver *smoother; HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data); HYPRE_Int num_threads; num_threads = hypre_NumThreads(); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (debug_flag >= 1) hypre_printf("Creating smooth dirs, %d sweeps, %d samples\n", num_sweeps, nsamples); smooth_type = hypre_ParAMGDataSmoothType(amg_data); smooth_num_levels = hypre_ParAMGDataSmoothNumLevels(amg_data); if (smooth_num_levels > level) { smooth_option = smooth_type; smoother = hypre_ParAMGDataSmoother(amg_data); num_sweeps = hypre_ParAMGDataSmoothNumSweeps(amg_data); } rlx_type = hypre_ParAMGDataGridRelaxType(amg_data)[0]; /* rlx_wt = hypre_ParAMGDataRelaxWeight(amg_data)[level]; */ /* omega = hypre_ParAMGDataOmega(amg_data)[level]; */ /* generate par vectors */ Zero = hypre_ParVectorCreate(comm, n, starts); hypre_ParVectorSetPartitioningOwner(Zero, 0); hypre_ParVectorInitialize(Zero); datax = hypre_VectorData(hypre_ParVectorLocalVector(Zero)); for (i = 0; i < n_local; i++) datax[i] = 0.; Temp = hypre_ParVectorCreate(comm, n, starts); hypre_ParVectorSetPartitioningOwner(Temp, 0); hypre_ParVectorInitialize(Temp); datax = hypre_VectorData(hypre_ParVectorLocalVector(Temp)); for (i = 0; i < n_local; i++) datax[i] = 0.; U = hypre_ParVectorCreate(comm, n, starts); hypre_ParVectorSetPartitioningOwner(U, 0); hypre_ParVectorInitialize(U); datax = hypre_VectorData(hypre_ParVectorLocalVector(U)); if (num_threads > 1) { Qtemp = hypre_ParVectorCreate(comm, n, starts); hypre_ParVectorInitialize(Qtemp); hypre_ParVectorSetPartitioningOwner(Qtemp, 0); } /* allocate space for the vectors */ bp = hypre_CTAlloc(HYPRE_Real, nsamples * n_local, HYPRE_MEMORY_HOST); p = bp; /* generate random vectors */ for (sample = 
0; sample < nsamples; sample++) { for (i = 0; i < n_local; i++) datax[i] = hypre_Rand() - .5; for (i = 0; i < num_sweeps; i++) { if (smooth_option == 6) { HYPRE_SchwarzSolve(smoother[level], (HYPRE_ParCSRMatrix) A, (HYPRE_ParVector) Zero, (HYPRE_ParVector) U); } else { ret = hypre_BoomerAMGRelax(A, Zero, NULL /* CFmarker */ , rlx_type, 0 /* rel pts */ , 1.0 /* weight */ , 1.0 /* omega */ , NULL, U, Temp, Qtemp); hypre_assert(ret == 0); } } /* copy out the solution */ for (i = 0; i < n_local; i++) *p++ = datax[i]; } hypre_ParVectorDestroy(Zero); hypre_ParVectorDestroy(Temp); hypre_ParVectorDestroy(U); if (num_threads > 1) hypre_ParVectorDestroy(Qtemp); *SmoothVecs_p = bp; return 0; } /*-------------------------------------------------------------------------- * CreateSmoothDirs replaces CreateS in AMG * - smoother depends on the level being used * - in this version, CreateSmoothVecs must be called prior to this function *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSmoothDirs(void *data, hypre_ParCSRMatrix * A, HYPRE_Real * SmoothVecs, HYPRE_Real thresh, HYPRE_Int num_functions, HYPRE_Int * dof_func, hypre_ParCSRMatrix ** S_ptr) { hypre_ParAMGData *amg_data = (hypre_ParAMGData *) data; hypre_ParCSRMatrix *S; HYPRE_Real minimax; HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data); S = hypre_ParCSRMatrixClone(A, 0); /* Traverse S and fill in differences */ hypre_ParCSRMatrixFillSmooth( hypre_ParAMGDataNumSamples(amg_data), SmoothVecs, S, A, num_functions, dof_func); minimax = hypre_ParCSRMatrixChooseThresh(S); if (debug_flag >= 1) hypre_printf("Minimax chosen: %f\n", minimax); /* Threshold and compress */ hypre_ParCSRMatrixThreshold(S, thresh * minimax); *S_ptr = S; return 0; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGNormalizeVecs * * Normalize the smooth vectors and also make the first vector the constant * vector * * inputs: * n = length of smooth vectors * num = number of smooth vectors * V = smooth vectors (array of length n*num), also an output * * output: * V = adjusted smooth vectors *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGNormalizeVecs(HYPRE_Int n, HYPRE_Int num, HYPRE_Real * V) { HYPRE_Int i, j; HYPRE_Real nrm; /* change first vector to the constant vector */ for (i = 0; i < n; i++) V[i] = 1.0; for (j = 0; j < num; j++) { nrm = mydnrm2(n, &V[j * n]); mydscal(n, 1. 
/ nrm, &V[j * n]); } return 0; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGFitVectors * * Construct interpolation weights based on fitting smooth vectors * * inputs: * ip = row number of row in P being processed (0-based) * n = length of smooth vectors * num = number of smooth vectors * V = smooth vectors (array of length n*num), also an output * nc = number of coarse grid points * ind = indices of coarse grid points (0-based) * * output: * val = interpolation weights for the coarse grid points * V = smooth vectors; first one has been changed to constant vector; * vectors have also been normalized; this is also an input *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGFitVectors(HYPRE_Int ip, HYPRE_Int n, HYPRE_Int num, const HYPRE_Real * V, HYPRE_Int nc, const HYPRE_Int * ind, HYPRE_Real * val) { HYPRE_Real *a, *b; HYPRE_Real *ap; HYPRE_Int i, j; HYPRE_Real *work; HYPRE_Int work_size; HYPRE_Int info; HYPRE_Int temp; /* * hypre_printf("Fit: row %d, n %d num %d, nc = %d ", ip, n, num, nc); * for (i=0; i<nc; i++) hypre_printf("%d ", ind[i]); hypre_printf("\n"); */ if (nc == 0) return 0; work_size = 2000 * 64; work = hypre_CTAlloc(HYPRE_Real, work_size, HYPRE_MEMORY_HOST); a = hypre_CTAlloc(HYPRE_Real, num * nc, HYPRE_MEMORY_HOST); ap = a; for (j = 0; j < nc; j++) { for (i = 0; i < num; i++) { *ap = V[i * n + ind[j]]; ap++; } } temp = MAX(nc, num); b = hypre_CTAlloc(HYPRE_Real, temp, HYPRE_MEMORY_HOST); for (i = 0; i < num; i++) b[i] = V[i * n + ip]; { char trans = 'N'; HYPRE_Int one = 1; hypre_dgels(&trans, &num, &nc, &one, a, &num, b, &temp, work, &work_size, &info); if (info != 0) hypre_error_w_msg(HYPRE_ERROR_GENERIC, "par_gsmg: dgels returned %d\n"); /* copy solution into output vector */ for (j = 0; j < nc; j++) val[j] = b[j]; } hypre_TFree(b, HYPRE_MEMORY_HOST); hypre_TFree(a, HYPRE_MEMORY_HOST); hypre_TFree(work, HYPRE_MEMORY_HOST); return info; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildInterpLS * * Interpolation built from fitting smooth vectors * - sequential version only *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildInterpLS(hypre_ParCSRMatrix * A, HYPRE_Int * CF_marker, hypre_ParCSRMatrix * S, HYPRE_BigInt * num_cpts_global, HYPRE_Int num_functions, HYPRE_Int * dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int num_smooth, HYPRE_Real * SmoothVecs, hypre_ParCSRMatrix ** P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); /* HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); */ HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); /* * HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); HYPRE_Int * *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j * = hypre_CSRMatrixJ(S_offd); */ HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd); /* HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); */ hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd; HYPRE_Int *dof_func_offd = NULL; hypre_CSRMatrix *S_ext; //HYPRE_Real * S_ext_data; //HYPRE_Int * S_ext_i; //HYPRE_BigInt * S_ext_j; hypre_CSRMatrix *P_diag; hypre_CSRMatrix 
*P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker; /* HYPRE_Int *P_marker_offd; */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; /* * HYPRE_Int jj_begin_row,jj_begin_row_offd; HYPRE_Int * jj_end_row,jj_end_row_offd; */ HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt * fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; //HYPRE_BigInt my_first_cpt; HYPRE_Int i, i1; HYPRE_Int j, jl, jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; //HYPRE_BigInt * big_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); num_threads = hypre_NumThreads(); //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag == 4) wall_time = time_getWallclockSeconds(); CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (num_functions > 1 && num_cols_S_offd) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(S); comm_pkg = hypre_ParCSRMatrixCommPkg(S); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (num_functions > 1) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*---------------------------------------------------------------------- * Get the ghost rows of S *---------------------------------------------------------------------*/ if (debug_flag == 4) wall_time = time_getWallclockSeconds(); if (num_procs > 1) { S_ext = hypre_ParCSRMatrixExtractBExt(S, S, 1); //S_ext_i = hypre_CSRMatrixI(S_ext); //S_ext_j = hypre_CSRMatrixBigJ(S_ext); //S_ext_data = hypre_CSRMatrixData(S_ext); } if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n", my_id, wall_time); 
fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ for (j = 0; j < num_threads; j++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i. *--------------------------------------------------------------------*/ else { for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { /* removed */ } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i = 0; i < num_threads - 1; i++) { coarse_counter[i + 1] += coarse_counter[i]; jj_count[i + 1] += jj_count[i]; jj_count_offd[i + 1] += jj_count_offd[i]; } i = num_threads - 1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
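 * (In this sequential-only variant the actual exchange is commented out
 * below; only the local coarse_shift renumbering of fine_to_coarse is
 * performed.)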
*-----------------------------------------------------------------------*/ if (debug_flag == 4) wall_time = time_getWallclockSeconds(); /* * fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_S_offd, * HYPRE_MEMORY_HOST); big_buf_data = hypre_CTAlloc(HYPRE_BigInt, * hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), * HYPRE_MEMORY_HOST); */ for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j - 1]; size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } /* * index = 0; for (i = 0; i < num_sends; i++) { start = * hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < * hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) * big_buf_data[index++] = * my_first_cpt+(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElm * t(comm_pkg,j)]; } * * comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, * fine_to_coarse_offd); * * hypre_ParCSRCommHandleDestroy(comm_handle); * * if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; * hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", * my_id, wall_time); fflush(NULL); } * * if (debug_flag==4) wall_time = time_getWallclockSeconds(); */ /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ for (jl = 0; jl < num_threads; jl++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (jl < rest) { ns = jl * size + jl; ne = (jl + 1) * size + jl + 1; } else { ns = jl * size + rest; ne = (jl + 1) * size + rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl - 1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl - 1]; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { HYPRE_Int kk; HYPRE_Int indices[1000]; /* kludge */ /* Diagonal part of P */ P_diag_i[i] = jj_counter; kk = 0; for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i1]; jj_counter++; indices[kk] = i1; kk++; } } hypre_BoomerAMGFitVectors(i, n_fine, num_smooth, SmoothVecs, kk, indices, &P_diag_data[P_diag_i[i]]); /* Off-Diagonal part of P */ /* undone */ } } } P_diag_i[i] = jj_counter; /* check that this is in right place for * threads */ P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(S), total_global_cpts, hypre_ParCSRMatrixColStarts(S), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); for (i = 0; i < P_offd_size; i++) P_marker[i] = P_offd_j[i]; hypre_qsort0(P_marker, 0, P_offd_size - 1); num_cols_P_offd = 1; index = P_marker[0]; for (i = 1; i < P_offd_size; i++) { if (P_marker[i] > index) { index = P_marker[i]; P_marker[num_cols_P_offd++] = index; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_P_offd; i++) tmp_map_offd[i] = P_marker[i]; for (i = 0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P, S, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); //hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext); return (0); } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildInterpGSMG * * Difference with hypre_BoomerAMGBuildInterp is that S contains values * and is used to build interpolation weights. Matrix A is not used. 
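 * (S here is produced by hypre_BoomerAMGCreateSmoothDirs: its entries are
 * reciprocals of accumulated differences of the smooth test vectors, so
 * the classical C/F weight formulas below operate on those values rather
 * than on entries of A.)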
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildInterpGSMG(hypre_ParCSRMatrix * A, HYPRE_Int * CF_marker, hypre_ParCSRMatrix * S, HYPRE_BigInt * num_cpts_global, HYPRE_Int num_functions, HYPRE_Int * dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, hypre_ParCSRMatrix ** P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int *tmp_map_offd = NULL; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *CF_marker_offd; HYPRE_Int *dof_func_offd = NULL; hypre_CSRMatrix *S_ext; HYPRE_Real *S_ext_data; HYPRE_Int *S_ext_i; HYPRE_BigInt *S_ext_j; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int jj_begin_row, jj_begin_row_offd; HYPRE_Int jj_end_row, jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int strong_f_marker; HYPRE_Int *fine_to_coarse; HYPRE_Int *coarse_counter; //HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; //HYPRE_BigInt my_first_cpt; HYPRE_BigInt big_i2; HYPRE_Int i, i1, i2; HYPRE_Int j, jl, jj, jj1; HYPRE_Int start; HYPRE_Int c_num; HYPRE_Real sum; HYPRE_Real distribute; HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(S); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(S_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt) local_numrows; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); num_threads = hypre_NumThreads(); //my_first_cpt = num_cpts_global[0]; total_global_cpts = 0; /* we will set this later for the matrix in * the setup */ /* * if (myid == (num_procs -1)) total_global_cpts = coarse_pts_global[1]; * hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, * comm); */ /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag == 4) wall_time = time_getWallclockSeconds(); CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (num_functions > 1 && num_cols_S_offd) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(S); comm_pkg = hypre_ParCSRMatrixCommPkg(S); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = 
hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (num_functions > 1) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*---------------------------------------------------------------------- * Get the ghost rows of S *---------------------------------------------------------------------*/ if (debug_flag == 4) wall_time = time_getWallclockSeconds(); if (num_procs > 1) { S_ext = hypre_ParCSRMatrixExtractBExt(S, S, 1); S_ext_i = hypre_CSRMatrixI(S_ext); S_ext_j = hypre_CSRMatrixBigJ(S_ext); S_ext_data = hypre_CSRMatrixData(S_ext); } if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ for (j = 0; j < num_threads; j++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i. 
*--------------------------------------------------------------------*/ else { for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i = 0; i < num_threads - 1; i++) { coarse_counter[i + 1] += coarse_counter[i]; jj_count[i + 1] += jj_count[i]; jj_count_offd[i + 1] += jj_count_offd[i]; } i = num_threads - 1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ if (debug_flag == 4) wall_time = time_getWallclockSeconds(); /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ for (jl = 0; jl < num_threads; jl++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (jl < rest) { ns = jl * size + jl; ne = (jl + 1) * size + jl + 1; } else { ns = jl * size + rest; ne = (jl + 1) * size + rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl - 1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl - 1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_S_offd; i++) { P_marker_offd[i] = -1; } strong_f_marker = -2; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; jj_begin_row = jj_counter; for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } /*-------------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *--------------------------------------------------------------*/ else { P_marker[i1] = strong_f_marker; } } jj_end_row = jj_counter; /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; jj_begin_row_offd = jj_counter_offd; if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++) { i1 = S_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } /*----------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *-----------------------------------------------------------*/ else { P_marker_offd[i1] = strong_f_marker; } } } jj_end_row_offd = jj_counter_offd; /* Loop over ith row of S. First, the diagonal part of S */ for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += S_diag_data[jj]; } /*-------------------------------------------------------------- * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. *--------------------------------------------------------------*/ else if (P_marker[i1] == strong_f_marker) { sum = zero; /*----------------------------------------------------------- * Loop over row of S for point i1 and calculate the sum * of the connections to c-points that strongly influence i. *-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1 + 1]; jj1++) { i2 = S_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row) sum += S_diag_data[jj1]; } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1 + 1]; jj1++) { i2 = S_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd) sum += S_offd_data[jj1]; } } if (sum != 0) { distribute = S_diag_data[jj] / sum; /*----------------------------------------------------------- * Loop over row of S for point i1 and do the distribution. 
*-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1 + 1]; jj1++) { i2 = S_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row) P_diag_data[P_marker[i2]] += distribute * S_diag_data[jj1]; } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1 + 1]; jj1++) { i2 = S_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i2]] += distribute * S_offd_data[jj1]; } } } else { /* do nothing */ } } /*-------------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. *--------------------------------------------------------------*/ else { /* do nothing */ } } /*---------------------------------------------------------------- * Still looping over ith row of S. Next, loop over the * off-diagonal part of S *---------------------------------------------------------------*/ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++) { i1 = S_offd_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker_offd[i1] >= jj_begin_row_offd) { P_offd_data[P_marker_offd[i1]] += S_offd_data[jj]; } /*------------------------------------------------------------ * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. *-----------------------------------------------------------*/ else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; /*--------------------------------------------------------- * Loop over row of S_ext for point i1 and calculate the sum * of the connections to c-points that strongly influence i. *---------------------------------------------------------*/ /* find row number */ c_num = S_offd_j[jj]; for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num + 1]; jj1++) { big_i2 = S_ext_j[jj1]; if (big_i2 >= col_1 && big_i2 < col_n) { /* in the diagonal block */ if (P_marker[(HYPRE_Int) (big_i2 - col_1)] >= jj_begin_row) sum += S_ext_data[jj1]; } else { /* in the off_diagonal block */ j = hypre_BigBinarySearch(col_map_offd, big_i2, num_cols_S_offd); if (j != -1) { if (P_marker_offd[j] >= jj_begin_row_offd) sum += S_ext_data[jj1]; } } } if (sum != 0) { distribute = S_offd_data[jj] / sum; /*--------------------------------------------------------- * Loop over row of S_ext for point i1 and do * the distribution. *--------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num + 1]; jj1++) { big_i2 = S_ext_j[jj1]; if (big_i2 >= col_1 && big_i2 < col_n) /* in the diagonal block */ { if (P_marker[(HYPRE_Int) (big_i2 - col_1)] >= jj_begin_row) P_diag_data[P_marker[(HYPRE_Int) (big_i2 - col_1)]] += distribute * S_ext_data[jj1]; } else { /* * check to see if it is in the * off_diagonal block */ j = hypre_BigBinarySearch(col_map_offd, big_i2, num_cols_S_offd); if (j != -1) { if (P_marker_offd[j] >= jj_begin_row_offd) P_offd_data[P_marker_offd[j]] += distribute * S_ext_data[jj1]; } } } } else { /* do nothing */ } } /*----------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. 
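 * (Note: Case 3 is intentionally a no-op in this routine -- weak
 * connections are simply dropped and nothing is added to a diagonal
 * entry.)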
*-----------------------------------------------------------*/ else { /* do nothing */ } } } /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. *-----------------------------------------------------------------*/ sum = 0.; for (jj = jj_begin_row; jj < jj_end_row; jj++) sum += P_diag_data[jj]; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) sum += P_offd_data[jj]; for (jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= sum; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= sum; } strong_f_marker--; P_offd_i[i + 1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(S), total_global_cpts, hypre_ParCSRMatrixColStarts(S), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); for (i = 0; i < P_offd_size; i++) P_marker[i] = P_offd_j[i]; hypre_qsort0(P_marker, 0, P_offd_size - 1); num_cols_P_offd = 1; index = P_marker[0]; for (i = 1; i < P_offd_size; i++) { if (P_marker[i] > index) { index = P_marker[i]; P_marker[num_cols_P_offd++] = index; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_P_offd; i++) tmp_map_offd[i] = P_marker[i]; for (i = 0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P, S, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext); return (0); }
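/*--------------------------------------------------------------------------
 * Standalone sketch of the per-row weight bookkeeping above, on a dense
 * toy row: Case 1 accumulates a direct connection to a C-point, Case 2
 * distributes a strong F-point connection over the C-points that point
 * also reaches, and the finished row is scaled by its sum.  The values
 * stand in for S entries; names and the dense layout are illustrative
 * only -- the real routine works on distributed CSR data with separate
 * diag/offd parts.
 *--------------------------------------------------------------------------*/
#include <stdio.h>

#define NNBR 4

/* s_i[k]   : strength of connection from F-point i to neighbor k
 * is_c[k]  : 1 if neighbor k is a C-point strongly influencing i
 * s_nbr[k] : row of strengths from neighbor k to the same neighbor set
 * w[k]     : resulting interpolation weights (nonzero only at C-points) */
static void interp_row(const double s_i[NNBR], const int is_c[NNBR],
                       const double s_nbr[NNBR][NNBR], double w[NNBR])
{
   int k, m;
   double sum = 0.0;
   for (k = 0; k < NNBR; k++) w[k] = 0.0;
   for (k = 0; k < NNBR; k++)
   {
      if (is_c[k])
         w[k] += s_i[k];            /* Case 1: direct C-point connection */
      else
      {
         double csum = 0.0;         /* Case 2: strong F-point neighbor   */
         for (m = 0; m < NNBR; m++)
            if (is_c[m]) csum += s_nbr[k][m];
         if (csum != 0.0)           /* distribute s_i[k] proportionally  */
            for (m = 0; m < NNBR; m++)
               if (is_c[m]) w[m] += s_i[k] * s_nbr[k][m] / csum;
      }
   }
   for (k = 0; k < NNBR; k++) sum += w[k];
   if (sum != 0.0)                  /* normalize: the row of P sums to 1 */
      for (k = 0; k < NNBR; k++) w[k] /= sum;
}

int main(void)
{
   double s_i[NNBR]  = { 0.5, 0.3, 0.2, 0.0 };  /* i's strong connections  */
   int    is_c[NNBR] = { 1, 0, 1, 0 };          /* neighbors 0 and 2 are C */
   double s_nbr[NNBR][NNBR] = { {0.0}, { 0.6, 0.0, 0.4, 0.0 }, {0.0}, {0.0} };
   double w[NNBR];
   interp_row(s_i, is_c, s_nbr, w);
   printf("w[0] = %.3f, w[2] = %.3f\n", w[0], w[2]);  /* 0.680, 0.320 */
   return 0;
}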
/****************************************************************************** * * Geometrically smooth interpolation multigrid * *****************************************************************************/ #include <stdio.h> #include <math.h> #include "_hypre_parcsr_ls.h" #include "par_amg.h" #include "_hypre_lapack.h" #ifndef ABS #define ABS(x) ((x)>0 ? (x) : -(x)) #endif #ifndef MAX #define MAX(a,b) ((a)>(b)?(a):(b)) #endif static HYPRE_Real mydnrm2(HYPRE_Int n, HYPRE_Real * x) { HYPRE_Real temp = 0.; HYPRE_Int i; for (i = 0; i < n; i++) temp = temp + x[i] * x[i]; return sqrt(temp); } static void mydscal(HYPRE_Int n, HYPRE_Real a, HYPRE_Real * x) { HYPRE_Int i; for (i = 0; i < n; i++) x[i] = a * x[i]; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixFillSmooth * - fill in smooth matrix * - this function will scale the smooth vectors *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixFillSmooth(HYPRE_Int nsamples, HYPRE_Real * samples, hypre_ParCSRMatrix * S, hypre_ParCSRMatrix * A, HYPRE_Int num_functions, HYPRE_Int * dof_func) { hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int i, j, k, ii, index, start; HYPRE_Int num_cols_offd; HYPRE_Int num_sends; HYPRE_Int *dof_func_offd; HYPRE_Int *int_buf_data; HYPRE_Real temp; HYPRE_Real *p; HYPRE_Real *p_offd; HYPRE_Real *p_ptr; HYPRE_Real *buf_data; HYPRE_Real nm; #if 0 HYPRE_Real mx = 0., my = 1.e+10; #endif /* normalize each sample vector and divide by number of samples */ for (k = 0; k < nsamples; k++) { nm = mydnrm2(n, samples + k * n); nm = 1. 
/ nm / nsamples; mydscal(n, nm, samples + k * n); } num_cols_offd = hypre_CSRMatrixNumCols(S_offd); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); p_offd = hypre_CTAlloc(HYPRE_Real, nsamples * num_cols_offd, HYPRE_MEMORY_HOST); p_ptr = p_offd; p = samples; for (k = 0; k < nsamples; k++) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) buf_data[index++] = p[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, buf_data, p_offd); hypre_ParCSRCommHandleDestroy(comm_handle); p = p + n; p_offd = p_offd + num_cols_offd; } hypre_TFree(buf_data, HYPRE_MEMORY_HOST); if (num_functions > 1) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } for (i = 0; i < n; i++) { for (j = S_diag_i[i] + 1; j < S_diag_i[i + 1]; j++) { ii = S_diag_j[j]; /* only interpolate between like functions */ if (num_functions > 1 && dof_func[i] != dof_func[ii]) { S_diag_data[j] = 0.; continue; } /* explicit zeros */ if (A_diag_data[j] == 0.) { S_diag_data[j] = 0.; continue; } temp = 0.; p = samples; for (k = 0; k < nsamples; k++) { temp = temp + ABS(p[i] - p[ii]); p = p + n; } /* explicit zeros in matrix may cause this */ if (temp == 0.) { S_diag_data[j] = 0.; continue; } temp = 1. / temp; /* reciprocal */ #if 0 my = hypre_min(my, temp); mx = hypre_max(mx, temp); #endif S_diag_data[j] = temp; } for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++) { ii = S_offd_j[j]; /* only interpolate between like functions */ if (num_functions > 1 && dof_func[i] != dof_func_offd[ii]) { S_offd_data[j] = 0.; continue; } /* explicit zeros */ if (A_offd_data[j] == 0.) { S_offd_data[j] = 0.; continue; } temp = 0.; p = samples; p_offd = p_ptr; for (k = 0; k < nsamples; k++) { temp = temp + ABS(p[i] - p_offd[ii]); p = p + n; p_offd = p_offd + num_cols_offd; } /* explicit zeros in matrix may cause this */ if (temp == 0.) { S_offd_data[j] = 0.; continue; } temp = 1. 
/ temp; /* reciprocal */ #if 0 my = hypre_min(my, temp); mx = hypre_max(mx, temp); #endif S_offd_data[j] = temp; } } #if 0 hypre_printf("MIN, MAX: %f %f\n", my, mx); #endif hypre_TFree(p_ptr, HYPRE_MEMORY_HOST); if (num_functions > 1) hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); return 0; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixChooseThresh *--------------------------------------------------------------------------*/ HYPRE_Real hypre_ParCSRMatrixChooseThresh(hypre_ParCSRMatrix * S) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int i, j; HYPRE_Real mx, minimax = 1.e+10; HYPRE_Real minmin; for (i = 0; i < n; i++) { mx = 0.; for (j = S_diag_i[i]; j < S_diag_i[i + 1]; j++) mx = hypre_max(mx, S_diag_data[j]); for (j = S_offd_i[i]; j < S_offd_i[i + 1]; j++) mx = hypre_max(mx, S_offd_data[j]); if (mx != 0.) minimax = hypre_min(minimax, mx); } hypre_MPI_Allreduce(&minimax, &minmin, 1, HYPRE_MPI_REAL, hypre_MPI_MIN, comm); return minmin; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixThreshold *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixThreshold(hypre_ParCSRMatrix * A, HYPRE_Real thresh) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_nonzeros_diag = A_diag_i[n]; HYPRE_Int num_nonzeros_offd = A_offd_i[n]; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; HYPRE_Real *S_diag_data; HYPRE_Int *S_offd_i; HYPRE_Int *S_offd_j; HYPRE_Real *S_offd_data; HYPRE_Int count, i, jS, jA; /* first count the number of nonzeros we will need */ count = 0; for (i = 0; i < num_nonzeros_diag; i++) if (A_diag_data[i] >= thresh) count++; /* allocate vectors */ S_diag_i = hypre_CTAlloc(HYPRE_Int, n + 1, HYPRE_MEMORY_HOST); S_diag_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); S_diag_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST); jS = 0; for (i = 0; i < n; i++) { S_diag_i[i] = jS; for (jA = A_diag_i[i]; jA < A_diag_i[i + 1]; jA++) { if (A_diag_data[jA] >= thresh) { S_diag_data[jS] = A_diag_data[jA]; S_diag_j[jS] = A_diag_j[jA]; jS++; } } } S_diag_i[n] = jS; hypre_CSRMatrixNumNonzeros(A_diag) = jS; /* free the vectors we don't need */ hypre_TFree(A_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(A_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(A_diag_data, HYPRE_MEMORY_HOST); /* assign the new vectors */ hypre_CSRMatrixI(A_diag) = S_diag_i; hypre_CSRMatrixJ(A_diag) = S_diag_j; hypre_CSRMatrixData(A_diag) = S_diag_data; /* * Offd part */ /* first count the number of nonzeros we will need */ count = 0; for (i = 0; i < num_nonzeros_offd; i++) if (A_offd_data[i] >= thresh) count++; /* allocate vectors */ S_offd_i = hypre_CTAlloc(HYPRE_Int, n + 1, HYPRE_MEMORY_HOST); S_offd_j = 
hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); S_offd_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST); jS = 0; for (i = 0; i < n; i++) { S_offd_i[i] = jS; for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++) { if (A_offd_data[jA] >= thresh) { S_offd_data[jS] = A_offd_data[jA]; S_offd_j[jS] = A_offd_j[jA]; jS++; } } } S_offd_i[n] = jS; hypre_CSRMatrixNumNonzeros(A_offd) = jS; /* free the vectors we don't need */ hypre_TFree(A_offd_i, HYPRE_MEMORY_HOST); hypre_TFree(A_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(A_offd_data, HYPRE_MEMORY_HOST); /* assign the new vectors */ hypre_CSRMatrixI(A_offd) = S_offd_i; hypre_CSRMatrixJ(A_offd) = S_offd_j; hypre_CSRMatrixData(A_offd) = S_offd_data; return 0; } /*-------------------------------------------------------------------------- * CreateSmoothVecs * - smoother depends on the level being used *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSmoothVecs(void *data, hypre_ParCSRMatrix * A, HYPRE_Int num_sweeps, HYPRE_Int level, HYPRE_Real ** SmoothVecs_p) { hypre_ParAMGData *amg_data = (hypre_ParAMGData *) data; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_ParVector *Zero; hypre_ParVector *Temp; hypre_ParVector *U; hypre_ParVector *Qtemp = NULL; HYPRE_Int i; HYPRE_BigInt n = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int n_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt *starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int sample; HYPRE_Int nsamples = hypre_ParAMGDataNumSamples(amg_data); HYPRE_Int ret; HYPRE_Real *datax, *bp, *p; HYPRE_Int rlx_type; HYPRE_Int smooth_type; HYPRE_Int smooth_option = 0; HYPRE_Int smooth_num_levels; HYPRE_Solver *smoother; HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data); HYPRE_Int num_threads; num_threads = hypre_NumThreads(); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (debug_flag >= 1) hypre_printf("Creating smooth dirs, %d sweeps, %d samples\n", num_sweeps, nsamples); smooth_type = hypre_ParAMGDataSmoothType(amg_data); smooth_num_levels = hypre_ParAMGDataSmoothNumLevels(amg_data); if (smooth_num_levels > level) { smooth_option = smooth_type; smoother = hypre_ParAMGDataSmoother(amg_data); num_sweeps = hypre_ParAMGDataSmoothNumSweeps(amg_data); } rlx_type = hypre_ParAMGDataGridRelaxType(amg_data)[0]; /* rlx_wt = hypre_ParAMGDataRelaxWeight(amg_data)[level]; */ /* omega = hypre_ParAMGDataOmega(amg_data)[level]; */ /* generate par vectors */ Zero = hypre_ParVectorCreate(comm, n, starts); hypre_ParVectorSetPartitioningOwner(Zero, 0); hypre_ParVectorInitialize(Zero); datax = hypre_VectorData(hypre_ParVectorLocalVector(Zero)); for (i = 0; i < n_local; i++) datax[i] = 0.; Temp = hypre_ParVectorCreate(comm, n, starts); hypre_ParVectorSetPartitioningOwner(Temp, 0); hypre_ParVectorInitialize(Temp); datax = hypre_VectorData(hypre_ParVectorLocalVector(Temp)); for (i = 0; i < n_local; i++) datax[i] = 0.; U = hypre_ParVectorCreate(comm, n, starts); hypre_ParVectorSetPartitioningOwner(U, 0); hypre_ParVectorInitialize(U); datax = hypre_VectorData(hypre_ParVectorLocalVector(U)); if (num_threads > 1) { Qtemp = hypre_ParVectorCreate(comm, n, starts); hypre_ParVectorInitialize(Qtemp); hypre_ParVectorSetPartitioningOwner(Qtemp, 0); } /* allocate space for the vectors */ bp = hypre_CTAlloc(HYPRE_Real, nsamples * n_local, HYPRE_MEMORY_HOST); p = bp; /* generate random vectors */ for (sample = 
0; sample < nsamples; sample++) { for (i = 0; i < n_local; i++) datax[i] = hypre_Rand() - .5; for (i = 0; i < num_sweeps; i++) { if (smooth_option == 6) { HYPRE_SchwarzSolve(smoother[level], (HYPRE_ParCSRMatrix) A, (HYPRE_ParVector) Zero, (HYPRE_ParVector) U); } else { ret = hypre_BoomerAMGRelax(A, Zero, NULL /* CFmarker */ , rlx_type, 0 /* rel pts */ , 1.0 /* weight */ , 1.0 /* omega */ , NULL, U, Temp, Qtemp); hypre_assert(ret == 0); } } /* copy out the solution */ for (i = 0; i < n_local; i++) *p++ = datax[i]; } hypre_ParVectorDestroy(Zero); hypre_ParVectorDestroy(Temp); hypre_ParVectorDestroy(U); if (num_threads > 1) hypre_ParVectorDestroy(Qtemp); *SmoothVecs_p = bp; return 0; } /*-------------------------------------------------------------------------- * CreateSmoothDirs replaces CreateS in AMG * - smoother depends on the level being used * - in this version, CreateSmoothVecs must be called prior to this function *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSmoothDirs(void *data, hypre_ParCSRMatrix * A, HYPRE_Real * SmoothVecs, HYPRE_Real thresh, HYPRE_Int num_functions, HYPRE_Int * dof_func, hypre_ParCSRMatrix ** S_ptr) { hypre_ParAMGData *amg_data = (hypre_ParAMGData *) data; hypre_ParCSRMatrix *S; HYPRE_Real minimax; HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data); S = hypre_ParCSRMatrixClone(A, 0); /* Traverse S and fill in differences */ hypre_ParCSRMatrixFillSmooth( hypre_ParAMGDataNumSamples(amg_data), SmoothVecs, S, A, num_functions, dof_func); minimax = hypre_ParCSRMatrixChooseThresh(S); if (debug_flag >= 1) hypre_printf("Minimax chosen: %f\n", minimax); /* Threshold and compress */ hypre_ParCSRMatrixThreshold(S, thresh * minimax); *S_ptr = S; return 0; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGNormalizeVecs * * Normalize the smooth vectors and also make the first vector the constant * vector * * inputs: * n = length of smooth vectors * num = number of smooth vectors * V = smooth vectors (array of length n*num), also an output * * output: * V = adjusted smooth vectors *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGNormalizeVecs(HYPRE_Int n, HYPRE_Int num, HYPRE_Real * V) { HYPRE_Int i, j; HYPRE_Real nrm; /* change first vector to the constant vector */ for (i = 0; i < n; i++) V[i] = 1.0; for (j = 0; j < num; j++) { nrm = mydnrm2(n, &V[j * n]); mydscal(n, 1. 
/ nrm, &V[j * n]); } return 0; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGFitVectors * * Construct interpolation weights based on fitting smooth vectors * * inputs: * ip = row number of row in P being processed (0-based) * n = length of smooth vectors * num = number of smooth vectors * V = smooth vectors (array of length n*num), also an output * nc = number of coarse grid points * ind = indices of coarse grid points (0-based) * * output: * val = interpolation weights for the coarse grid points * V = smooth vectors; first one has been changed to constant vector; * vectors have also been normalized; this is also an input *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGFitVectors(HYPRE_Int ip, HYPRE_Int n, HYPRE_Int num, const HYPRE_Real * V, HYPRE_Int nc, const HYPRE_Int * ind, HYPRE_Real * val) { HYPRE_Real *a, *b; HYPRE_Real *ap; HYPRE_Int i, j; HYPRE_Real *work; HYPRE_Int work_size; HYPRE_Int info; HYPRE_Int temp; /* * hypre_printf("Fit: row %d, n %d num %d, nc = %d ", ip, n, num, nc); * for (i=0; i<nc; i++) hypre_printf("%d ", ind[i]); hypre_printf("\n"); */ if (nc == 0) return 0; work_size = 2000 * 64; work = hypre_CTAlloc(HYPRE_Real, work_size, HYPRE_MEMORY_HOST); a = hypre_CTAlloc(HYPRE_Real, num * nc, HYPRE_MEMORY_HOST); ap = a; for (j = 0; j < nc; j++) { for (i = 0; i < num; i++) { *ap = V[i * n + ind[j]]; ap++; } } temp = MAX(nc, num); b = hypre_CTAlloc(HYPRE_Real, temp, HYPRE_MEMORY_HOST); for (i = 0; i < num; i++) b[i] = V[i * n + ip]; { char trans = 'N'; HYPRE_Int one = 1; hypre_dgels(&trans, &num, &nc, &one, a, &num, b, &temp, work, &work_size, &info); if (info != 0) hypre_error_w_msg(HYPRE_ERROR_GENERIC, "par_gsmg: dgels returned %d\n"); /* copy solution into output vector */ for (j = 0; j < nc; j++) val[j] = b[j]; } hypre_TFree(b, HYPRE_MEMORY_HOST); hypre_TFree(a, HYPRE_MEMORY_HOST); hypre_TFree(work, HYPRE_MEMORY_HOST); return info; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildInterpLS * * Interpolation built from fitting smooth vectors * - sequential version only *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildInterpLS(hypre_ParCSRMatrix * A, HYPRE_Int * CF_marker, hypre_ParCSRMatrix * S, HYPRE_BigInt * num_cpts_global, HYPRE_Int num_functions, HYPRE_Int * dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int num_smooth, HYPRE_Real * SmoothVecs, hypre_ParCSRMatrix ** P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); /* HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); */ HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); /* * HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); HYPRE_Int * *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j * = hypre_CSRMatrixJ(S_offd); */ HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd); /* HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); */ hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd; HYPRE_Int *dof_func_offd = NULL; hypre_CSRMatrix *S_ext; //HYPRE_Real * S_ext_data; //HYPRE_Int * S_ext_i; //HYPRE_BigInt * S_ext_j; hypre_CSRMatrix *P_diag; hypre_CSRMatrix 
*P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker; /* HYPRE_Int *P_marker_offd; */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; /* * HYPRE_Int jj_begin_row,jj_begin_row_offd; HYPRE_Int * jj_end_row,jj_end_row_offd; */ HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt * fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; //HYPRE_BigInt my_first_cpt; HYPRE_Int i, i1; HYPRE_Int j, jl, jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; //HYPRE_BigInt * big_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); num_threads = hypre_NumThreads(); //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag == 4) wall_time = time_getWallclockSeconds(); CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (num_functions > 1 && num_cols_S_offd) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(S); comm_pkg = hypre_ParCSRMatrixCommPkg(S); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (num_functions > 1) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*---------------------------------------------------------------------- * Get the ghost rows of S *---------------------------------------------------------------------*/ if (debug_flag == 4) wall_time = time_getWallclockSeconds(); if (num_procs > 1) { S_ext = hypre_ParCSRMatrixExtractBExt(S, S, 1); //S_ext_i = hypre_CSRMatrixI(S_ext); //S_ext_j = hypre_CSRMatrixBigJ(S_ext); //S_ext_data = hypre_CSRMatrixData(S_ext); } if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n", my_id, wall_time); 
fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i. *--------------------------------------------------------------------*/ else { for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { /* removed */ } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i = 0; i < num_threads - 1; i++) { coarse_counter[i + 1] += coarse_counter[i]; jj_count[i + 1] += jj_count[i]; jj_count_offd[i + 1] += jj_count_offd[i]; } i = num_threads - 1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. 
*-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ if (debug_flag == 4) wall_time = time_getWallclockSeconds(); /* * fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_S_offd, * HYPRE_MEMORY_HOST); big_buf_data = hypre_CTAlloc(HYPRE_BigInt, * hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), * HYPRE_MEMORY_HOST); */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j - 1]; size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } /* * index = 0; for (i = 0; i < num_sends; i++) { start = * hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < * hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) * big_buf_data[index++] = * my_first_cpt+(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElm * t(comm_pkg,j)]; } * * comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, * fine_to_coarse_offd); * * hypre_ParCSRCommHandleDestroy(comm_handle); * * if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; * hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", * my_id, wall_time); fflush(NULL); } * * if (debug_flag==4) wall_time = time_getWallclockSeconds(); */ /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,jj_counter,jj_counter_offd) HYPRE_SMP_SCHEDULE #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (jl < rest) { ns = jl * size + jl; ne = (jl + 1) * size + jl + 1; } else { ns = jl * size + rest; ne = (jl + 1) * size + rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl - 1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl - 1]; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { HYPRE_Int kk; HYPRE_Int indices[1000]; /* kludge */ /* Diagonal part of P */ P_diag_i[i] = jj_counter; kk = 0; for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
*--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i1]; jj_counter++; indices[kk] = i1; kk++; } } hypre_BoomerAMGFitVectors(i, n_fine, num_smooth, SmoothVecs, kk, indices, &P_diag_data[P_diag_i[i]]); /* Off-Diagonal part of P */ /* undone */ } } } P_diag_i[i] = jj_counter; /* check that this is in right place for * threads */ P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(S), total_global_cpts, hypre_ParCSRMatrixColStarts(S), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < P_offd_size; i++) P_marker[i] = P_offd_j[i]; hypre_qsort0(P_marker, 0, P_offd_size - 1); num_cols_P_offd = 1; index = P_marker[0]; for (i = 1; i < P_offd_size; i++) { if (P_marker[i] > index) { index = P_marker[i]; P_marker[num_cols_P_offd++] = index; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_P_offd; i++) tmp_map_offd[i] = P_marker[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P, S, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); //hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext); return (0); } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildInterpGSMG * * Difference with hypre_BoomerAMGBuildInterp is that S contains values * and is used to build interpolation weights. Matrix A is not used. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildInterpGSMG(hypre_ParCSRMatrix * A, HYPRE_Int * CF_marker, hypre_ParCSRMatrix * S, HYPRE_BigInt * num_cpts_global, HYPRE_Int num_functions, HYPRE_Int * dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, hypre_ParCSRMatrix ** P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int *tmp_map_offd = NULL; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *CF_marker_offd; HYPRE_Int *dof_func_offd = NULL; hypre_CSRMatrix *S_ext; HYPRE_Real *S_ext_data; HYPRE_Int *S_ext_i; HYPRE_BigInt *S_ext_j; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int jj_begin_row, jj_begin_row_offd; HYPRE_Int jj_end_row, jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int strong_f_marker; HYPRE_Int *fine_to_coarse; HYPRE_Int *coarse_counter; //HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; //HYPRE_BigInt my_first_cpt; HYPRE_BigInt big_i2; HYPRE_Int i, i1, i2; HYPRE_Int j, jl, jj, jj1; HYPRE_Int start; HYPRE_Int c_num; HYPRE_Real sum; HYPRE_Real distribute; HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(S); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(S_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt) local_numrows; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); num_threads = hypre_NumThreads(); //my_first_cpt = num_cpts_global[0]; total_global_cpts = 0; /* we will set this later for the matrix in * the setup */ /* * if (myid == (num_procs -1)) total_global_cpts = coarse_pts_global[1]; * hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, * comm); */ /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag == 4) wall_time = time_getWallclockSeconds(); CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (num_functions > 1 && num_cols_S_offd) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(S); comm_pkg = hypre_ParCSRMatrixCommPkg(S); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = 
hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (num_functions > 1) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*---------------------------------------------------------------------- * Get the ghost rows of S *---------------------------------------------------------------------*/ if (debug_flag == 4) wall_time = time_getWallclockSeconds(); if (num_procs > 1) { S_ext = hypre_ParCSRMatrixExtractBExt(S, S, 1); S_ext_i = hypre_CSRMatrixI(S_ext); S_ext_j = hypre_CSRMatrixBigJ(S_ext); S_ext_data = hypre_CSRMatrixData(S_ext); } if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. 
*--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i. *--------------------------------------------------------------------*/ else { for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i = 0; i < num_threads - 1; i++) { coarse_counter[i + 1] += coarse_counter[i]; jj_count[i + 1] += jj_count[i]; jj_count_offd[i + 1] += jj_count_offd[i]; } i = num_threads - 1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ if (debug_flag == 4) wall_time = time_getWallclockSeconds(); /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (jl < rest) { ns = jl * size + jl; ne = (jl + 1) * size + jl + 1; } else { ns = jl * size + rest; ne = (jl + 1) * size + rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl - 1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl - 1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_S_offd; i++) { P_marker_offd[i] = -1; } strong_f_marker = -2; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. 
*--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; jj_begin_row = jj_counter; for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } /*-------------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *--------------------------------------------------------------*/ else { P_marker[i1] = strong_f_marker; } } jj_end_row = jj_counter; /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; jj_begin_row_offd = jj_counter_offd; if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++) { i1 = S_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } /*----------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *-----------------------------------------------------------*/ else { P_marker_offd[i1] = strong_f_marker; } } } jj_end_row_offd = jj_counter_offd; /* Loop over ith row of S. First, the diagonal part of S */ for (jj = S_diag_i[i]; jj < S_diag_i[i + 1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += S_diag_data[jj]; } /*-------------------------------------------------------------- * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. *--------------------------------------------------------------*/ else if (P_marker[i1] == strong_f_marker) { sum = zero; /*----------------------------------------------------------- * Loop over row of S for point i1 and calculate the sum * of the connections to c-points that strongly influence i. 
*-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1 + 1]; jj1++) { i2 = S_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row) sum += S_diag_data[jj1]; } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1 + 1]; jj1++) { i2 = S_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd) sum += S_offd_data[jj1]; } } if (sum != 0) { distribute = S_diag_data[jj] / sum; /*----------------------------------------------------------- * Loop over row of S for point i1 and do the distribution. *-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1 + 1]; jj1++) { i2 = S_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row) P_diag_data[P_marker[i2]] += distribute * S_diag_data[jj1]; } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1 + 1]; jj1++) { i2 = S_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i2]] += distribute * S_offd_data[jj1]; } } } else { /* do nothing */ } } /*-------------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. *--------------------------------------------------------------*/ else { /* do nothing */ } } /*---------------------------------------------------------------- * Still looping over ith row of S. Next, loop over the * off-diagonal part of S *---------------------------------------------------------------*/ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i + 1]; jj++) { i1 = S_offd_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker_offd[i1] >= jj_begin_row_offd) { P_offd_data[P_marker_offd[i1]] += S_offd_data[jj]; } /*------------------------------------------------------------ * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. *-----------------------------------------------------------*/ else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; /*--------------------------------------------------------- * Loop over row of S_ext for point i1 and calculate the sum * of the connections to c-points that strongly influence i. *---------------------------------------------------------*/ /* find row number */ c_num = S_offd_j[jj]; for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num + 1]; jj1++) { big_i2 = S_ext_j[jj1]; if (big_i2 >= col_1 && big_i2 < col_n) { /* in the diagonal block */ if (P_marker[(HYPRE_Int) (big_i2 - col_1)] >= jj_begin_row) sum += S_ext_data[jj1]; } else { /* in the off_diagonal block */ j = hypre_BigBinarySearch(col_map_offd, big_i2, num_cols_S_offd); if (j != -1) { if (P_marker_offd[j] >= jj_begin_row_offd) sum += S_ext_data[jj1]; } } } if (sum != 0) { distribute = S_offd_data[jj] / sum; /*--------------------------------------------------------- * Loop over row of S_ext for point i1 and do * the distribution. 
*--------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num + 1]; jj1++) { big_i2 = S_ext_j[jj1]; if (big_i2 >= col_1 && big_i2 < col_n) /* in the diagonal block */ { if (P_marker[(HYPRE_Int) (big_i2 - col_1)] >= jj_begin_row) P_diag_data[P_marker[(HYPRE_Int) (big_i2 - col_1)]] += distribute * S_ext_data[jj1]; } else { /* * check to see if it is in the * off_diagonal block */ j = hypre_BigBinarySearch(col_map_offd, big_i2, num_cols_S_offd); if (j != -1) { if (P_marker_offd[j] >= jj_begin_row_offd) P_offd_data[P_marker_offd[j]] += distribute * S_ext_data[jj1]; } } } } else { /* do nothing */ } } /*----------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. *-----------------------------------------------------------*/ else { /* do nothing */ } } } /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. *-----------------------------------------------------------------*/ sum = 0.; for (jj = jj_begin_row; jj < jj_end_row; jj++) sum += P_diag_data[jj]; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) sum += P_offd_data[jj]; for (jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= sum; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= sum; } strong_f_marker--; P_offd_i[i + 1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(S), total_global_cpts, hypre_ParCSRMatrixColStarts(S), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < P_offd_size; i++) P_marker[i] = P_offd_j[i]; hypre_qsort0(P_marker, 0, P_offd_size - 1); num_cols_P_offd = 1; index = P_marker[0]; for (i = 1; i < P_offd_size; i++) { if (P_marker[i] > index) { index = P_marker[i]; P_marker[num_cols_P_offd++] = index; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_P_offd; i++) tmp_map_offd[i] = P_marker[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = 
col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P, S, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext); return (0); }
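Both interpolation builders above (hypre_BoomerAMGBuildInterpLS and hypre_BoomerAMGBuildInterpGSMG) partition the fine grid across threads by hand: every "#pragma omp parallel for" over num_threads recomputes the same ns/ne row range, giving the first "rest" threads one extra row, and the per-thread counters (coarse_counter, jj_count, jj_count_offd) are then combined with a serial prefix sum. The following is a minimal standalone sketch of that partitioning, with illustrative function and variable names that are not hypre API:

/* Sketch only: the block partitioning used in the passes above.
 * Thread j of num_threads gets rows [ns, ne); the first `rest`
 * threads take one extra row so the ranges tile [0, n_fine). */
#include <assert.h>
#include <stdio.h>

static void thread_range(int j, int num_threads, int n_fine,
                         int *ns, int *ne)
{
    int size = n_fine / num_threads;
    int rest = n_fine - size * num_threads;   /* leftover rows */
    if (j < rest) {
        *ns = j * size + j;
        *ne = (j + 1) * size + j + 1;         /* one extra row */
    } else {
        *ns = j * size + rest;
        *ne = (j + 1) * size + rest;
    }
}

int main(void)
{
    int n_fine = 10, num_threads = 4, covered = 0;
    for (int j = 0; j < num_threads; j++) {
        int ns, ne;
        thread_range(j, num_threads, n_fine, &ns, &ne);
        printf("thread %d: rows [%d, %d)\n", j, ns, ne);
        covered += ne - ns;
    }
    assert(covered == n_fine);   /* ranges exactly cover the grid */
    return 0;
}

Partitioning by hand like this is what lets the fill pass write P_diag_j and P_diag_data without synchronization: each thread's starting jj_counter is read off the prefix-summed jj_count array, so writes from different threads never overlap.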
GB_unaryop__ainv_int16_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_int16_uint8 // op(A') function: GB_tran__ainv_int16_uint8 // C type: int16_t // A type: uint8_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = -aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ int16_t z = (int16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_int16_uint8 ( int16_t *restrict Cx, const uint8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_int16_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_int16_uint8 // op(A') function: GB_tran__ainv_int16_uint8 // C type: int16_t // A type: uint8_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = -aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ int16_t z = (int16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_int16_uint8 ( int16_t *restrict Cx, const uint8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_int16_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_int16_uint8 // op(A') function: GB_tran__ainv_int16_uint8 // C type: int16_t // A type: uint8_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = -aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ int16_t z = (int16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_int16_uint8 ( int16_t *restrict Cx, const uint8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_int16_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
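The three cells above hold the same generated kernel, and the only difference between the no_omp and omp variants is the "#pragma omp parallel for num_threads(nthreads) schedule(static)" line inside GB_unop__ainv_int16_uint8. As a reading aid, here is a hand-expanded sketch of what the GB_GETA / GB_CASTING / GB_OP macro chain produces for this type pair; the standalone function name is hypothetical, not GraphBLAS API:

/* Illustrative expansion of GB_CAST_OP(p, p) for ainv_int16_uint8,
 * shown with the OpenMP variant's pragma. */
#include <stdint.h>

void ainv_int16_uint8(int16_t *Cx, const uint8_t *Ax, int64_t anz,
                      int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++)
    {
        uint8_t aij = Ax[p];        /* GB_GETA: read A             */
        int16_t x = (int16_t) aij;  /* GB_CASTING: uint8 -> int16  */
        Cx[p] = -x;                 /* GB_OP: additive inverse     */
    }
}

schedule(static) is a natural fit here: every iteration does the same constant amount of work, so equal-sized chunks balance the load with no scheduler overhead.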
convolution_pack4to1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convolution_pack4to1_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack4to1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; // kernel offsets std::vector<int> _space_ofs(maxk); int* space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } const float* bias_data_ptr = bias_data; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0.f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } v4f32 _sum = (v4f32)__msa_fill_w(0); const float* kptr = (const float*)weight_data_pack4to1.channel(p); // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const float* sptr = m.row(i * stride_h) + j * stride_w * 4; for (int k = 0; k < maxk; k++) { v4f32 _val = (v4f32)__msa_ld_w(sptr + space_ofs[k] * 4, 0); v4f32 _w = (v4f32)__msa_ld_w(kptr, 0); _sum = __msa_fmadd_w(_sum, _val, _w); kptr += 4; } } sum += __msa_fhadd_w(_sum); sum = activation_ss(sum, activation_type, activation_params); outptr[j] = sum; } outptr += outw; } } }
// Tencent is pleased to support the open source community by making ncnn available. // //Copyright(C) 2021 THL A29 Limited, a Tencent company.All rights reserved. // //Licensed under the BSD 3 - Clause License(the "License"); you may not use this file except //in compliance with the License.You may obtain a copy of the License at // //https://opensource.org / licenses / BSD - 3 - Clause // //Unless required by applicable law or agreed to in writing, software distributed //under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR //CONDITIONS OF ANY KIND, either express or implied.See the License for the //specific language governing permissions and limitations under the License. static void convolution_pack4to1_msa(const Mat & bottom_blob, Mat & top_blob, const Mat & weight_data_pack4to1, const Mat & bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat & activation_params, const Option & opt) { int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; //kernel offsets std: : vector < int >_space_ofs(maxk); int *space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } const float *bias_data_ptr = bias_data; //num_output for (int p = 0; p < outch; p++) { float *outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0. f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } v4f32 _sum = (v4f32) __msa_fill_w(0); const float *kptr = (const float *)weight_data_pack4to1.channel(p); //channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const float *sptr = m.row(i * stride_h) + j * stride_w * 4; for (int k = 0; k < maxk; k++) { v4f32 _val = (v4f32) __msa_ld_w(sptr + space_ofs[k] * 4, 0); v4f32 _w = (v4f32) __msa_ld_w(kptr, 0); _sum = __msa_fmadd_w(_sum, _val, _w); kptr += 4; } } sum += __msa_fhadd_w(_sum); sum = activation_ss(sum, activation_type, activation_params); outptr[j] = sum; } outptr += outw; } } }
// Tencent is pleased to support the open source community by making ncnn available. // //Copyright(C) 2021 THL A29 Limited, a Tencent company.All rights reserved. // //Licensed under the BSD 3 - Clause License(the "License"); you may not use this file except //in compliance with the License.You may obtain a copy of the License at // //https://opensource.org / licenses / BSD - 3 - Clause // //Unless required by applicable law or agreed to in writing, software distributed //under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR //CONDITIONS OF ANY KIND, either express or implied.See the License for the //specific language governing permissions and limitations under the License. static void convolution_pack4to1_msa(const Mat & bottom_blob, Mat & top_blob, const Mat & weight_data_pack4to1, const Mat & bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat & activation_params, const Option & opt) { int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; //kernel offsets std: : vector < int >_space_ofs(maxk); int *space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } const float *bias_data_ptr = bias_data; //num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float *outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0. f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } v4f32 _sum = (v4f32) __msa_fill_w(0); const float *kptr = (const float *)weight_data_pack4to1.channel(p); //channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const float *sptr = m.row(i * stride_h) + j * stride_w * 4; for (int k = 0; k < maxk; k++) { v4f32 _val = (v4f32) __msa_ld_w(sptr + space_ofs[k] * 4, 0); v4f32 _w = (v4f32) __msa_ld_w(kptr, 0); _sum = __msa_fmadd_w(_sum, _val, _w); kptr += 4; } } sum += __msa_fhadd_w(_sum); sum = activation_ss(sum, activation_type, activation_params); outptr[j] = sum; } outptr += outw; } } }
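The convolution kernel above precomputes a space_ofs table so its inner loop can gather all taps of a dilated kernel from a single base pointer plus offsets (scaled by 4 for the pack4 layout). A standalone sketch of that offset computation follows, with illustrative sizes that are not ncnn API:

/* Sketch only: what the space_ofs table above contains for a
 * row-major image of width w and a dilated kernel. */
#include <stdio.h>

int main(void)
{
    int w = 16;                     /* input width              */
    int kernel_w = 3, kernel_h = 3;
    int dilation_w = 2, dilation_h = 2;

    int space_ofs[9];
    int p1 = 0, p2 = 0;
    int gap = w * dilation_h - kernel_w * dilation_w;
    for (int i = 0; i < kernel_h; i++) {
        for (int j = 0; j < kernel_w; j++) {
            space_ofs[p1++] = p2;   /* offset of tap (i, j)     */
            p2 += dilation_w;       /* step to next column tap  */
        }
        p2 += gap;                  /* jump to next dilated row */
    }
    /* Prints 0 2 4 32 34 36 64 66 68 for these sizes:
     * tap (i, j) sits at i*dilation_h*w + j*dilation_w. */
    for (int k = 0; k < 9; k++)
        printf("%d ", space_ofs[k]);
    printf("\n");
    return 0;
}

Each tap (i, j) lands i * dilation_h * w + j * dilation_w elements past the top-left tap, so the gap term is exactly what skips from the end of one dilated kernel row to the start of the next.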
GB_binop__isle_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_01__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_02__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_03__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int32) // A*D function (colscale): GB (_AxD__isle_int32) // D*A function (rowscale): GB (_DxB__isle_int32) // C+=B function (dense accum): GB (_Cdense_accumB__isle_int32) // C+=b function (dense accum): GB (_Cdense_accumb__isle_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int32) // C=scalar+B GB (_bind1st__isle_int32) // C=scalar+B' GB (_bind1st_tran__isle_int32) // C=A+scalar GB (_bind2nd__isle_int32) // C=A'+scalar GB (_bind2nd_tran__isle_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_INT32 || GxB_NO_ISLE_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_int32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isle_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isle_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
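The file above is one instantiation of the generator pattern used throughout SuiteSparse:GraphBLAS: the loop bodies live in shared templates (GB_add_template.c, GB_emult_02_template.c, ...) written purely in terms of macros such as GB_GETA, GB_GETB and GB_BINOP, and each generated file pins those macros to one type/operator pair before #include-ing the template. A toy self-contained sketch of the same trick (the macro and function names here are illustrative, not the real GraphBLAS ones):

#include <cstdint>
#include <cstdio>

// "Generated" part: pin the element type and the operator, just as the
// definitions above pin int32_t and z = (x <= y) for ISLE_INT32.
#define MY_TYPE int32_t
#define MY_BINOP(z,x,y) z = ((x) <= (y))

// "Template" part: written only in terms of the macros, so the same body
// can be reused for every type/operator combination.
static void toy_kernel(MY_TYPE *Cx, const MY_TYPE *Ax, const MY_TYPE *Bx, int n)
{
    for (int p = 0; p < n; p++)
    {
        MY_TYPE aij = Ax[p];
        MY_TYPE bij = Bx[p];
        MY_BINOP(Cx[p], aij, bij);
    }
}

int main()
{
    MY_TYPE A[4] = {1, 5, 3, 7}, B[4] = {2, 2, 3, 9}, C[4];
    toy_kernel(C, A, B, 4);
    for (int p = 0; p < 4; p++) printf("%d ", (int)C[p]); // prints: 1 0 1 1
    printf("\n");
    return 0;
}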
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_01__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_02__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_03__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int32) // A*D function (colscale): GB (_AxD__isle_int32) // D*A function (rowscale): GB (_DxB__isle_int32) // C+=B function (dense accum): GB (_Cdense_accumB__isle_int32) // C+=b function (dense accum): GB (_Cdense_accumb__isle_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int32) // C=scalar+B GB (_bind1st__isle_int32) // C=scalar+B' GB (_bind1st_tran__isle_int32) // C=A+scalar GB (_bind2nd__isle_int32) // C=A'+scalar GB (_bind2nd_tran__isle_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_INT32 || GxB_NO_ISLE_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_int32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isle_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isle_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_01__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_02__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_03__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int32) // A*D function (colscale): GB (_AxD__isle_int32) // D*A function (rowscale): GB (_DxB__isle_int32) // C+=B function (dense accum): GB (_Cdense_accumB__isle_int32) // C+=b function (dense accum): GB (_Cdense_accumb__isle_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int32) // C=scalar+B GB (_bind1st__isle_int32) // C=scalar+B' GB (_bind1st_tran__isle_int32) // C=A+scalar GB (_bind2nd__isle_int32) // C=A'+scalar GB (_bind2nd_tran__isle_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_INT32 || GxB_NO_ISLE_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_int32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isle_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isle_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
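Comparing the two formatted variants above, the only difference is the #pragma omp parallel for on the bind1st/bind2nd element loops; every iteration writes a distinct Cx[p] from read-only inputs, so the loop parallelizes without synchronization. A minimal self-contained sketch of the bind2nd pattern (compile with -fopenmp; the data here is made up):

#include <cstdint>
#include <cstdio>

int main()
{
    const int64_t anz = 1000000;
    static int32_t Ax[1000000], Cx[1000000];
    const int32_t y = 500000; // the bound scalar
    for (int64_t i = 0; i < anz; i++) Ax[i] = (int32_t)i;

    int64_t p;
    #pragma omp parallel for schedule(static)
    for (p = 0; p < anz; p++)
    {
        Cx[p] = (Ax[p] <= y); // one independent write per iteration
    }

    printf("Cx[0]=%d Cx[anz-1]=%d\n", (int)Cx[0], (int)Cx[anz - 1]); // 1 0
    return 0;
}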
common.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_UTILS_COMMON_FUN_H_ #define LIGHTGBM_UTILS_COMMON_FUN_H_ #include <LightGBM/utils/log.h> #include <LightGBM/utils/openmp_wrapper.h> #include <limits> #include <string> #include <algorithm> #include <cmath> #include <cstdint> #include <cstdio> #include <functional> #include <iomanip> #include <iterator> #include <memory> #include <sstream> #include <type_traits> #include <utility> #include <vector> #ifdef _MSC_VER #include "intrin.h" #endif namespace LightGBM { namespace Common { inline static char tolower(char in) { if (in <= 'Z' && in >= 'A') return in - ('Z' - 'z'); return in; } inline static std::string Trim(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1); str.erase(0, str.find_first_not_of(" \f\n\r\t\v")); return str; } inline static std::string RemoveQuotationSymbol(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of("'\"") + 1); str.erase(0, str.find_first_not_of("'\"")); return str; } inline static bool StartsWith(const std::string& str, const std::string prefix) { if (str.substr(0, prefix.size()) == prefix) { return true; } else { return false; } } inline static std::vector<std::string> Split(const char* c_str, char delimiter) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == delimiter) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> SplitLines(const char* c_str) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == '\n' || str[pos] == '\r') { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } // skip the line endings while (str[pos] == '\n' || str[pos] == '\r') ++pos; // new begin i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { bool met_delimiters = false; for (int j = 0; delimiters[j] != '\0'; ++j) { if (str[pos] == delimiters[j]) { met_delimiters = true; break; } } if (met_delimiters) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } template<typename T> inline static const char* Atoi(const char* p, T* out) { int sign; T value; while (*p == ' ') { ++p; } sign = 1; if (*p == '-') { sign = -1; ++p; } else if (*p == '+') { ++p; } for (value = 0; *p >= '0' && *p <= '9'; ++p) { value = value * 10 + (*p - '0'); } *out = static_cast<T>(sign * value); while (*p == ' ') { ++p; } return p; } template<typename T> inline static double Pow(T base, int power) { if (power < 0) { return 1.0 / Pow(base, -power); } else if (power == 0) { return 1; } else if (power % 2 == 0) { return Pow(base*base, power / 2); } else if (power % 3 == 0) { return Pow(base*base*base, power / 3); } else { return base * Pow(base, power - 1); } } inline static const char* Atof(const char* p, double* out) { int frac; double sign, value, scale; *out = NAN; // Skip leading white space, if any. 
while (*p == ' ') { ++p; } // Get sign, if any. sign = 1.0; if (*p == '-') { sign = -1.0; ++p; } else if (*p == '+') { ++p; } // is a number if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') { // Get digits before decimal point or exponent, if any. for (value = 0.0; *p >= '0' && *p <= '9'; ++p) { value = value * 10.0 + (*p - '0'); } // Get digits after decimal point, if any. if (*p == '.') { double right = 0.0; int nn = 0; ++p; while (*p >= '0' && *p <= '9') { right = (*p - '0') + right * 10.0; ++nn; ++p; } value += right / Pow(10.0, nn); } // Handle exponent, if any. frac = 0; scale = 1.0; if ((*p == 'e') || (*p == 'E')) { uint32_t expon; // Get sign of exponent, if any. ++p; if (*p == '-') { frac = 1; ++p; } else if (*p == '+') { ++p; } // Get digits of exponent, if any. for (expon = 0; *p >= '0' && *p <= '9'; ++p) { expon = expon * 10 + (*p - '0'); } if (expon > 308) expon = 308; // Calculate scaling factor. while (expon >= 50) { scale *= 1E50; expon -= 50; } while (expon >= 8) { scale *= 1E8; expon -= 8; } while (expon > 0) { scale *= 10.0; expon -= 1; } } // Return signed and scaled floating point result. *out = sign * (frac ? (value / scale) : (value * scale)); } else { size_t cnt = 0; while (*(p + cnt) != '\0' && *(p + cnt) != ' ' && *(p + cnt) != '\t' && *(p + cnt) != ',' && *(p + cnt) != '\n' && *(p + cnt) != '\r' && *(p + cnt) != ':') { ++cnt; } if (cnt > 0) { std::string tmp_str(p, cnt); std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower); if (tmp_str == std::string("na") || tmp_str == std::string("nan") || tmp_str == std::string("null")) { *out = NAN; } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) { *out = sign * 1e308; } else { Log::Fatal("Unknown token %s in data file", tmp_str.c_str()); } p += cnt; } } while (*p == ' ') { ++p; } return p; } inline static bool AtoiAndCheck(const char* p, int* out) { const char* after = Atoi(p, out); if (*after != '\0') { return false; } return true; } inline static bool AtofAndCheck(const char* p, double* out) { const char* after = Atof(p, out); if (*after != '\0') { return false; } return true; } inline static unsigned CountDecimalDigit32(uint32_t n) { #if defined(_MSC_VER) || defined(__GNUC__) static const uint32_t powers_of_10[] = { 0, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 }; #ifdef _MSC_VER unsigned long i = 0; _BitScanReverse(&i, n | 1); uint32_t t = (i + 1) * 1233 >> 12; #elif __GNUC__ uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12; #endif return t - (n < powers_of_10[t]) + 1; #else if (n < 10) return 1; if (n < 100) return 2; if (n < 1000) return 3; if (n < 10000) return 4; if (n < 100000) return 5; if (n < 1000000) return 6; if (n < 10000000) return 7; if (n < 100000000) return 8; if (n < 1000000000) return 9; return 10; #endif } inline static void Uint32ToStr(uint32_t value, char* buffer) { const char kDigitsLut[200] = { '0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9', '1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9', '2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9', '3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9', '4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9', '5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9', '6', 
'0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9', '7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', '8', '7', '9', '8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9', '9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9' }; unsigned digit = CountDecimalDigit32(value); buffer += digit; *buffer = '\0'; while (value >= 100) { const unsigned i = (value % 100) << 1; value /= 100; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } if (value < 10) { *--buffer = static_cast<char>(value) + '0'; } else { const unsigned i = value << 1; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } } inline static void Int32ToStr(int32_t value, char* buffer) { uint32_t u = static_cast<uint32_t>(value); if (value < 0) { *buffer++ = '-'; u = ~u + 1; } Uint32ToStr(u, buffer); } inline static void DoubleToStr(double value, char* buffer, size_t #ifdef _MSC_VER buffer_len #endif ) { #ifdef _MSC_VER sprintf_s(buffer, buffer_len, "%.17g", value); #else sprintf(buffer, "%.17g", value); #endif } inline static const char* SkipSpaceAndTab(const char* p) { while (*p == ' ' || *p == '\t') { ++p; } return p; } inline static const char* SkipReturn(const char* p) { while (*p == '\n' || *p == '\r' || *p == ' ') { ++p; } return p; } template<typename T, typename T2> inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) { std::vector<T2> ret(arr.size()); for (size_t i = 0; i < arr.size(); ++i) { ret[i] = static_cast<T2>(arr[i]); } return ret; } template<typename T, bool is_float, bool is_unsign> struct __TToStringHelperFast { void operator()(T value, char* buffer, size_t) const { Int32ToStr(value, buffer); } }; template<typename T> struct __TToStringHelperFast<T, true, false> { void operator()(T value, char* buffer, size_t #ifdef _MSC_VER buf_len #endif ) const { #ifdef _MSC_VER sprintf_s(buffer, buf_len, "%g", value); #else sprintf(buffer, "%g", value); #endif } }; template<typename T> struct __TToStringHelperFast<T, false, true> { void operator()(T value, char* buffer, size_t) const { Uint32ToStr(value, buffer); } }; template<typename T> inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } __TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper; const size_t buf_len = 16; std::vector<char> buffer(buf_len); std::stringstream str_buf; helper(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { helper(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } inline static std::string ArrayToString(const std::vector<double>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } const size_t buf_len = 32; std::vector<char> buffer(buf_len); std::stringstream str_buf; DoubleToStr(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { DoubleToStr(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } template<typename T, bool is_float> struct __StringToTHelper { T operator()(const std::string& str) const { T ret = 0; Atoi(str.c_str(), &ret); return ret; } }; template<typename T> struct __StringToTHelper<T, true> { T operator()(const std::string& str) const { return static_cast<T>(std::stod(str)); } }; 
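A short usage sketch for the integer-to-string helpers above: CountDecimalDigit32 sizes the output, and Uint32ToStr/Int32ToStr fill a caller-supplied buffer back to front using the two-digits-at-a-time lookup table. The include path below is an assumption; use whatever path this header has in your tree:

#include <cstdio>
// #include "LightGBM/utils/common.h" // assumed path for the header above

int main()
{
    char buf[16]; // large enough for 10 digits + sign + terminator
    LightGBM::Common::Uint32ToStr(4294967295u, buf);
    printf("%s\n", buf); // 4294967295
    LightGBM::Common::Int32ToStr(-12345, buf);
    printf("%s\n", buf); // -12345
    return 0;
}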
template<typename T> inline static std::vector<T> StringToArray(const std::string& str, char delimiter) { std::vector<std::string> strs = Split(str.c_str(), delimiter); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T> inline static std::vector<T> StringToArray(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } std::vector<std::string> strs = Split(str.c_str(), ' '); CHECK(strs.size() == static_cast<size_t>(n)); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T, bool is_float> struct __StringToTHelperFast { const char* operator()(const char*p, T* out) const { return Atoi(p, out); } }; template<typename T> struct __StringToTHelperFast<T, true> { const char* operator()(const char*p, T* out) const { double tmp = 0.0f; auto ret = Atof(p, &tmp); *out = static_cast<T>(tmp); return ret; } }; template<typename T> inline static std::vector<T> StringToArrayFast(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } auto p_str = str.c_str(); __StringToTHelperFast<T, std::is_floating_point<T>::value> helper; std::vector<T> ret(n); for (int i = 0; i < n; ++i) { p_str = helper(p_str, &ret[i]); } return ret; } template<typename T> inline static std::string Join(const std::vector<T>& strs, const char* delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[0]; for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } template<> inline std::string Join<int8_t>(const std::vector<int8_t>& strs, const char* delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << static_cast<int16_t>(strs[0]); for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << static_cast<int16_t>(strs[i]); } return str_buf.str(); } template<typename T> inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) { if (end - start <= 0) { return std::string(""); } start = std::min(start, static_cast<size_t>(strs.size()) - 1); end = std::min(end, static_cast<size_t>(strs.size())); std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[start]; for (size_t i = start + 1; i < end; ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } inline static int64_t Pow2RoundUp(int64_t x) { int64_t t = 1; for (int i = 0; i < 64; ++i) { if (t >= x) { return t; } t <<= 1; } return 0; } /*! * \brief Do inplace softmax transformation on p_rec * \param p_rec The input/output vector of the values. 
*/ inline static void Softmax(std::vector<double>* p_rec) { std::vector<double> &rec = *p_rec; double wmax = rec[0]; for (size_t i = 1; i < rec.size(); ++i) { wmax = std::max(rec[i], wmax); } double wsum = 0.0f; for (size_t i = 0; i < rec.size(); ++i) { rec[i] = std::exp(rec[i] - wmax); wsum += rec[i]; } for (size_t i = 0; i < rec.size(); ++i) { rec[i] /= static_cast<double>(wsum); } } inline static void Softmax(const double* input, double* output, int len) { double wmax = input[0]; for (int i = 1; i < len; ++i) { wmax = std::max(input[i], wmax); } double wsum = 0.0f; for (int i = 0; i < len; ++i) { output[i] = std::exp(input[i] - wmax); wsum += output[i]; } for (int i = 0; i < len; ++i) { output[i] /= static_cast<double>(wsum); } } template<typename T> std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) { std::vector<const T*> ret; for (auto t = input.begin(); t !=input.end(); ++t) { ret.push_back(t->get()); } return ret; } template<typename T1, typename T2> inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, size_t start, bool is_reverse = false) { std::vector<std::pair<T1, T2>> arr; auto& ref_key = *keys; auto& ref_value = *values; for (size_t i = start; i < keys->size(); ++i) { arr.emplace_back(ref_key[i], ref_value[i]); } if (!is_reverse) { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first < b.first; }); } else { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first > b.first; }); } for (size_t i = start; i < arr.size(); ++i) { ref_key[i] = arr[i].first; ref_value[i] = arr[i].second; } } template <typename T> inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>* data) { std::vector<T*> ptr(data->size()); auto& ref_data = *data; for (size_t i = 0; i < data->size(); ++i) { ptr[i] = ref_data[i].data(); } return ptr; } template <typename T> inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) { std::vector<int> ret(data.size()); for (size_t i = 0; i < data.size(); ++i) { ret[i] = static_cast<int>(data[i].size()); } return ret; } inline static double AvoidInf(double x) { if (std::isnan(x)) { return 0.0; } else if (x >= 1e300) { return 1e300; } else if (x <= -1e300) { return -1e300; } else { return x; } } inline static float AvoidInf(float x) { if (std::isnan(x)) { return 0.0f; } else if (x >= 1e38) { return 1e38f; } else if (x <= -1e38) { return -1e38f; } else { return x; } } template<typename _Iter> inline static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) { return (0); } template<typename _RanIt, typename _Pr, typename _VTRanIt> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) { size_t len = _Last - _First; const size_t kMinInnerLen = 1024; int num_threads = 1; #pragma omp parallel #pragma omp master { num_threads = omp_get_num_threads(); } if (len <= kMinInnerLen || num_threads <= 1) { std::sort(_First, _Last, _Pred); return; } size_t inner_size = (len + num_threads - 1) / num_threads; inner_size = std::max(inner_size, kMinInnerLen); num_threads = static_cast<int>((len + inner_size - 1) / inner_size); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < num_threads; ++i) { size_t left = inner_size*i; size_t right = left + inner_size; right = std::min(right, len); if (right > left) { std::sort(_First + left, _First + right, _Pred); } } // Buffer for merge. 
std::vector<_VTRanIt> temp_buf(len); _RanIt buf = temp_buf.begin(); size_t s = inner_size; // Recursive merge while (s < len) { int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2)); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < loop_size; ++i) { size_t left = i * 2 * s; size_t mid = left + s; size_t right = mid + s; right = std::min(len, right); if (mid >= right) { continue; } std::copy(_First + left, _First + mid, buf + left); std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred); } s *= 2; } } template<typename _RanIt, typename _Pr> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) { return ParallelSort(_First, _Last, _Pred, IteratorValType(_First)); } // Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not template <typename T> inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) { auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) { std::ostringstream os; os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]"; Log::Fatal(os.str().c_str(), callername, i); }; for (int i = 1; i < ny; i += 2) { if (y[i - 1] < y[i]) { if (y[i - 1] < ymin) { fatal_msg(i - 1); } else if (y[i] > ymax) { fatal_msg(i); } } else { if (y[i - 1] > ymax) { fatal_msg(i - 1); } else if (y[i] < ymin) { fatal_msg(i); } } } if (ny & 1) { // odd if (y[ny - 1] < ymin || y[ny - 1] > ymax) { fatal_msg(ny - 1); } } } // One-pass scan over array w with nw elements: find min, max and sum of elements; // this is useful for checking weight requirements. template <typename T1, typename T2> inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) { T1 minw; T1 maxw; T1 sumw; int i; if (nw & 1) { // odd minw = w[0]; maxw = w[0]; sumw = w[0]; i = 2; } else { // even if (w[0] < w[1]) { minw = w[0]; maxw = w[1]; } else { minw = w[1]; maxw = w[0]; } sumw = w[0] + w[1]; i = 3; } for (; i < nw; i += 2) { if (w[i - 1] < w[i]) { minw = std::min(minw, w[i - 1]); maxw = std::max(maxw, w[i]); } else { minw = std::min(minw, w[i]); maxw = std::max(maxw, w[i - 1]); } sumw += w[i - 1] + w[i]; } if (mi != nullptr) { *mi = minw; } if (ma != nullptr) { *ma = maxw; } if (su != nullptr) { *su = static_cast<T2>(sumw); } } inline static std::vector<uint32_t> EmptyBitset(int n) { int size = n / 32; if (n % 32 != 0) ++size; return std::vector<uint32_t>(size); } template<typename T> inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) { auto& ref_v = *vec; int i1 = val / 32; int i2 = val % 32; if (static_cast<int>(vec->size()) < i1 + 1) { vec->resize(i1 + 1, 0); } ref_v[i1] |= (1 << i2); } template<typename T> inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) { std::vector<uint32_t> ret; for (int i = 0; i < n; ++i) { int i1 = vals[i] / 32; int i2 = vals[i] % 32; if (static_cast<int>(ret.size()) < i1 + 1) { ret.resize(i1 + 1, 0); } ret[i1] |= (1 << i2); } return ret; } template<typename T> inline static bool FindInBitset(const uint32_t* bits, int n, T pos) { int i1 = pos / 32; if (i1 >= n) { return false; } int i2 = pos % 32; return (bits[i1] >> i2) & 1; } inline static bool CheckDoubleEqualOrdered(double a, double b) { double upper = std::nextafter(a, INFINITY); return b <= upper; } inline static double GetDoubleUpperBound(double a) { return std::nextafter(a, INFINITY);; } inline static size_t GetLine(const char* str) { auto start = str; while (*str != '\0' && *str != 
'\n' && *str != '\r') { ++str; } return str - start; } inline static const char* SkipNewLine(const char* str) { if (*str == '\r') { ++str; } if (*str == '\n') { ++str; } return str; } template <typename T> static int Sign(T x) { return (x > T(0)) - (x < T(0)); } template <typename T> static T SafeLog(T x) { if (x > 0) { return std::log(x); } else { return -INFINITY; } } inline bool CheckASCII(const std::string& s) { for (auto c : s) { if (static_cast<unsigned char>(c) > 127) { return false; } } return true; } inline bool CheckAllowedJSON(const std::string& s) { unsigned char char_code; for (auto c : s) { char_code = static_cast<unsigned char>(c); if (char_code == 34 // " || char_code == 44 // , || char_code == 58 // : || char_code == 91 // [ || char_code == 93 // ] || char_code == 123 // { || char_code == 125 // } ) { return false; } } return true; } } // namespace Common } // namespace LightGBM #endif // LightGBM_UTILS_COMMON_FUN_H_
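A minimal stand-alone sketch of the two-phase scheme the ParallelSort above implements: sort fixed-size chunks independently, then merge adjacent runs while doubling the run length each pass. It is sequential for clarity; the chunk size of 4 and the sample values are illustrative assumptions, not part of the dataset row.

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> v = {9, 3, 7, 1, 8, 2, 6, 4, 5, 0};
  const size_t len = v.size();
  size_t s = 4;  // initial chunk size ("inner_size"); 4 is an arbitrary choice
  // Phase 1: sort each chunk independently (the real code parallelizes this loop).
  for (size_t left = 0; left < len; left += s)
    std::sort(v.begin() + left, v.begin() + std::min(left + s, len));
  // Phase 2: merge adjacent chunk pairs, doubling the sorted run length each pass.
  std::vector<int> buf(len);
  while (s < len) {
    for (size_t left = 0; left + s < len; left += 2 * s) {
      size_t mid = left + s;
      size_t right = std::min(left + 2 * s, len);
      // Copy the left run aside, then merge it with the right run in place.
      std::copy(v.begin() + left, v.begin() + mid, buf.begin() + left);
      std::merge(buf.begin() + left, buf.begin() + mid,
                 v.begin() + mid, v.begin() + right,
                 v.begin() + left);
    }
    s *= 2;
  }
  for (int x : v) printf("%d ", x);  // prints: 0 1 2 3 4 5 6 7 8 9
  printf("\n");
}

The real ParallelSort runs both loops under #pragma omp parallel for; that is safe because each chunk (and each merged pair) touches a disjoint index range.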
#ifndef LIGHTGBM_UTILS_COMMON_FUN_H_ #define LIGHTGBM_UTILS_COMMON_FUN_H_ #include <LightGBM/utils/log.h> #include <LightGBM/utils/openmp_wrapper.h> #include <limits> #include <string> #include <algorithm> #include <cmath> #include <cstdint> #include <cstdio> #include <functional> #include <iomanip> #include <iterator> #include <memory> #include <sstream> #include <type_traits> #include <utility> #include <vector> #ifdef _MSC_VER #include "intrin.h" #endif namespace LightGBM { namespace Common { inline static char tolower(char in) { if (in <= 'Z' && in >= 'A') return in - ('Z' - 'z'); return in; } inline static std::string Trim(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1); str.erase(0, str.find_first_not_of(" \f\n\r\t\v")); return str; } inline static std::string RemoveQuotationSymbol(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of("'\"") + 1); str.erase(0, str.find_first_not_of("'\"")); return str; } inline static bool StartsWith(const std::string& str, const std::string prefix) { if (str.substr(0, prefix.size()) == prefix) { return true; } else { return false; } } inline static std::vector<std::string> Split(const char* c_str, char delimiter) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == delimiter) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> SplitLines(const char* c_str) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == '\n' || str[pos] == '\r') { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } // skip the line endings while (str[pos] == '\n' || str[pos] == '\r') ++pos; // new begin i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { bool met_delimiters = false; for (int j = 0; delimiters[j] != '\0'; ++j) { if (str[pos] == delimiters[j]) { met_delimiters = true; break; } } if (met_delimiters) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } template<typename T> inline static const char* Atoi(const char* p, T* out) { int sign; T value; while (*p == ' ') { ++p; } sign = 1; if (*p == '-') { sign = -1; ++p; } else if (*p == '+') { ++p; } for (value = 0; *p >= '0' && *p <= '9'; ++p) { value = value * 10 + (*p - '0'); } *out = static_cast<T>(sign * value); while (*p == ' ') { ++p; } return p; } template<typename T> inline static double Pow(T base, int power) { if (power < 0) { return 1.0 / Pow(base, -power); } else if (power == 0) { return 1; } else if (power % 2 == 0) { return Pow(base*base, power / 2); } else if (power % 3 == 0) { return Pow(base*base*base, power / 3); } else { return base * Pow(base, power - 1); } } inline static const char* Atof(const char* p, double* out) { int frac; double sign, value, scale; *out = NAN; // Skip leading white space, if any. while (*p == ' ') { ++p; } // Get sign, if any. sign = 1.0; if (*p == '-') { sign = -1.0; ++p; } else if (*p == '+') { ++p; } // is a number if ((*p >= '0' && *p <= '9') || *p == '.' 
|| *p == 'e' || *p == 'E') { // Get digits before decimal point or exponent, if any. for (value = 0.0; *p >= '0' && *p <= '9'; ++p) { value = value * 10.0 + (*p - '0'); } // Get digits after decimal point, if any. if (*p == '.') { double right = 0.0; int nn = 0; ++p; while (*p >= '0' && *p <= '9') { right = (*p - '0') + right * 10.0; ++nn; ++p; } value += right / Pow(10.0, nn); } // Handle exponent, if any. frac = 0; scale = 1.0; if ((*p == 'e') || (*p == 'E')) { uint32_t expon; // Get sign of exponent, if any. ++p; if (*p == '-') { frac = 1; ++p; } else if (*p == '+') { ++p; } // Get digits of exponent, if any. for (expon = 0; *p >= '0' && *p <= '9'; ++p) { expon = expon * 10 + (*p - '0'); } if (expon > 308) expon = 308; // Calculate scaling factor. while (expon >= 50) { scale *= 1E50; expon -= 50; } while (expon >= 8) { scale *= 1E8; expon -= 8; } while (expon > 0) { scale *= 10.0; expon -= 1; } } // Return signed and scaled floating point result. *out = sign * (frac ? (value / scale) : (value * scale)); } else { size_t cnt = 0; while (*(p + cnt) != '\0' && *(p + cnt) != ' ' && *(p + cnt) != '\t' && *(p + cnt) != ',' && *(p + cnt) != '\n' && *(p + cnt) != '\r' && *(p + cnt) != ':') { ++cnt; } if (cnt > 0) { std::string tmp_str(p, cnt); std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower); if (tmp_str == std::string("na") || tmp_str == std::string("nan") || tmp_str == std::string("null")) { *out = NAN; } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) { *out = sign * 1e308; } else { Log::Fatal("Unknown token %s in data file", tmp_str.c_str()); } p += cnt; } } while (*p == ' ') { ++p; } return p; } inline static bool AtoiAndCheck(const char* p, int* out) { const char* after = Atoi(p, out); if (*after != '\0') { return false; } return true; } inline static bool AtofAndCheck(const char* p, double* out) { const char* after = Atof(p, out); if (*after != '\0') { return false; } return true; } inline static unsigned CountDecimalDigit32(uint32_t n) { #if defined(_MSC_VER) || defined(__GNUC__) static const uint32_t powers_of_10[] = { 0, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 }; #ifdef _MSC_VER unsigned long i = 0; _BitScanReverse(&i, n | 1); uint32_t t = (i + 1) * 1233 >> 12; #elif __GNUC__ uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12; #endif return t - (n < powers_of_10[t]) + 1; #else if (n < 10) return 1; if (n < 100) return 2; if (n < 1000) return 3; if (n < 10000) return 4; if (n < 100000) return 5; if (n < 1000000) return 6; if (n < 10000000) return 7; if (n < 100000000) return 8; if (n < 1000000000) return 9; return 10; #endif } inline static void Uint32ToStr(uint32_t value, char* buffer) { const char kDigitsLut[200] = { '0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9', '1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9', '2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9', '3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9', '4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9', '5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9', '6', '0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9', '7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', 
'8', '7', '9', '8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9', '9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9' }; unsigned digit = CountDecimalDigit32(value); buffer += digit; *buffer = '\0'; while (value >= 100) { const unsigned i = (value % 100) << 1; value /= 100; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } if (value < 10) { *--buffer = static_cast<char>(value) + '0'; } else { const unsigned i = value << 1; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } } inline static void Int32ToStr(int32_t value, char* buffer) { uint32_t u = static_cast<uint32_t>(value); if (value < 0) { *buffer++ = '-'; u = ~u + 1; } Uint32ToStr(u, buffer); } inline static void DoubleToStr(double value, char* buffer, size_t #ifdef _MSC_VER buffer_len #endif ) { #ifdef _MSC_VER sprintf_s(buffer, buffer_len, "%.17g", value); #else sprintf(buffer, "%.17g", value); #endif } inline static const char* SkipSpaceAndTab(const char* p) { while (*p == ' ' || *p == '\t') { ++p; } return p; } inline static const char* SkipReturn(const char* p) { while (*p == '\n' || *p == '\r' || *p == ' ') { ++p; } return p; } template<typename T, typename T2> inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) { std::vector<T2> ret(arr.size()); for (size_t i = 0; i < arr.size(); ++i) { ret[i] = static_cast<T2>(arr[i]); } return ret; } template<typename T, bool is_float, bool is_unsign> struct __TToStringHelperFast { void operator()(T value, char* buffer, size_t) const { Int32ToStr(value, buffer); } }; template<typename T> struct __TToStringHelperFast<T, true, false> { void operator()(T value, char* buffer, size_t #ifdef _MSC_VER buf_len #endif ) const { #ifdef _MSC_VER sprintf_s(buffer, buf_len, "%g", value); #else sprintf(buffer, "%g", value); #endif } }; template<typename T> struct __TToStringHelperFast<T, false, true> { void operator()(T value, char* buffer, size_t) const { Uint32ToStr(value, buffer); } }; template<typename T> inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } __TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper; const size_t buf_len = 16; std::vector<char> buffer(buf_len); std::stringstream str_buf; helper(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { helper(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } inline static std::string ArrayToString(const std::vector<double>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } const size_t buf_len = 32; std::vector<char> buffer(buf_len); std::stringstream str_buf; DoubleToStr(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { DoubleToStr(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } template<typename T, bool is_float> struct __StringToTHelper { T operator()(const std::string& str) const { T ret = 0; Atoi(str.c_str(), &ret); return ret; } }; template<typename T> struct __StringToTHelper<T, true> { T operator()(const std::string& str) const { return static_cast<T>(std::stod(str)); } }; template<typename T> inline static std::vector<T> StringToArray(const std::string& str, char delimiter) { std::vector<std::string> strs = Split(str.c_str(), delimiter); std::vector<T> ret; 
ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T> inline static std::vector<T> StringToArray(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } std::vector<std::string> strs = Split(str.c_str(), ' '); CHECK(strs.size() == static_cast<size_t>(n)); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T, bool is_float> struct __StringToTHelperFast { const char* operator()(const char*p, T* out) const { return Atoi(p, out); } }; template<typename T> struct __StringToTHelperFast<T, true> { const char* operator()(const char*p, T* out) const { double tmp = 0.0f; auto ret = Atof(p, &tmp); *out = static_cast<T>(tmp); return ret; } }; template<typename T> inline static std::vector<T> StringToArrayFast(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } auto p_str = str.c_str(); __StringToTHelperFast<T, std::is_floating_point<T>::value> helper; std::vector<T> ret(n); for (int i = 0; i < n; ++i) { p_str = helper(p_str, &ret[i]); } return ret; } template<typename T> inline static std::string Join(const std::vector<T>& strs, const char* delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[0]; for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } template<> inline std::string Join<int8_t>(const std::vector<int8_t>& strs, const char* delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << static_cast<int16_t>(strs[0]); for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << static_cast<int16_t>(strs[i]); } return str_buf.str(); } template<typename T> inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) { if (end - start <= 0) { return std::string(""); } start = std::min(start, static_cast<size_t>(strs.size()) - 1); end = std::min(end, static_cast<size_t>(strs.size())); std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[start]; for (size_t i = start + 1; i < end; ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } inline static int64_t Pow2RoundUp(int64_t x) { int64_t t = 1; for (int i = 0; i < 64; ++i) { if (t >= x) { return t; } t <<= 1; } return 0; } /*! * \brief Do inplace softmax transformation on p_rec * \param p_rec The input/output vector of the values. 
*/ inline static void Softmax(std::vector<double>* p_rec) { std::vector<double> &rec = *p_rec; double wmax = rec[0]; for (size_t i = 1; i < rec.size(); ++i) { wmax = std::max(rec[i], wmax); } double wsum = 0.0f; for (size_t i = 0; i < rec.size(); ++i) { rec[i] = std::exp(rec[i] - wmax); wsum += rec[i]; } for (size_t i = 0; i < rec.size(); ++i) { rec[i] /= static_cast<double>(wsum); } } inline static void Softmax(const double* input, double* output, int len) { double wmax = input[0]; for (int i = 1; i < len; ++i) { wmax = std::max(input[i], wmax); } double wsum = 0.0f; for (int i = 0; i < len; ++i) { output[i] = std::exp(input[i] - wmax); wsum += output[i]; } for (int i = 0; i < len; ++i) { output[i] /= static_cast<double>(wsum); } } template<typename T> std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) { std::vector<const T*> ret; for (auto t = input.begin(); t !=input.end(); ++t) { ret.push_back(t->get()); } return ret; } template<typename T1, typename T2> inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, size_t start, bool is_reverse = false) { std::vector<std::pair<T1, T2>> arr; auto& ref_key = *keys; auto& ref_value = *values; for (size_t i = start; i < keys->size(); ++i) { arr.emplace_back(ref_key[i], ref_value[i]); } if (!is_reverse) { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first < b.first; }); } else { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first > b.first; }); } for (size_t i = start; i < arr.size(); ++i) { ref_key[i] = arr[i].first; ref_value[i] = arr[i].second; } } template <typename T> inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>* data) { std::vector<T*> ptr(data->size()); auto& ref_data = *data; for (size_t i = 0; i < data->size(); ++i) { ptr[i] = ref_data[i].data(); } return ptr; } template <typename T> inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) { std::vector<int> ret(data.size()); for (size_t i = 0; i < data.size(); ++i) { ret[i] = static_cast<int>(data[i].size()); } return ret; } inline static double AvoidInf(double x) { if (std::isnan(x)) { return 0.0; } else if (x >= 1e300) { return 1e300; } else if (x <= -1e300) { return -1e300; } else { return x; } } inline static float AvoidInf(float x) { if (std::isnan(x)) { return 0.0f; } else if (x >= 1e38) { return 1e38f; } else if (x <= -1e38) { return -1e38f; } else { return x; } } template<typename _Iter> inline static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) { return (0); } template<typename _RanIt, typename _Pr, typename _VTRanIt> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) { size_t len = _Last - _First; const size_t kMinInnerLen = 1024; int num_threads = 1; num_threads = omp_get_num_threads(); if (len <= kMinInnerLen || num_threads <= 1) { std::sort(_First, _Last, _Pred); return; } size_t inner_size = (len + num_threads - 1) / num_threads; inner_size = std::max(inner_size, kMinInnerLen); num_threads = static_cast<int>((len + inner_size - 1) / inner_size); for (int i = 0; i < num_threads; ++i) { size_t left = inner_size*i; size_t right = left + inner_size; right = std::min(right, len); if (right > left) { std::sort(_First + left, _First + right, _Pred); } } // Buffer for merge. 
std::vector<_VTRanIt> temp_buf(len); _RanIt buf = temp_buf.begin(); size_t s = inner_size; // Recursive merge while (s < len) { int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2)); for (int i = 0; i < loop_size; ++i) { size_t left = i * 2 * s; size_t mid = left + s; size_t right = mid + s; right = std::min(len, right); if (mid >= right) { continue; } std::copy(_First + left, _First + mid, buf + left); std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred); } s *= 2; } } template<typename _RanIt, typename _Pr> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) { return ParallelSort(_First, _Last, _Pred, IteratorValType(_First)); } // Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not template <typename T> inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) { auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) { std::ostringstream os; os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]"; Log::Fatal(os.str().c_str(), callername, i); }; for (int i = 1; i < ny; i += 2) { if (y[i - 1] < y[i]) { if (y[i - 1] < ymin) { fatal_msg(i - 1); } else if (y[i] > ymax) { fatal_msg(i); } } else { if (y[i - 1] > ymax) { fatal_msg(i - 1); } else if (y[i] < ymin) { fatal_msg(i); } } } if (ny & 1) { // odd if (y[ny - 1] < ymin || y[ny - 1] > ymax) { fatal_msg(ny - 1); } } } // One-pass scan over array w with nw elements: find min, max and sum of elements; // this is useful for checking weight requirements. template <typename T1, typename T2> inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) { T1 minw; T1 maxw; T1 sumw; int i; if (nw & 1) { // odd minw = w[0]; maxw = w[0]; sumw = w[0]; i = 2; } else { // even if (w[0] < w[1]) { minw = w[0]; maxw = w[1]; } else { minw = w[1]; maxw = w[0]; } sumw = w[0] + w[1]; i = 3; } for (; i < nw; i += 2) { if (w[i - 1] < w[i]) { minw = std::min(minw, w[i - 1]); maxw = std::max(maxw, w[i]); } else { minw = std::min(minw, w[i]); maxw = std::max(maxw, w[i - 1]); } sumw += w[i - 1] + w[i]; } if (mi != nullptr) { *mi = minw; } if (ma != nullptr) { *ma = maxw; } if (su != nullptr) { *su = static_cast<T2>(sumw); } } inline static std::vector<uint32_t> EmptyBitset(int n) { int size = n / 32; if (n % 32 != 0) ++size; return std::vector<uint32_t>(size); } template<typename T> inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) { auto& ref_v = *vec; int i1 = val / 32; int i2 = val % 32; if (static_cast<int>(vec->size()) < i1 + 1) { vec->resize(i1 + 1, 0); } ref_v[i1] |= (1 << i2); } template<typename T> inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) { std::vector<uint32_t> ret; for (int i = 0; i < n; ++i) { int i1 = vals[i] / 32; int i2 = vals[i] % 32; if (static_cast<int>(ret.size()) < i1 + 1) { ret.resize(i1 + 1, 0); } ret[i1] |= (1 << i2); } return ret; } template<typename T> inline static bool FindInBitset(const uint32_t* bits, int n, T pos) { int i1 = pos / 32; if (i1 >= n) { return false; } int i2 = pos % 32; return (bits[i1] >> i2) & 1; } inline static bool CheckDoubleEqualOrdered(double a, double b) { double upper = std::nextafter(a, INFINITY); return b <= upper; } inline static double GetDoubleUpperBound(double a) { return std::nextafter(a, INFINITY);; } inline static size_t GetLine(const char* str) { auto start = str; while (*str != '\0' && *str != '\n' && *str != '\r') { ++str; } return str 
- start; } inline static const char* SkipNewLine(const char* str) { if (*str == '\r') { ++str; } if (*str == '\n') { ++str; } return str; } template <typename T> static int Sign(T x) { return (x > T(0)) - (x < T(0)); } template <typename T> static T SafeLog(T x) { if (x > 0) { return std::log(x); } else { return -INFINITY; } } inline bool CheckASCII(const std::string& s) { for (auto c : s) { if (static_cast<unsigned char>(c) > 127) { return false; } } return true; } inline bool CheckAllowedJSON(const std::string& s) { unsigned char char_code; for (auto c : s) { char_code = static_cast<unsigned char>(c); if (char_code == 34 // " || char_code == 44 // , || char_code == 58 // : || char_code == 91 // [ || char_code == 93 // ] || char_code == 123 // { || char_code == 125 // } ) { return false; } } return true; } } // namespace Common } // namespace LightGBM #endif // LightGBM_UTILS_COMMON_FUN_H_
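The bitset helpers above (EmptyBitset / InsertBitset / FindInBitset) pack integer values into 32-bit words. A stand-alone round-trip sketch, with the helper bodies inlined so it compiles without the header; the sample values 5 and 40 are arbitrary.

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<uint32_t> bits;  // starts empty; inserts grow it on demand, as InsertBitset does
  // Insert 5 and 40: 5 lands in word 0 (bit 5), 40 in word 1 (bit 8).
  for (int val : {5, 40}) {
    int i1 = val / 32, i2 = val % 32;
    if (static_cast<int>(bits.size()) < i1 + 1) bits.resize(i1 + 1, 0);
    bits[i1] |= (1u << i2);
  }
  // Membership query against the packed words, as FindInBitset does.
  auto find = [&](int pos) {
    int i1 = pos / 32;
    if (i1 >= static_cast<int>(bits.size())) return false;
    return ((bits[i1] >> (pos % 32)) & 1) != 0;
  };
  printf("%d %d %d\n", find(5), find(40), find(6));  // prints: 1 1 0
}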
#ifndef LIGHTGBM_UTILS_COMMON_FUN_H_ #define LIGHTGBM_UTILS_COMMON_FUN_H_ #include <LightGBM/utils/log.h> #include <LightGBM/utils/openmp_wrapper.h> #include <limits> #include <string> #include <algorithm> #include <cmath> #include <cstdint> #include <cstdio> #include <functional> #include <iomanip> #include <iterator> #include <memory> #include <sstream> #include <type_traits> #include <utility> #include <vector> #ifdef _MSC_VER #include "intrin.h" #endif namespace LightGBM { namespace Common { inline static char tolower(char in) { if (in <= 'Z' && in >= 'A') return in - ('Z' - 'z'); return in; } inline static std::string Trim(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1); str.erase(0, str.find_first_not_of(" \f\n\r\t\v")); return str; } inline static std::string RemoveQuotationSymbol(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of("'\"") + 1); str.erase(0, str.find_first_not_of("'\"")); return str; } inline static bool StartsWith(const std::string& str, const std::string prefix) { if (str.substr(0, prefix.size()) == prefix) { return true; } else { return false; } } inline static std::vector<std::string> Split(const char* c_str, char delimiter) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == delimiter) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> SplitLines(const char* c_str) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == '\n' || str[pos] == '\r') { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } // skip the line endings while (str[pos] == '\n' || str[pos] == '\r') ++pos; // new begin i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { bool met_delimiters = false; for (int j = 0; delimiters[j] != '\0'; ++j) { if (str[pos] == delimiters[j]) { met_delimiters = true; break; } } if (met_delimiters) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } template<typename T> inline static const char* Atoi(const char* p, T* out) { int sign; T value; while (*p == ' ') { ++p; } sign = 1; if (*p == '-') { sign = -1; ++p; } else if (*p == '+') { ++p; } for (value = 0; *p >= '0' && *p <= '9'; ++p) { value = value * 10 + (*p - '0'); } *out = static_cast<T>(sign * value); while (*p == ' ') { ++p; } return p; } template<typename T> inline static double Pow(T base, int power) { if (power < 0) { return 1.0 / Pow(base, -power); } else if (power == 0) { return 1; } else if (power % 2 == 0) { return Pow(base*base, power / 2); } else if (power % 3 == 0) { return Pow(base*base*base, power / 3); } else { return base * Pow(base, power - 1); } } inline static const char* Atof(const char* p, double* out) { int frac; double sign, value, scale; *out = NAN; // Skip leading white space, if any. while (*p == ' ') { ++p; } // Get sign, if any. sign = 1.0; if (*p == '-') { sign = -1.0; ++p; } else if (*p == '+') { ++p; } // is a number if ((*p >= '0' && *p <= '9') || *p == '.' 
|| *p == 'e' || *p == 'E') { // Get digits before decimal point or exponent, if any. for (value = 0.0; *p >= '0' && *p <= '9'; ++p) { value = value * 10.0 + (*p - '0'); } // Get digits after decimal point, if any. if (*p == '.') { double right = 0.0; int nn = 0; ++p; while (*p >= '0' && *p <= '9') { right = (*p - '0') + right * 10.0; ++nn; ++p; } value += right / Pow(10.0, nn); } // Handle exponent, if any. frac = 0; scale = 1.0; if ((*p == 'e') || (*p == 'E')) { uint32_t expon; // Get sign of exponent, if any. ++p; if (*p == '-') { frac = 1; ++p; } else if (*p == '+') { ++p; } // Get digits of exponent, if any. for (expon = 0; *p >= '0' && *p <= '9'; ++p) { expon = expon * 10 + (*p - '0'); } if (expon > 308) expon = 308; // Calculate scaling factor. while (expon >= 50) { scale *= 1E50; expon -= 50; } while (expon >= 8) { scale *= 1E8; expon -= 8; } while (expon > 0) { scale *= 10.0; expon -= 1; } } // Return signed and scaled floating point result. *out = sign * (frac ? (value / scale) : (value * scale)); } else { size_t cnt = 0; while (*(p + cnt) != '\0' && *(p + cnt) != ' ' && *(p + cnt) != '\t' && *(p + cnt) != ',' && *(p + cnt) != '\n' && *(p + cnt) != '\r' && *(p + cnt) != ':') { ++cnt; } if (cnt > 0) { std::string tmp_str(p, cnt); std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower); if (tmp_str == std::string("na") || tmp_str == std::string("nan") || tmp_str == std::string("null")) { *out = NAN; } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) { *out = sign * 1e308; } else { Log::Fatal("Unknown token %s in data file", tmp_str.c_str()); } p += cnt; } } while (*p == ' ') { ++p; } return p; } inline static bool AtoiAndCheck(const char* p, int* out) { const char* after = Atoi(p, out); if (*after != '\0') { return false; } return true; } inline static bool AtofAndCheck(const char* p, double* out) { const char* after = Atof(p, out); if (*after != '\0') { return false; } return true; } inline static unsigned CountDecimalDigit32(uint32_t n) { #if defined(_MSC_VER) || defined(__GNUC__) static const uint32_t powers_of_10[] = { 0, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 }; #ifdef _MSC_VER unsigned long i = 0; _BitScanReverse(&i, n | 1); uint32_t t = (i + 1) * 1233 >> 12; #elif __GNUC__ uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12; #endif return t - (n < powers_of_10[t]) + 1; #else if (n < 10) return 1; if (n < 100) return 2; if (n < 1000) return 3; if (n < 10000) return 4; if (n < 100000) return 5; if (n < 1000000) return 6; if (n < 10000000) return 7; if (n < 100000000) return 8; if (n < 1000000000) return 9; return 10; #endif } inline static void Uint32ToStr(uint32_t value, char* buffer) { const char kDigitsLut[200] = { '0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9', '1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9', '2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9', '3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9', '4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9', '5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9', '6', '0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9', '7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', 
'8', '7', '9', '8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9', '9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9' }; unsigned digit = CountDecimalDigit32(value); buffer += digit; *buffer = '\0'; while (value >= 100) { const unsigned i = (value % 100) << 1; value /= 100; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } if (value < 10) { *--buffer = static_cast<char>(value) + '0'; } else { const unsigned i = value << 1; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } } inline static void Int32ToStr(int32_t value, char* buffer) { uint32_t u = static_cast<uint32_t>(value); if (value < 0) { *buffer++ = '-'; u = ~u + 1; } Uint32ToStr(u, buffer); } inline static void DoubleToStr(double value, char* buffer, size_t #ifdef _MSC_VER buffer_len #endif ) { #ifdef _MSC_VER sprintf_s(buffer, buffer_len, "%.17g", value); #else sprintf(buffer, "%.17g", value); #endif } inline static const char* SkipSpaceAndTab(const char* p) { while (*p == ' ' || *p == '\t') { ++p; } return p; } inline static const char* SkipReturn(const char* p) { while (*p == '\n' || *p == '\r' || *p == ' ') { ++p; } return p; } template<typename T, typename T2> inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) { std::vector<T2> ret(arr.size()); for (size_t i = 0; i < arr.size(); ++i) { ret[i] = static_cast<T2>(arr[i]); } return ret; } template<typename T, bool is_float, bool is_unsign> struct __TToStringHelperFast { void operator()(T value, char* buffer, size_t) const { Int32ToStr(value, buffer); } }; template<typename T> struct __TToStringHelperFast<T, true, false> { void operator()(T value, char* buffer, size_t #ifdef _MSC_VER buf_len #endif ) const { #ifdef _MSC_VER sprintf_s(buffer, buf_len, "%g", value); #else sprintf(buffer, "%g", value); #endif } }; template<typename T> struct __TToStringHelperFast<T, false, true> { void operator()(T value, char* buffer, size_t) const { Uint32ToStr(value, buffer); } }; template<typename T> inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } __TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper; const size_t buf_len = 16; std::vector<char> buffer(buf_len); std::stringstream str_buf; helper(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { helper(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } inline static std::string ArrayToString(const std::vector<double>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } const size_t buf_len = 32; std::vector<char> buffer(buf_len); std::stringstream str_buf; DoubleToStr(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { DoubleToStr(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } template<typename T, bool is_float> struct __StringToTHelper { T operator()(const std::string& str) const { T ret = 0; Atoi(str.c_str(), &ret); return ret; } }; template<typename T> struct __StringToTHelper<T, true> { T operator()(const std::string& str) const { return static_cast<T>(std::stod(str)); } }; template<typename T> inline static std::vector<T> StringToArray(const std::string& str, char delimiter) { std::vector<std::string> strs = Split(str.c_str(), delimiter); std::vector<T> ret; 
ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T> inline static std::vector<T> StringToArray(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } std::vector<std::string> strs = Split(str.c_str(), ' '); CHECK(strs.size() == static_cast<size_t>(n)); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T, bool is_float> struct __StringToTHelperFast { const char* operator()(const char*p, T* out) const { return Atoi(p, out); } }; template<typename T> struct __StringToTHelperFast<T, true> { const char* operator()(const char*p, T* out) const { double tmp = 0.0f; auto ret = Atof(p, &tmp); *out = static_cast<T>(tmp); return ret; } }; template<typename T> inline static std::vector<T> StringToArrayFast(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } auto p_str = str.c_str(); __StringToTHelperFast<T, std::is_floating_point<T>::value> helper; std::vector<T> ret(n); for (int i = 0; i < n; ++i) { p_str = helper(p_str, &ret[i]); } return ret; } template<typename T> inline static std::string Join(const std::vector<T>& strs, const char* delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[0]; for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } template<> inline std::string Join<int8_t>(const std::vector<int8_t>& strs, const char* delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << static_cast<int16_t>(strs[0]); for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << static_cast<int16_t>(strs[i]); } return str_buf.str(); } template<typename T> inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) { if (end - start <= 0) { return std::string(""); } start = std::min(start, static_cast<size_t>(strs.size()) - 1); end = std::min(end, static_cast<size_t>(strs.size())); std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[start]; for (size_t i = start + 1; i < end; ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } inline static int64_t Pow2RoundUp(int64_t x) { int64_t t = 1; for (int i = 0; i < 64; ++i) { if (t >= x) { return t; } t <<= 1; } return 0; } /*! * \brief Do inplace softmax transformation on p_rec * \param p_rec The input/output vector of the values. 
*/ inline static void Softmax(std::vector<double>* p_rec) { std::vector<double> &rec = *p_rec; double wmax = rec[0]; for (size_t i = 1; i < rec.size(); ++i) { wmax = std::max(rec[i], wmax); } double wsum = 0.0f; for (size_t i = 0; i < rec.size(); ++i) { rec[i] = std::exp(rec[i] - wmax); wsum += rec[i]; } for (size_t i = 0; i < rec.size(); ++i) { rec[i] /= static_cast<double>(wsum); } } inline static void Softmax(const double* input, double* output, int len) { double wmax = input[0]; for (int i = 1; i < len; ++i) { wmax = std::max(input[i], wmax); } double wsum = 0.0f; for (int i = 0; i < len; ++i) { output[i] = std::exp(input[i] - wmax); wsum += output[i]; } for (int i = 0; i < len; ++i) { output[i] /= static_cast<double>(wsum); } } template<typename T> std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) { std::vector<const T*> ret; for (auto t = input.begin(); t !=input.end(); ++t) { ret.push_back(t->get()); } return ret; } template<typename T1, typename T2> inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, size_t start, bool is_reverse = false) { std::vector<std::pair<T1, T2>> arr; auto& ref_key = *keys; auto& ref_value = *values; for (size_t i = start; i < keys->size(); ++i) { arr.emplace_back(ref_key[i], ref_value[i]); } if (!is_reverse) { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first < b.first; }); } else { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first > b.first; }); } for (size_t i = start; i < arr.size(); ++i) { ref_key[i] = arr[i].first; ref_value[i] = arr[i].second; } } template <typename T> inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>* data) { std::vector<T*> ptr(data->size()); auto& ref_data = *data; for (size_t i = 0; i < data->size(); ++i) { ptr[i] = ref_data[i].data(); } return ptr; } template <typename T> inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) { std::vector<int> ret(data.size()); for (size_t i = 0; i < data.size(); ++i) { ret[i] = static_cast<int>(data[i].size()); } return ret; } inline static double AvoidInf(double x) { if (std::isnan(x)) { return 0.0; } else if (x >= 1e300) { return 1e300; } else if (x <= -1e300) { return -1e300; } else { return x; } } inline static float AvoidInf(float x) { if (std::isnan(x)) { return 0.0f; } else if (x >= 1e38) { return 1e38f; } else if (x <= -1e38) { return -1e38f; } else { return x; } } template<typename _Iter> inline static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) { return (0); } template<typename _RanIt, typename _Pr, typename _VTRanIt> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) { size_t len = _Last - _First; const size_t kMinInnerLen = 1024; int num_threads = 1; #pragma omp parallel #pragma omp master { num_threads = omp_get_num_threads(); } if (len <= kMinInnerLen || num_threads <= 1) { std::sort(_First, _Last, _Pred); return; } size_t inner_size = (len + num_threads - 1) / num_threads; inner_size = std::max(inner_size, kMinInnerLen); num_threads = static_cast<int>((len + inner_size - 1) / inner_size); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < num_threads; ++i) { size_t left = inner_size*i; size_t right = left + inner_size; right = std::min(right, len); if (right > left) { std::sort(_First + left, _First + right, _Pred); } } // Buffer for merge. 
std::vector<_VTRanIt> temp_buf(len); _RanIt buf = temp_buf.begin(); size_t s = inner_size; // Recursive merge while (s < len) { int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2)); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < loop_size; ++i) { size_t left = i * 2 * s; size_t mid = left + s; size_t right = mid + s; right = std::min(len, right); if (mid >= right) { continue; } std::copy(_First + left, _First + mid, buf + left); std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred); } s *= 2; } } template<typename _RanIt, typename _Pr> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) { return ParallelSort(_First, _Last, _Pred, IteratorValType(_First)); } // Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not template <typename T> inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) { auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) { std::ostringstream os; os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]"; Log::Fatal(os.str().c_str(), callername, i); }; for (int i = 1; i < ny; i += 2) { if (y[i - 1] < y[i]) { if (y[i - 1] < ymin) { fatal_msg(i - 1); } else if (y[i] > ymax) { fatal_msg(i); } } else { if (y[i - 1] > ymax) { fatal_msg(i - 1); } else if (y[i] < ymin) { fatal_msg(i); } } } if (ny & 1) { // odd if (y[ny - 1] < ymin || y[ny - 1] > ymax) { fatal_msg(ny - 1); } } } // One-pass scan over array w with nw elements: find min, max and sum of elements; // this is useful for checking weight requirements. template <typename T1, typename T2> inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) { T1 minw; T1 maxw; T1 sumw; int i; if (nw & 1) { // odd minw = w[0]; maxw = w[0]; sumw = w[0]; i = 2; } else { // even if (w[0] < w[1]) { minw = w[0]; maxw = w[1]; } else { minw = w[1]; maxw = w[0]; } sumw = w[0] + w[1]; i = 3; } for (; i < nw; i += 2) { if (w[i - 1] < w[i]) { minw = std::min(minw, w[i - 1]); maxw = std::max(maxw, w[i]); } else { minw = std::min(minw, w[i]); maxw = std::max(maxw, w[i - 1]); } sumw += w[i - 1] + w[i]; } if (mi != nullptr) { *mi = minw; } if (ma != nullptr) { *ma = maxw; } if (su != nullptr) { *su = static_cast<T2>(sumw); } } inline static std::vector<uint32_t> EmptyBitset(int n) { int size = n / 32; if (n % 32 != 0) ++size; return std::vector<uint32_t>(size); } template<typename T> inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) { auto& ref_v = *vec; int i1 = val / 32; int i2 = val % 32; if (static_cast<int>(vec->size()) < i1 + 1) { vec->resize(i1 + 1, 0); } ref_v[i1] |= (1 << i2); } template<typename T> inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) { std::vector<uint32_t> ret; for (int i = 0; i < n; ++i) { int i1 = vals[i] / 32; int i2 = vals[i] % 32; if (static_cast<int>(ret.size()) < i1 + 1) { ret.resize(i1 + 1, 0); } ret[i1] |= (1 << i2); } return ret; } template<typename T> inline static bool FindInBitset(const uint32_t* bits, int n, T pos) { int i1 = pos / 32; if (i1 >= n) { return false; } int i2 = pos % 32; return (bits[i1] >> i2) & 1; } inline static bool CheckDoubleEqualOrdered(double a, double b) { double upper = std::nextafter(a, INFINITY); return b <= upper; } inline static double GetDoubleUpperBound(double a) { return std::nextafter(a, INFINITY);; } inline static size_t GetLine(const char* str) { auto start = str; while (*str != '\0' && *str != 
'\n' && *str != '\r') { ++str; } return str - start; } inline static const char* SkipNewLine(const char* str) { if (*str == '\r') { ++str; } if (*str == '\n') { ++str; } return str; } template <typename T> static int Sign(T x) { return (x > T(0)) - (x < T(0)); } template <typename T> static T SafeLog(T x) { if (x > 0) { return std::log(x); } else { return -INFINITY; } } inline bool CheckASCII(const std::string& s) { for (auto c : s) { if (static_cast<unsigned char>(c) > 127) { return false; } } return true; } inline bool CheckAllowedJSON(const std::string& s) { unsigned char char_code; for (auto c : s) { char_code = static_cast<unsigned char>(c); if (char_code == 34 // " || char_code == 44 // , || char_code == 58 // : || char_code == 91 // [ || char_code == 93 // ] || char_code == 123 // { || char_code == 125 // } ) { return false; } } return true; } } // namespace Common } // namespace LightGBM #endif // LightGBM_UTILS_COMMON_FUN_H_
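The omp_formatted variant above queries the team size with a parallel/master pair because omp_get_num_threads() returns 1 when called from a serial region; the no_omp variant calls it directly, always sees 1, and so falls back to plain std::sort. A minimal sketch of the idiom (compile with -fopenmp):

#include <cstdio>
#include <omp.h>

int main() {
  int outside = omp_get_num_threads();  // always 1: we are in the serial region
  int inside = 1;
  #pragma omp parallel
  #pragma omp master
  { inside = omp_get_num_threads(); }   // actual team size, read by one thread
  printf("outside=%d inside=%d\n", outside, inside);
}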
multibit_fmt_plug.c
/* * JtR format to crack password protected MultiBit Wallets. * * This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and it * is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * All credit goes to Christopher Gurnee for making this work possible. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_multibit; #elif FMT_REGISTERS_H john_register_one(&fmt_multibit); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 2 #endif #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "aes.h" #include "md5.h" #include "escrypt/crypto_scrypt.h" #include "jumbo.h" #include "memdbg.h" #include "unicode.h" #define FORMAT_NAME "MultiBit Wallet" #define FORMAT_LABEL "multibit" #define FORMAT_TAG "$multibit$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define ALGORITHM_NAME "MD5/scrypt AES 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1001 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(uint32_t) #define PLAINTEXT_LENGTH 125 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 64 // just 4 is better for v2 salts static struct fmt_tests multibit_tests[] = { // Wallets created by MultiBit Classic 0.5.18 {"$multibit$1*0908a1bd44147709*c82b6d0409c1e46a4660ea6d4fa9ae12e4e234c98a71a51ced105c7e66a57ca3", "openwall"}, {"$multibit$1*2043ebb14b6d9670*24284a38a62b6a63fb0912ebc05aa9d26d6fd828134d20b9778d8d841f65f584", "openwall123"}, // MultiBit HD wallet 0.5.0 {"$multibit$2*081e3a1252c26731120d0d63783ae46f*8354d5b454e78fb15f81c9e6289ba9b8*081e3a1252c26731120d0d63783ae46f", "openwall"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked, cracked_count; static struct custom_salt { uint32_t type; unsigned char salt[16]; unsigned char block[32]; unsigned char iv[16]; unsigned char block2[16]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); if (omp_t > 1) { self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; } #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt); cracked_count = self->params.max_keys_per_crypt; } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int value, extra; if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += TAG_LENGTH; if ((p = strtokm(ctcopy, "*")) == NULL) // type goto err; if (!isdec(p)) goto err; value = atoi(p); if (value != 1 && value != 2) goto err; if (value == 1) { if ((p = strtokm(NULL, "*")) == NULL) // salt goto err; if (hexlenl(p, &extra) != 8 * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) // encrypted blocks goto err; if (hexlenl(p, &extra) != 32 * 2 || extra) goto err; } else if (value == 2) { if ((p = strtokm(NULL, "*")) == NULL) // iv goto err; if (hexlenl(p, &extra) != 16 * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) // encrypted block with iv goto err; if (hexlenl(p, &extra) != 16 * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) // encrypted block with hardcoded iv goto err; if 
(hexlenl(p, &extra) != 16 * 2 || extra) goto err; } MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i; memset(&cs, 0, SALT_SIZE); ctcopy += TAG_LENGTH; p = strtokm(ctcopy, "*"); cs.type = atoi(p); p = strtokm(NULL, "*"); if (cs.type == 1) { for (i = 0; i < 8; i++) cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; p = strtokm(NULL, "*"); for (i = 0; i < 32; i++) cs.block[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; } else if (cs.type == 2) { for (i = 0; i < 16; i++) cs.iv[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; p = strtokm(NULL, "*"); for (i = 0; i < 16; i++) cs.block[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; p = strtokm(NULL, "*"); for (i = 0; i < 16; i++) cs.block2[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; } MEM_FREE(keeptr); return &cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static void multibit_set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(*saved_key)); } static char *get_key(int index) { return saved_key[index]; } static int is_bitcoinj_protobuf_data(unsigned char *block) { unsigned char c; int i; // Does it look like a bitcoinj protobuf (newest Bitcoin for Android backup)? if (block[0] == '\x0a' && block[1] < 128 && !memcmp((const char*)block + 2, "org.", 4)) { // If it doesn't look like a lower alpha domain name of len >= 8 (e.g. 'bitcoin.'), fail (btcrecover) for (i = 6; i < 14; i++) { c = block[i]; if ((c > 'z') || ((c < 'a') && ((c != '.')))) return 0; } return 1; // success } return 0; } static int is_base58(unsigned char *buffer, int length) { unsigned char c; int i; for (i = 0; i < length; i++) { c = buffer[i]; if ((c > 'z') || (c < '1') || ((c > '9') && (c < 'A')) || ((c > 'Z') && (c < 'a'))) { return 0; } } return 1; // success } static const unsigned char *salt_hardcoded = (unsigned char*)"\x35\x51\x03\x80\x75\xa3\xb0\xc5"; static const unsigned char *iv_hardcoded = (unsigned char*)"\xa3\x44\x39\x1f\x53\x83\x11\xb3\x29\x54\x86\x16\xc4\x89\x72\x3e"; static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { unsigned char iv[16]; unsigned char key[32]; unsigned char outbuf[16]; AES_KEY aes_decrypt_key; int len = strlen(saved_key[index]); #ifdef _OPENMP if (cracked[index]) /* avoid false sharing of nearby elements */ #endif cracked[index] = 0; if (cur_salt->type == 1) { unsigned char c; MD5_CTX ctx; // key MD5_Init(&ctx); MD5_Update(&ctx, saved_key[index], len); MD5_Update(&ctx, cur_salt->salt, 8); MD5_Final(key, &ctx); // key + 16 MD5_Init(&ctx); MD5_Update(&ctx, key, 16); MD5_Update(&ctx, saved_key[index], len); MD5_Update(&ctx, cur_salt->salt, 8); MD5_Final(key + 16, &ctx); // iv MD5_Init(&ctx); MD5_Update(&ctx, key + 16, 16); MD5_Update(&ctx, saved_key[index], len); MD5_Update(&ctx, cur_salt->salt, 8); MD5_Final(iv, &ctx); AES_set_decrypt_key(key, 256, &aes_decrypt_key); AES_cbc_encrypt(cur_salt->block, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT); c = outbuf[0]; if (c == 'L' || c == 'K' || c == '5' || c == 'Q') { // Does it look like a base58 private key (MultiBit, MultiDoge, or oldest-format Android key backup)? 
(btcrecover) // check if bytes are in base58 set [1-9A-HJ-NP-Za-km-z] if (is_base58(outbuf + 1, 15)) { // decrypt second block AES_cbc_encrypt(cur_salt->block + 16, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT); if (is_base58(outbuf, 16)) cracked[index] = 1; } } else if (c == '#') { // Does it look like a KnC for Android key backup? if (memcmp((const char*)outbuf, "# KEEP YOUR PRIV", 8) == 0) // 8 should be enough cracked[index] = 1; } else if (c == '\x0a') { // Does it look like a bitcoinj protobuf (newest Bitcoin for Android backup)? (btcrecover)? if (is_bitcoinj_protobuf_data(outbuf)) cracked[index] = 1; } } else if (cur_salt->type == 2) { UTF16 password[PLAINTEXT_LENGTH * 2 + 1]; len = enc_to_utf16_be(password, PLAINTEXT_LENGTH, (const unsigned char*)saved_key[index], len + 1); if (len < 0) len = strlen16(password); crypto_scrypt((const unsigned char*)password, (len + 1) * 2, salt_hardcoded, 8, 16384, 8, 1, key, 32); // 1 AES_set_decrypt_key(key, 128 * 2, &aes_decrypt_key); memcpy(iv, cur_salt->iv, 16); AES_cbc_encrypt(cur_salt->block, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT); if (is_bitcoinj_protobuf_data(outbuf)) cracked[index] = 1; else { // 2 AES_set_decrypt_key(key, 128 * 2, &aes_decrypt_key); memcpy(iv, iv_hardcoded, 16); AES_cbc_encrypt(cur_salt->block2, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT); if (is_bitcoinj_protobuf_data(outbuf)) cracked[index] = 1; } } } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_multibit = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT, { NULL }, { FORMAT_TAG }, multibit_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, multibit_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
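For type-1 (MultiBit Classic) wallets, crypt_all() above derives the 32-byte AES key and 16-byte IV from three chained MD5 digests over the password and 8-byte salt (the classic MD5-based EVP_BytesToKey scheme). A stand-alone sketch of just that derivation, assuming OpenSSL's legacy MD5 API from <openssl/md5.h>; the salt bytes come from the first test vector in multibit_tests.

#include <openssl/md5.h>
#include <cstdio>

// key[0..16)  = MD5(pass || salt)
// key[16..32) = MD5(key[0..16) || pass || salt)
// iv          = MD5(key[16..32) || pass || salt)
static void derive_key_iv(const unsigned char *pass, size_t pass_len,
                          const unsigned char *salt /* 8 bytes */,
                          unsigned char key[32], unsigned char iv[16]) {
  MD5_CTX ctx;
  MD5_Init(&ctx);
  MD5_Update(&ctx, pass, pass_len);
  MD5_Update(&ctx, salt, 8);
  MD5_Final(key, &ctx);
  MD5_Init(&ctx);
  MD5_Update(&ctx, key, 16);
  MD5_Update(&ctx, pass, pass_len);
  MD5_Update(&ctx, salt, 8);
  MD5_Final(key + 16, &ctx);
  MD5_Init(&ctx);
  MD5_Update(&ctx, key + 16, 16);
  MD5_Update(&ctx, pass, pass_len);
  MD5_Update(&ctx, salt, 8);
  MD5_Final(iv, &ctx);
}

int main() {
  unsigned char key[32], iv[16];
  const unsigned char salt[8] = {0x09, 0x08, 0xa1, 0xbd, 0x44, 0x14, 0x77, 0x09};
  derive_key_iv(reinterpret_cast<const unsigned char *>("openwall"), 8, salt, key, iv);
  for (int i = 0; i < 32; i++) printf("%02x", key[i]);
  printf("\n");
}

Type-2 (MultiBit HD) wallets take a different path, as crypt_all() shows: the key comes from scrypt (N=16384, r=8, p=1) over the UTF-16BE-encoded password with a hardcoded salt.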
#if FMT_EXTERNS_H
extern struct fmt_main fmt_multibit;
#elif FMT_REGISTERS_H
john_register_one(&fmt_multibit);
#else

#include <string.h>

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "aes.h"
#include "md5.h"
#include "escrypt/crypto_scrypt.h"
#include "jumbo.h"
#include "memdbg.h"
#include "unicode.h"

#define FORMAT_NAME             "MultiBit Wallet"
#define FORMAT_LABEL            "multibit"
#define FORMAT_TAG              "$multibit$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME          "MD5/scrypt AES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1001
#define BINARY_SIZE             0
#define BINARY_ALIGN            1
#define SALT_SIZE               sizeof(struct custom_salt)
#define SALT_ALIGN              sizeof(uint32_t)
#define PLAINTEXT_LENGTH        125
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      64 // just 4 is better for v2 salts

static struct fmt_tests multibit_tests[] = {
	// Wallets created by MultiBit Classic 0.5.18
	{"$multibit$1*0908a1bd44147709*c82b6d0409c1e46a4660ea6d4fa9ae12e4e234c98a71a51ced105c7e66a57ca3", "openwall"},
	{"$multibit$1*2043ebb14b6d9670*24284a38a62b6a63fb0912ebc05aa9d26d6fd828134d20b9778d8d841f65f584", "openwall123"},
	// MultiBit HD wallet 0.5.0
	{"$multibit$2*081e3a1252c26731120d0d63783ae46f*8354d5b454e78fb15f81c9e6289ba9b8*081e3a1252c26731120d0d63783ae46f", "openwall"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked, cracked_count;

static struct custom_salt {
	uint32_t type;
	unsigned char salt[16];
	unsigned char block[32];
	unsigned char iv[16];
	unsigned char block2[16];
} *cur_salt;

static void init(struct fmt_main *self)
{
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
	cracked_count = self->params.max_keys_per_crypt;
}

static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}

static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int value, extra;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += TAG_LENGTH;
	if ((p = strtokm(ctcopy, "*")) == NULL) // type
		goto err;
	if (!isdec(p))
		goto err;
	value = atoi(p);
	if (value != 1 && value != 2)
		goto err;
	if (value == 1) {
		if ((p = strtokm(NULL, "*")) == NULL) // salt
			goto err;
		if (hexlenl(p, &extra) != 8 * 2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) // encrypted blocks
			goto err;
		if (hexlenl(p, &extra) != 32 * 2 || extra)
			goto err;
	} else if (value == 2) {
		if ((p = strtokm(NULL, "*")) == NULL) // iv
			goto err;
		if (hexlenl(p, &extra) != 16 * 2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) // encrypted block with iv
			goto err;
		if (hexlenl(p, &extra) != 16 * 2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) // encrypted block with hardcoded iv
			goto err;
		if (hexlenl(p, &extra) != 16 * 2 || extra)
			goto err;
	}

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;

	memset(&cs, 0, SALT_SIZE);
	ctcopy += TAG_LENGTH;
	p = strtokm(ctcopy, "*");
	cs.type = atoi(p);
	p = strtokm(NULL, "*");
	if (cs.type == 1) {
		for (i = 0; i < 8; i++)
			cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
		p = strtokm(NULL, "*");
		for (i = 0; i < 32; i++)
			cs.block[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
	} else if (cs.type == 2) {
		for (i = 0; i < 16; i++)
			cs.iv[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
		p = strtokm(NULL, "*");
		for (i = 0; i < 16; i++)
			cs.block[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
		p = strtokm(NULL, "*");
		for (i = 0; i < 16; i++)
			cs.block2[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
	}

	MEM_FREE(keeptr);
	return &cs;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

static void multibit_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
}

static char *get_key(int index)
{
	return saved_key[index];
}

static int is_bitcoinj_protobuf_data(unsigned char *block)
{
	unsigned char c;
	int i;

	// Does it look like a bitcoinj protobuf (newest Bitcoin for Android backup)?
	if (block[0] == '\x0a' && block[1] < 128 && !memcmp((const char*)block + 2, "org.", 4)) {
		// If it doesn't look like a lower alpha domain name of len >= 8 (e.g. 'bitcoin.'), fail (btcrecover)
		for (i = 6; i < 14; i++) {
			c = block[i];
			if ((c > 'z') || ((c < 'a') && (c != '.')))
				return 0;
		}
		return 1; // success
	}

	return 0;
}

static int is_base58(unsigned char *buffer, int length)
{
	unsigned char c;
	int i;

	for (i = 0; i < length; i++) {
		c = buffer[i];
		if ((c > 'z') || (c < '1') || ((c > '9') && (c < 'A')) || ((c > 'Z') && (c < 'a')))
			return 0;
	}

	return 1; // success
}

static const unsigned char *salt_hardcoded = (unsigned char*)"\x35\x51\x03\x80\x75\xa3\xb0\xc5";
static const unsigned char *iv_hardcoded = (unsigned char*)"\xa3\x44\x39\x1f\x53\x83\x11\xb3\x29\x54\x86\x16\xc4\x89\x72\x3e";

static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

	for (index = 0; index < count; index++) {
		unsigned char iv[16];
		unsigned char key[32];
		unsigned char outbuf[16];
		AES_KEY aes_decrypt_key;
		int len = strlen(saved_key[index]);

		cracked[index] = 0;

		if (cur_salt->type == 1) {
			unsigned char c;
			MD5_CTX ctx;

			// key
			MD5_Init(&ctx);
			MD5_Update(&ctx, saved_key[index], len);
			MD5_Update(&ctx, cur_salt->salt, 8);
			MD5_Final(key, &ctx);
			// key + 16
			MD5_Init(&ctx);
			MD5_Update(&ctx, key, 16);
			MD5_Update(&ctx, saved_key[index], len);
			MD5_Update(&ctx, cur_salt->salt, 8);
			MD5_Final(key + 16, &ctx);
			// iv
			MD5_Init(&ctx);
			MD5_Update(&ctx, key + 16, 16);
			MD5_Update(&ctx, saved_key[index], len);
			MD5_Update(&ctx, cur_salt->salt, 8);
			MD5_Final(iv, &ctx);

			AES_set_decrypt_key(key, 256, &aes_decrypt_key);
			AES_cbc_encrypt(cur_salt->block, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);

			c = outbuf[0];
			if (c == 'L' || c == 'K' || c == '5' || c == 'Q') {
				// Does it look like a base58 private key (MultiBit, MultiDoge, or oldest-format Android key backup)? (btcrecover)
				// check if bytes are in base58 set [1-9A-HJ-NP-Za-km-z]
				if (is_base58(outbuf + 1, 15)) {
					// decrypt second block
					AES_cbc_encrypt(cur_salt->block + 16, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);
					if (is_base58(outbuf, 16))
						cracked[index] = 1;
				}
			} else if (c == '#') {
				// Does it look like a KnC for Android key backup?
				if (memcmp((const char*)outbuf, "# KEEP YOUR PRIV", 8) == 0) // 8 should be enough
					cracked[index] = 1;
			} else if (c == '\x0a') {
				// Does it look like a bitcoinj protobuf (newest Bitcoin for Android backup)? (btcrecover)
				if (is_bitcoinj_protobuf_data(outbuf))
					cracked[index] = 1;
			}
		} else if (cur_salt->type == 2) {
			UTF16 password[PLAINTEXT_LENGTH * 2 + 1];

			len = enc_to_utf16_be(password, PLAINTEXT_LENGTH, (const unsigned char*)saved_key[index], len + 1);
			if (len < 0)
				len = strlen16(password);
			crypto_scrypt((const unsigned char*)password, (len + 1) * 2, salt_hardcoded, 8, 16384, 8, 1, key, 32);

			// 1
			AES_set_decrypt_key(key, 128 * 2, &aes_decrypt_key);
			memcpy(iv, cur_salt->iv, 16);
			AES_cbc_encrypt(cur_salt->block, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);
			if (is_bitcoinj_protobuf_data(outbuf))
				cracked[index] = 1;
			else {
				// 2
				AES_set_decrypt_key(key, 128 * 2, &aes_decrypt_key);
				memcpy(iv, iv_hardcoded, 16);
				AES_cbc_encrypt(cur_salt->block2, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);
				if (is_bitcoinj_protobuf_data(outbuf))
					cracked[index] = 1;
			}
		}
	}

	return count;
}

static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (cracked[index])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_multibit = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
		{ NULL },
		{ FORMAT_TAG },
		multibit_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		multibit_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
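The type-2 (MultiBit HD) branch above hashes the UTF-16BE form of the password, (len + 1) * 2 bytes, i.e. including the 16-bit NUL terminator, through scrypt with N=16384, r=8, p=1 and a hardcoded 8-byte salt. A minimal sketch of that derivation for an ASCII-only password follows; ascii_to_utf16_be() is a simplified, hypothetical stand-in for John's enc_to_utf16_be(), which also handles non-ASCII target encodings, and the escrypt header is the same one the plugin includes.

/*
 * Sketch of the MultiBit HD (type 2) key derivation, restricted to
 * ASCII passwords: widen to UTF-16BE (a zero byte before each ASCII
 * byte), then scrypt over (len + 1) * 2 bytes with the hardcoded salt.
 * Illustration only, not part of the plugin.
 */
#include <string.h>
#include "escrypt/crypto_scrypt.h"

static size_t ascii_to_utf16_be(unsigned char *out, const char *in)
{
	size_t i, len = strlen(in);

	for (i = 0; i < len; i++) {
		out[2 * i] = 0;                /* high byte: 0 for ASCII */
		out[2 * i + 1] = (unsigned char)in[i];
	}
	out[2 * len] = 0;                      /* 16-bit NUL terminator */
	out[2 * len + 1] = 0;
	return len;
}

static void multibit_hd_kdf(const char *password, unsigned char key[32])
{
	/* same hardcoded salt as salt_hardcoded above */
	static const unsigned char salt[8] =
		{ 0x35, 0x51, 0x03, 0x80, 0x75, 0xa3, 0xb0, 0xc5 };
	unsigned char utf16[2 * (125 + 1)];    /* PLAINTEXT_LENGTH + NUL */
	size_t len = ascii_to_utf16_be(utf16, password);

	/* scrypt parameters match the plugin: N=16384, r=8, p=1, 32-byte key */
	crypto_scrypt(utf16, (len + 1) * 2, salt, 8, 16384, 8, 1, key, 32);
}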
#if FMT_EXTERNS_H
extern struct fmt_main fmt_multibit;
#elif FMT_REGISTERS_H
john_register_one(&fmt_multibit);
#else

#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE               2
#endif
#endif

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "aes.h"
#include "md5.h"
#include "escrypt/crypto_scrypt.h"
#include "jumbo.h"
#include "memdbg.h"
#include "unicode.h"

#define FORMAT_NAME             "MultiBit Wallet"
#define FORMAT_LABEL            "multibit"
#define FORMAT_TAG              "$multibit$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME          "MD5/scrypt AES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1001
#define BINARY_SIZE             0
#define BINARY_ALIGN            1
#define SALT_SIZE               sizeof(struct custom_salt)
#define SALT_ALIGN              sizeof(uint32_t)
#define PLAINTEXT_LENGTH        125
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      64 // just 4 is better for v2 salts

static struct fmt_tests multibit_tests[] = {
	// Wallets created by MultiBit Classic 0.5.18
	{"$multibit$1*0908a1bd44147709*c82b6d0409c1e46a4660ea6d4fa9ae12e4e234c98a71a51ced105c7e66a57ca3", "openwall"},
	{"$multibit$1*2043ebb14b6d9670*24284a38a62b6a63fb0912ebc05aa9d26d6fd828134d20b9778d8d841f65f584", "openwall123"},
	// MultiBit HD wallet 0.5.0
	{"$multibit$2*081e3a1252c26731120d0d63783ae46f*8354d5b454e78fb15f81c9e6289ba9b8*081e3a1252c26731120d0d63783ae46f", "openwall"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked, cracked_count;

static struct custom_salt {
	uint32_t type;
	unsigned char salt[16];
	unsigned char block[32];
	unsigned char iv[16];
	unsigned char block2[16];
} *cur_salt;

static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	if (omp_t > 1) {
		self->params.min_keys_per_crypt *= omp_t;
		omp_t *= OMP_SCALE;
		self->params.max_keys_per_crypt *= omp_t;
	}
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
	cracked_count = self->params.max_keys_per_crypt;
}

static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}

static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int value, extra;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += TAG_LENGTH;
	if ((p = strtokm(ctcopy, "*")) == NULL) // type
		goto err;
	if (!isdec(p))
		goto err;
	value = atoi(p);
	if (value != 1 && value != 2)
		goto err;
	if (value == 1) {
		if ((p = strtokm(NULL, "*")) == NULL) // salt
			goto err;
		if (hexlenl(p, &extra) != 8 * 2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) // encrypted blocks
			goto err;
		if (hexlenl(p, &extra) != 32 * 2 || extra)
			goto err;
	} else if (value == 2) {
		if ((p = strtokm(NULL, "*")) == NULL) // iv
			goto err;
		if (hexlenl(p, &extra) != 16 * 2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) // encrypted block with iv
			goto err;
		if (hexlenl(p, &extra) != 16 * 2 || extra)
			goto err;
		if ((p = strtokm(NULL, "*")) == NULL) // encrypted block with hardcoded iv
			goto err;
		if (hexlenl(p, &extra) != 16 * 2 || extra)
			goto err;
	}

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;

	memset(&cs, 0, SALT_SIZE);
	ctcopy += TAG_LENGTH;
	p = strtokm(ctcopy, "*");
	cs.type = atoi(p);
	p = strtokm(NULL, "*");
	if (cs.type == 1) {
		for (i = 0; i < 8; i++)
			cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
		p = strtokm(NULL, "*");
		for (i = 0; i < 32; i++)
			cs.block[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
	} else if (cs.type == 2) {
		for (i = 0; i < 16; i++)
			cs.iv[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
		p = strtokm(NULL, "*");
		for (i = 0; i < 16; i++)
			cs.block[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
		p = strtokm(NULL, "*");
		for (i = 0; i < 16; i++)
			cs.block2[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
	}

	MEM_FREE(keeptr);
	return &cs;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

static void multibit_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
}

static char *get_key(int index)
{
	return saved_key[index];
}

static int is_bitcoinj_protobuf_data(unsigned char *block)
{
	unsigned char c;
	int i;

	// Does it look like a bitcoinj protobuf (newest Bitcoin for Android backup)?
	if (block[0] == '\x0a' && block[1] < 128 && !memcmp((const char*)block + 2, "org.", 4)) {
		// If it doesn't look like a lower alpha domain name of len >= 8 (e.g. 'bitcoin.'), fail (btcrecover)
		for (i = 6; i < 14; i++) {
			c = block[i];
			if ((c > 'z') || ((c < 'a') && (c != '.')))
				return 0;
		}
		return 1; // success
	}

	return 0;
}

static int is_base58(unsigned char *buffer, int length)
{
	unsigned char c;
	int i;

	for (i = 0; i < length; i++) {
		c = buffer[i];
		if ((c > 'z') || (c < '1') || ((c > '9') && (c < 'A')) || ((c > 'Z') && (c < 'a')))
			return 0;
	}

	return 1; // success
}

static const unsigned char *salt_hardcoded = (unsigned char*)"\x35\x51\x03\x80\x75\xa3\xb0\xc5";
static const unsigned char *iv_hardcoded = (unsigned char*)"\xa3\x44\x39\x1f\x53\x83\x11\xb3\x29\x54\x86\x16\xc4\x89\x72\x3e";

static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		unsigned char iv[16];
		unsigned char key[32];
		unsigned char outbuf[16];
		AES_KEY aes_decrypt_key;
		int len = strlen(saved_key[index]);

#ifdef _OPENMP
		if (cracked[index]) /* avoid false sharing of nearby elements */
#endif
			cracked[index] = 0;

		if (cur_salt->type == 1) {
			unsigned char c;
			MD5_CTX ctx;

			// key
			MD5_Init(&ctx);
			MD5_Update(&ctx, saved_key[index], len);
			MD5_Update(&ctx, cur_salt->salt, 8);
			MD5_Final(key, &ctx);
			// key + 16
			MD5_Init(&ctx);
			MD5_Update(&ctx, key, 16);
			MD5_Update(&ctx, saved_key[index], len);
			MD5_Update(&ctx, cur_salt->salt, 8);
			MD5_Final(key + 16, &ctx);
			// iv
			MD5_Init(&ctx);
			MD5_Update(&ctx, key + 16, 16);
			MD5_Update(&ctx, saved_key[index], len);
			MD5_Update(&ctx, cur_salt->salt, 8);
			MD5_Final(iv, &ctx);

			AES_set_decrypt_key(key, 256, &aes_decrypt_key);
			AES_cbc_encrypt(cur_salt->block, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);

			c = outbuf[0];
			if (c == 'L' || c == 'K' || c == '5' || c == 'Q') {
				// Does it look like a base58 private key (MultiBit, MultiDoge, or oldest-format Android key backup)? (btcrecover)
				// check if bytes are in base58 set [1-9A-HJ-NP-Za-km-z]
				if (is_base58(outbuf + 1, 15)) {
					// decrypt second block
					AES_cbc_encrypt(cur_salt->block + 16, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);
					if (is_base58(outbuf, 16))
						cracked[index] = 1;
				}
			} else if (c == '#') {
				// Does it look like a KnC for Android key backup?
				if (memcmp((const char*)outbuf, "# KEEP YOUR PRIV", 8) == 0) // 8 should be enough
					cracked[index] = 1;
			} else if (c == '\x0a') {
				// Does it look like a bitcoinj protobuf (newest Bitcoin for Android backup)? (btcrecover)
				if (is_bitcoinj_protobuf_data(outbuf))
					cracked[index] = 1;
			}
		} else if (cur_salt->type == 2) {
			UTF16 password[PLAINTEXT_LENGTH * 2 + 1];

			len = enc_to_utf16_be(password, PLAINTEXT_LENGTH, (const unsigned char*)saved_key[index], len + 1);
			if (len < 0)
				len = strlen16(password);
			crypto_scrypt((const unsigned char*)password, (len + 1) * 2, salt_hardcoded, 8, 16384, 8, 1, key, 32);

			// 1
			AES_set_decrypt_key(key, 128 * 2, &aes_decrypt_key);
			memcpy(iv, cur_salt->iv, 16);
			AES_cbc_encrypt(cur_salt->block, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);
			if (is_bitcoinj_protobuf_data(outbuf))
				cracked[index] = 1;
			else {
				// 2
				AES_set_decrypt_key(key, 128 * 2, &aes_decrypt_key);
				memcpy(iv, iv_hardcoded, 16);
				AES_cbc_encrypt(cur_salt->block2, outbuf, 16, &aes_decrypt_key, iv, AES_DECRYPT);
				if (is_bitcoinj_protobuf_data(outbuf))
					cracked[index] = 1;
			}
		}
	}

	return count;
}

static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (cracked[index])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

struct fmt_main fmt_multibit = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
		{ NULL },
		{ FORMAT_TAG },
		multibit_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		multibit_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
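In the OpenMP build above, init() multiplies min_keys_per_crypt by the thread count and max_keys_per_crypt by the thread count times OMP_SCALE: with 8 threads and the default OMP_SCALE of 2, max_keys_per_crypt grows from 64 to 64 * 8 * 2 = 1024, so each thread receives a batch of candidates per crypt_all() call. The loop itself parallelizes cleanly because every iteration touches only per-index state. A minimal sketch of that pattern follows, with check_candidate() as a hypothetical stand-in for the plugin's KDF, AES-CBC decryption, and plaintext heuristics; build with: cc -fopenmp sketch.c

/*
 * Sketch of the crypt_all() OpenMP pattern: one independent trial per
 * candidate, scratch state declared inside the loop body (so each
 * thread gets private copies), results written to per-index slots.
 * Illustration only, not part of the plugin.
 */
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#define MAX_KEYS 64

static int cracked[MAX_KEYS];
static char keys[MAX_KEYS][126];           /* PLAINTEXT_LENGTH + 1 */

static int check_candidate(const char *key)
{
	/* hypothetical stand-in: derive key material, decrypt, test plaintext */
	return strcmp(key, "openwall") == 0;
}

static int crypt_all_sketch(int count)
{
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		/* per-candidate work only; no shared mutable state besides
		   cracked[index], which is private to this iteration */
		cracked[index] = check_candidate(keys[index]);
	}
	return count;
}

int main(void)
{
	strcpy(keys[3], "openwall");
	crypt_all_sketch(MAX_KEYS);
	return cracked[3] ? 0 : 1;
}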